diff --git a/.circleci/codecov.sh b/.circleci/codecov.sh deleted file mode 100644 index 1ef332b1b..000000000 --- a/.circleci/codecov.sh +++ /dev/null @@ -1,1550 +0,0 @@ -#!/usr/bin/env bash - -# Apache License Version 2.0, January 2004 -# https://github.com/codecov/codecov-bash/blob/master/LICENSE - - -set -e +o pipefail - -VERSION="0b37652" - -url="https://codecov.io" -env="$CODECOV_ENV" -service="" -token="" -search_in="" -flags="" -exit_with=0 -curlargs="" -curlawsargs="" -dump="0" -clean="0" -curl_s="-s" -name="$CODECOV_NAME" -include_cov="" -exclude_cov="" -ddp="$(echo ~)/Library/Developer/Xcode/DerivedData" -xp="" -files="" -cacert="$CODECOV_CA_BUNDLE" -gcov_ignore="-not -path './bower_components/**' -not -path './node_modules/**' -not -path './vendor/**'" -gcov_include="" - -ft_gcov="1" -ft_coveragepy="1" -ft_fix="1" -ft_search="1" -ft_s3="1" -ft_network="1" -ft_xcodellvm="1" -ft_xcodeplist="0" - -_git_root=$(git rev-parse --show-toplevel 2>/dev/null || hg root 2>/dev/null || echo $PWD) -git_root="$_git_root" -codecov_yml="" -remote_addr="" -if [ "$git_root" = "$PWD" ]; -then - git_root="." -fi - -url_o="" -pr_o="" -build_o="" -commit_o="" -search_in_o="" -tag_o="" -branch_o="" -slug_o="" -prefix_o="" - -commit="$VCS_COMMIT_ID" -branch="$VCS_BRANCH_NAME" -pr="$VCS_PULL_REQUEST" -slug="$VCS_SLUG" -tag="$VCS_TAG" -build_url="$CI_BUILD_URL" -build="$CI_BUILD_ID" -job="$CI_JOB_ID" - -beta_xcode_partials="" - -proj_root="$git_root" -gcov_exe="gcov" -gcov_arg="" - -b="\033[0;36m" -g="\033[0;32m" -r="\033[0;31m" -e="\033[0;90m" -x="\033[0m" - -show_help() { -cat << EOF - - Codecov Bash $VERSION - - Global report uploading tool for Codecov - Documentation at https://docs.codecov.io/docs - Contribute at https://github.com/codecov/codecov-bash - - - -h Display this help and exit - -f FILE Target file(s) to upload - - -f "path/to/file" only upload this file - skips searching unless provided patterns below - - -f '!*.bar' ignore all files at pattern *.bar - -f '*.foo' include all files at pattern *.foo - Must use single quotes. - This is non-exclusive, use -s "*.foo" to match specific paths. - - -s DIR Directory to search for coverage reports. - Already searches project root and artifact folders. - -t TOKEN Set the private repository token - (option) set environment variable CODECOV_TOKEN=:uuid - - -t @/path/to/token_file - -t uuid - - -n NAME Custom defined name of the upload. Visible in Codecov UI - - -e ENV Specify environment variables to be included with this build - Also accepting environment variables: CODECOV_ENV=VAR,VAR2 - - -e VAR,VAR2 - - -X feature Toggle functionalities - - -X gcov Disable gcov - -X coveragepy Disable python coverage - -X fix Disable report fixing - -X search Disable searching for reports - -X xcode Disable xcode processing - -X network Disable uploading the file network - - -R root dir Used when not in git/hg project to identify project root directory - -y conf file Used to specify the location of the .codecov.yml config file - -F flag Flag the upload to group coverage metrics - - -F unittests This upload is only unittests - -F integration This upload is only integration tests - -F ui,chrome This upload is Chrome - UI tests - - -c Move discovered coverage reports to the trash - -Z Exit with 1 if not successful. Default will Exit with 0 - - -- xcode -- - -D Custom Derived Data Path for Coverage.profdata and gcov processing - Default '~/Library/Developer/Xcode/DerivedData' - -J Specify packages to build coverage. 
- This can significantly reduces time to build coverage reports. - - -J 'MyAppName' Will match "MyAppName" and "MyAppNameTests" - -J '^ExampleApp$' Will match only "ExampleApp" not "ExampleAppTests" - - -- gcov -- - -g GLOB Paths to ignore during gcov gathering - -G GLOB Paths to include during gcov gathering - -p dir Project root directory - Also used when preparing gcov - -k prefix Prefix filepaths to help resolve path fixing: https://github.com/codecov/support/issues/472 - -x gcovexe gcov executable to run. Defaults to 'gcov' - -a gcovargs extra arguments to pass to gcov - - -- Override CI Environment Variables -- - These variables are automatically detected by popular CI providers - - -B branch Specify the branch name - -C sha Specify the commit sha - -P pr Specify the pull request number - -b build Specify the build number - -T tag Specify the git tag - - -- Enterprise -- - -u URL Set the target url for Enterprise customers - Not required when retrieving the bash uploader from your CCE - (option) Set environment variable CODECOV_URL=https://my-hosted-codecov.com - -r SLUG owner/repo slug used instead of the private repo token in Enterprise - (option) set environment variable CODECOV_SLUG=:owner/:repo - (option) set in your codecov.yml "codecov.slug" - -S PATH File path to your cacert.pem file used to verify ssl with Codecov Enterprise (optional) - (option) Set environment variable: CODECOV_CA_BUNDLE="/path/to/ca.pem" - -U curlargs Extra curl arguments to communicate with Codecov. e.g., -U "--proxy http://http-proxy" - -A curlargs Extra curl arguments to communicate with AWS. - - -- Debugging -- - -d Don't upload, but dump upload file to stdout - -K Remove color from the output - -v Verbose mode - -EOF -} - - -say() { - echo -e "$1" -} - - -urlencode() { - echo "$1" | curl -Gso /dev/null -w %{url_effective} --data-urlencode @- "" | cut -c 3- | sed -e 's/%0A//' -} - - -swiftcov() { - _dir=$(dirname "$1" | sed 's/\(Build\).*/\1/g') - for _type in app framework xctest - do - find "$_dir" -name "*.$_type" | while read f - do - _proj=${f##*/} - _proj=${_proj%."$_type"} - if [ "$2" = "" ] || [ "$(echo "$_proj" | grep -i "$2")" != "" ]; - then - say " $g+$x Building reports for $_proj $_type" - dest=$([ -f "$f/$_proj" ] && echo "$f/$_proj" || echo "$f/Contents/MacOS/$_proj") - _proj_name=$(echo "$_proj" | sed -e 's/[[:space:]]//g') - xcrun llvm-cov show $beta_xcode_partials -instr-profile "$1" "$dest" > "$_proj_name.$_type.coverage.txt" \ - || say " ${r}x>${x} llvm-cov failed to produce results for $dest" - fi - done - done -} - - -# Credits to: https://gist.github.com/pkuczynski/8665367 -parse_yaml() { - local prefix=$2 - local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') - sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \ - -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | - awk -F$fs '{ - indent = length($1)/2; - vname[indent] = $2; - for (i in vname) {if (i > indent) {delete vname[i]}} - if (length($3) > 0) { - vn=""; if (indent > 0) {vn=(vn)(vname[0])("_")} - printf("%s%s%s=\"%s\"\n", "'$prefix'",vn, $2, $3); - } - }' -} - - -if [ $# != 0 ]; -then - while getopts "a:A:b:B:cC:dD:e:f:F:g:G:hJ:k:Kn:p:P:r:R:y:s:S:t:T:u:U:vx:X:Z" o - do - case "$o" in - "a") - gcov_arg=$OPTARG - ;; - "A") - curlawsargs="$OPTARG" - ;; - "b") - build_o="$OPTARG" - ;; - "B") - branch_o="$OPTARG" - ;; - "c") - clean="1" - ;; - "C") - commit_o="$OPTARG" - ;; - "d") - dump="1" - ;; - "D") - ddp="$OPTARG" - ;; - "e") - env="$env,$OPTARG" - ;; - "f") - if [ "${OPTARG::1}" = "!" 
]; - then - exclude_cov="$exclude_cov -not -path '${OPTARG:1}'" - - elif [[ "$OPTARG" = *"*"* ]]; - then - include_cov="$include_cov -or -name '$OPTARG'" - - else - ft_search=0 - if [ "$files" = "" ]; - then - files="$OPTARG" - else - files="$files -$OPTARG" - fi - fi - ;; - "F") - if [ "$flags" = "" ]; - then - flags="$OPTARG" - else - flags="$flags,$OPTARG" - fi - ;; - "g") - gcov_ignore="$gcov_ignore -not -path '$OPTARG'" - ;; - "G") - gcov_include="$gcov_include -path '$OPTARG'" - ;; - "h") - show_help - exit 0; - ;; - "J") - ft_xcodellvm="1" - ft_xcodeplist="0" - if [ "$xp" = "" ]; - then - xp="$OPTARG" - else - xp="$xp\|$OPTARG" - fi - ;; - "k") - prefix_o=$(echo "$OPTARG" | sed -e 's:^/*::' -e 's:/*$::') - ;; - "K") - b="" - g="" - r="" - e="" - x="" - ;; - "n") - name="$OPTARG" - ;; - "p") - proj_root="$OPTARG" - ;; - "P") - pr_o="$OPTARG" - ;; - "r") - slug_o="$OPTARG" - ;; - "R") - git_root="$OPTARG" - ;; - "s") - if [ "$search_in_o" = "" ]; - then - search_in_o="$OPTARG" - else - search_in_o="$search_in_o $OPTARG" - fi - ;; - "S") - cacert="--cacert \"$OPTARG\"" - ;; - "t") - if [ "${OPTARG::1}" = "@" ]; - then - token=$(cat "${OPTARG:1}" | tr -d ' \n') - else - token="$OPTARG" - fi - ;; - "T") - tag_o="$OPTARG" - ;; - "u") - url_o=$(echo "$OPTARG" | sed -e 's/\/$//') - ;; - "U") - curlargs="$OPTARG" - ;; - "v") - set -x - curl_s="" - ;; - "x") - gcov_exe=$OPTARG - ;; - "X") - if [ "$OPTARG" = "gcov" ]; - then - ft_gcov="0" - elif [ "$OPTARG" = "coveragepy" ] || [ "$OPTARG" = "py" ]; - then - ft_coveragepy="0" - elif [ "$OPTARG" = "xcodellvm" ]; - then - ft_xcodellvm="1" - ft_xcodeplist="0" - elif [ "$OPTARG" = "fix" ] || [ "$OPTARG" = "fixes" ]; - then - ft_fix="0" - elif [ "$OPTARG" = "xcode" ]; - then - ft_xcodellvm="0" - ft_xcodeplist="0" - elif [ "$OPTARG" = "search" ]; - then - ft_search="0" - elif [ "$OPTARG" = "xcodepartials" ]; - then - beta_xcode_partials="-use-color" - elif [ "$OPTARG" = "network" ]; - then - ft_network="0" - elif [ "$OPTARG" = "s3" ]; - then - ft_s3="0" - fi - ;; - "y") - codecov_yml="$OPTARG" - ;; - "Z") - exit_with=1 - ;; - esac - done -fi - -say " - _____ _ - / ____| | | -| | ___ __| | ___ ___ _____ __ -| | / _ \\ / _\` |/ _ \\/ __/ _ \\ \\ / / -| |___| (_) | (_| | __/ (_| (_) \\ V / - \\_____\\___/ \\__,_|\\___|\\___\\___/ \\_/ - Bash-$VERSION - -" - -search_in="$proj_root" - -if [ "$JENKINS_URL" != "" ]; -then - say "$e==>$x Jenkins CI detected." - # https://wiki.jenkins-ci.org/display/JENKINS/Building+a+software+project - # https://wiki.jenkins-ci.org/display/JENKINS/GitHub+pull+request+builder+plugin#GitHubpullrequestbuilderplugin-EnvironmentVariables - service="jenkins" - - if [ "$ghprbSourceBranch" != "" ]; - then - branch="$ghprbSourceBranch" - elif [ "$GIT_BRANCH" != "" ]; - then - branch="$GIT_BRANCH" - elif [ "$BRANCH_NAME" != "" ]; - then - branch="$BRANCH_NAME" - fi - - if [ "$ghprbActualCommit" != "" ]; - then - commit="$ghprbActualCommit" - elif [ "$GIT_COMMIT" != "" ]; - then - commit="$GIT_COMMIT" - fi - - if [ "$ghprbPullId" != "" ]; - then - pr="$ghprbPullId" - elif [ "$CHANGE_ID" != "" ]; - then - pr="$CHANGE_ID" - fi - - build="$BUILD_NUMBER" - build_url=$(urlencode "$BUILD_URL") - -elif [ "$CI" = "true" ] && [ "$TRAVIS" = "true" ] && [ "$SHIPPABLE" != "true" ]; -then - say "$e==>$x Travis CI detected." 
- # https://docs.travis-ci.com/user/environment-variables/ - service="travis" - commit="${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT}" - build="$TRAVIS_JOB_NUMBER" - pr="$TRAVIS_PULL_REQUEST" - job="$TRAVIS_JOB_ID" - slug="$TRAVIS_REPO_SLUG" - env="$env,TRAVIS_OS_NAME" - tag="$TRAVIS_TAG" - if [ "$TRAVIS_BRANCH" != "$TRAVIS_TAG" ]; - then - branch="$TRAVIS_BRANCH" - fi - - language=$(printenv | grep "TRAVIS_.*_VERSION" | head -1) - if [ "$language" != "" ]; - then - env="$env,${language%=*}" - fi - -elif [ "$DOCKER_REPO" != "" ]; -then - say "$e==>$x Docker detected." - # https://docs.docker.com/docker-cloud/builds/advanced/ - service="docker" - branch="$SOURCE_BRANCH" - commit="$SOURCE_COMMIT" - slug="$DOCKER_REPO" - tag="$CACHE_TAG" - env="$env,IMAGE_NAME" - -elif [ "$CI" = "true" ] && [ "$CI_NAME" = "codeship" ]; -then - say "$e==>$x Codeship CI detected." - # https://www.codeship.io/documentation/continuous-integration/set-environment-variables/ - service="codeship" - branch="$CI_BRANCH" - build="$CI_BUILD_NUMBER" - build_url=$(urlencode "$CI_BUILD_URL") - commit="$CI_COMMIT_ID" - -elif [ ! -z "$CF_BUILD_URL" ] && [ ! -z "$CF_BUILD_ID" ]; -then - say "$e==>$x Codefresh CI detected." - # https://docs.codefresh.io/v1.0/docs/variables - service="codefresh" - branch="$CF_BRANCH" - build="$CF_BUILD_ID" - build_url=$(urlencode "$CF_BUILD_URL") - commit="$CF_REVISION" - -elif [ "$TEAMCITY_VERSION" != "" ]; -then - say "$e==>$x TeamCity CI detected." - # https://confluence.jetbrains.com/display/TCD8/Predefined+Build+Parameters - # https://confluence.jetbrains.com/plugins/servlet/mobile#content/view/74847298 - if [ "$TEAMCITY_BUILD_BRANCH" = '' ]; - then - echo " Teamcity does not automatically make build parameters available as environment variables." - echo " Add the following environment parameters to the build configuration" - echo " env.TEAMCITY_BUILD_BRANCH = %teamcity.build.branch%" - echo " env.TEAMCITY_BUILD_ID = %teamcity.build.id%" - echo " env.TEAMCITY_BUILD_URL = %teamcity.serverUrl%/viewLog.html?buildId=%teamcity.build.id%" - echo " env.TEAMCITY_BUILD_COMMIT = %system.build.vcs.number%" - echo " env.TEAMCITY_BUILD_REPOSITORY = %vcsroot..url%" - fi - service="teamcity" - branch="$TEAMCITY_BUILD_BRANCH" - build="$TEAMCITY_BUILD_ID" - build_url=$(urlencode "$TEAMCITY_BUILD_URL") - if [ "$TEAMCITY_BUILD_COMMIT" != "" ]; - then - commit="$TEAMCITY_BUILD_COMMIT" - else - commit="$BUILD_VCS_NUMBER" - fi - remote_addr="$TEAMCITY_BUILD_REPOSITORY" - -elif [ "$CI" = "true" ] && [ "$CIRCLECI" = "true" ]; -then - say "$e==>$x Circle CI detected." 
- # https://circleci.com/docs/environment-variables - service="circleci" - branch="$CIRCLE_BRANCH" - build="$CIRCLE_BUILD_NUM" - job="$CIRCLE_NODE_INDEX" - if [ "$CIRCLE_PROJECT_REPONAME" != "" ]; - then - slug="$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME" - else - # git@github.com:owner/repo.git - slug="${CIRCLE_REPOSITORY_URL##*:}" - # owner/repo.git - slug="${slug%%.git}" - fi - pr="$CIRCLE_PR_NUMBER" - commit="$CIRCLE_SHA1" - search_in="$search_in $CIRCLE_ARTIFACTS $CIRCLE_TEST_REPORTS" - -elif [ "$BUDDYBUILD_BRANCH" != "" ]; -then - say "$e==>$x buddybuild detected" - # http://docs.buddybuild.com/v6/docs/custom-prebuild-and-postbuild-steps - service="buddybuild" - branch="$BUDDYBUILD_BRANCH" - build="$BUDDYBUILD_BUILD_NUMBER" - build_url="https://dashboard.buddybuild.com/public/apps/$BUDDYBUILD_APP_ID/build/$BUDDYBUILD_BUILD_ID" - # BUDDYBUILD_TRIGGERED_BY - if [ "$ddp" = "$(echo ~)/Library/Developer/Xcode/DerivedData" ]; - then - ddp="/private/tmp/sandbox/${BUDDYBUILD_APP_ID}/bbtest" - fi - -elif [ "${bamboo_planRepository_revision}" != "" ]; -then - say "$e==>$x Bamboo detected" - # https://confluence.atlassian.com/bamboo/bamboo-variables-289277087.html#Bamboovariables-Build-specificvariables - service="bamboo" - commit="${bamboo_planRepository_revision}" - branch="${bamboo_planRepository_branch}" - build="${bamboo_buildNumber}" - build_url="${bamboo_buildResultsUrl}" - remote_addr="${bamboo_planRepository_repositoryUrl}" - -elif [ "$CI" = "true" ] && [ "$BITRISE_IO" = "true" ]; -then - # http://devcenter.bitrise.io/faq/available-environment-variables/ - say "$e==>$x Bitrise CI detected." - service="bitrise" - branch="$BITRISE_GIT_BRANCH" - build="$BITRISE_BUILD_NUMBER" - build_url=$(urlencode "$BITRISE_BUILD_URL") - pr="$BITRISE_PULL_REQUEST" - if [ "$GIT_CLONE_COMMIT_HASH" != "" ]; - then - commit="$GIT_CLONE_COMMIT_HASH" - fi - -elif [ "$CI" = "true" ] && [ "$SEMAPHORE" = "true" ]; -then - say "$e==>$x Semaphore CI detected." - # https://semaphoreapp.com/docs/available-environment-variables.html - service="semaphore" - branch="$BRANCH_NAME" - build="$SEMAPHORE_BUILD_NUMBER" - job="$SEMAPHORE_CURRENT_THREAD" - pr="$PULL_REQUEST_NUMBER" - slug="$SEMAPHORE_REPO_SLUG" - commit="$REVISION" - env="$env,SEMAPHORE_TRIGGER_SOURCE" - -elif [ "$CI" = "true" ] && [ "$BUILDKITE" = "true" ]; -then - say "$e==>$x Buildkite CI detected." - # https://buildkite.com/docs/guides/environment-variables - service="buildkite" - branch="$BUILDKITE_BRANCH" - build="$BUILDKITE_BUILD_NUMBER" - job="$BUILDKITE_JOB_ID" - build_url=$(urlencode "$BUILDKITE_BUILD_URL") - slug="$BUILDKITE_PROJECT_SLUG" - commit="$BUILDKITE_COMMIT" - if [[ "$BUILDKITE_PULL_REQUEST" != "false" ]]; then - pr="$BUILDKITE_PULL_REQUEST" - fi - tag="$BUILDKITE_TAG" - -elif [ "$CI" = "drone" ] || [ "$DRONE" = "true" ]; -then - say "$e==>$x Drone CI detected." - # http://docs.drone.io/env.html - # drone commits are not full shas - service="drone.io" - branch="$DRONE_BRANCH" - build="$DRONE_BUILD_NUMBER" - build_url=$(urlencode "${DRONE_BUILD_LINK}") - pr="$DRONE_PULL_REQUEST" - job="$DRONE_JOB_NUMBER" - tag="$DRONE_TAG" - -elif [ "$HEROKU_TEST_RUN_BRANCH" != "" ]; -then - say "$e==>$x Heroku CI detected." - # https://devcenter.heroku.com/articles/heroku-ci#environment-variables - service="heroku" - branch="$HEROKU_TEST_RUN_BRANCH" - build="$HEROKU_TEST_RUN_ID" - -elif [ "$CI" = "True" ] && [ "$APPVEYOR" = "True" ]; -then - say "$e==>$x Appveyor CI detected." 
- # http://www.appveyor.com/docs/environment-variables - service="appveyor" - branch="$APPVEYOR_REPO_BRANCH" - build=$(urlencode "$APPVEYOR_JOB_ID") - pr="$APPVEYOR_PULL_REQUEST_NUMBER" - job="$APPVEYOR_ACCOUNT_NAME%2F$APPVEYOR_PROJECT_SLUG%2F$APPVEYOR_BUILD_VERSION" - slug="$APPVEYOR_REPO_NAME" - commit="$APPVEYOR_REPO_COMMIT" - -elif [ "$CI" = "true" ] && [ "$WERCKER_GIT_BRANCH" != "" ]; -then - say "$e==>$x Wercker CI detected." - # http://devcenter.wercker.com/articles/steps/variables.html - service="wercker" - branch="$WERCKER_GIT_BRANCH" - build="$WERCKER_MAIN_PIPELINE_STARTED" - slug="$WERCKER_GIT_OWNER/$WERCKER_GIT_REPOSITORY" - commit="$WERCKER_GIT_COMMIT" - -elif [ "$CI" = "true" ] && [ "$MAGNUM" = "true" ]; -then - say "$e==>$x Magnum CI detected." - # https://magnum-ci.com/docs/environment - service="magnum" - branch="$CI_BRANCH" - build="$CI_BUILD_NUMBER" - commit="$CI_COMMIT" - -elif [ "$SHIPPABLE" = "true" ]; -then - say "$e==>$x Shippable CI detected." - # http://docs.shippable.com/ci_configure/ - service="shippable" - branch=$([ "$HEAD_BRANCH" != "" ] && echo "$HEAD_BRANCH" || echo "$BRANCH") - build="$BUILD_NUMBER" - build_url=$(urlencode "$BUILD_URL") - pr="$PULL_REQUEST" - slug="$REPO_FULL_NAME" - commit="$COMMIT" - -elif [ "$TDDIUM" = "true" ]; -then - say "Solano CI detected." - # http://docs.solanolabs.com/Setup/tddium-set-environment-variables/ - service="solano" - commit="$TDDIUM_CURRENT_COMMIT" - branch="$TDDIUM_CURRENT_BRANCH" - build="$TDDIUM_TID" - pr="$TDDIUM_PR_ID" - -elif [ "$GREENHOUSE" = "true" ]; -then - say "$e==>$x Greenhouse CI detected." - # http://docs.greenhouseci.com/docs/environment-variables-files - service="greenhouse" - branch="$GREENHOUSE_BRANCH" - build="$GREENHOUSE_BUILD_NUMBER" - build_url=$(urlencode "$GREENHOUSE_BUILD_URL") - pr="$GREENHOUSE_PULL_REQUEST" - commit="$GREENHOUSE_COMMIT" - search_in="$search_in $GREENHOUSE_EXPORT_DIR" - -elif [ "$GITLAB_CI" != "" ]; -then - say "$e==>$x GitLab CI detected." - # http://doc.gitlab.com/ce/ci/variables/README.html - service="gitlab" - branch="${CI_BUILD_REF_NAME:-$CI_COMMIT_REF_NAME}" - build="${CI_BUILD_ID:-$CI_JOB_ID}" - remote_addr="${CI_BUILD_REPO:-$CI_REPOSITORY_URL}" - commit="${CI_BUILD_REF:-$CI_COMMIT_SHA}" - -else - say "${r}x>${x} No CI provider detected." - say " Testing inside Docker? ${b}http://docs.codecov.io/docs/testing-with-docker${x}" - say " Testing with Tox? 
${b}https://docs.codecov.io/docs/python#section-testing-with-tox${x}" - -fi - -say " ${e}project root:${x} $git_root" - -# find branch, commit, repo from git command -if [ "$GIT_BRANCH" != "" ]; -then - branch="$GIT_BRANCH" - -elif [ "$branch" = "" ]; -then - branch=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || hg branch 2>/dev/null || echo "") - if [ "$branch" = "HEAD" ]; - then - branch="" - fi -fi - -if [ "$commit_o" = "" ]; -then - # merge commit -> actual commit - mc= - if [ -n "$pr" ] && [ "$pr" != false ]; - then - mc=$(git show --no-patch --format="%P" 2>/dev/null || echo "") - fi - if [[ "$mc" =~ ^[a-z0-9]{40}[[:space:]][a-z0-9]{40}$ ]]; - then - say " Fixing merge commit SHA" - commit=$(echo "$mc" | cut -d' ' -f2) - elif [ "$GIT_COMMIT" != "" ]; - then - commit="$GIT_COMMIT" - elif [ "$commit" = "" ]; - then - commit=$(git log -1 --format="%H" 2>/dev/null || hg id -i --debug 2>/dev/null | tr -d '+' || echo "") - fi -else - commit="$commit_o" -fi - -if [ "$CODECOV_TOKEN" != "" ] && [ "$token" = "" ]; -then - say "${e}-->${x} token set from env" - token="$CODECOV_TOKEN" -fi - -if [ "$CODECOV_URL" != "" ] && [ "$url_o" = "" ]; -then - say "${e}-->${x} url set from env" - url_o=$(echo "$CODECOV_URL" | sed -e 's/\/$//') -fi - -if [ "$CODECOV_SLUG" != "" ]; -then - say "${e}-->${x} slug set from env" - slug_o="$CODECOV_SLUG" - -elif [ "$slug" = "" ]; -then - if [ "$remote_addr" = "" ]; - then - remote_addr=$(git config --get remote.origin.url || hg paths default || echo '') - fi - if [ "$remote_addr" != "" ]; - then - if echo "$remote_addr" | grep -q "//"; then - # https - slug=$(echo "$remote_addr" | cut -d / -f 4,5 | sed -e 's/\.git$//') - else - # ssh - slug=$(echo "$remote_addr" | cut -d : -f 2 | sed -e 's/\.git$//') - fi - fi - if [ "$slug" = "/" ]; - then - slug="" - fi -fi - -yaml=$(test -n "$codecov_yml" && echo "$codecov_yml" \ - || cd "$git_root" && \ - git ls-files "*codecov.yml" "*codecov.yaml" 2>/dev/null \ - || hg locate "*codecov.yml" "*codecov.yaml" 2>/dev/null \ - || cd $proj_root && find . -type f -name '*codecov.y*ml' -depth 1 2>/dev/null \ - || echo '') -yaml=$(echo "$yaml" | head -1) - -if [ "$yaml" != "" ]; -then - say " ${e}Yaml found at:${x} $yaml" - config=$(parse_yaml "$git_root/$yaml" || echo '') - - # TODO validate the yaml here - - if [ "$(echo "$config" | grep 'codecov_token="')" != "" ] && [ "$token" = "" ]; - then - say "${e}-->${x} token set from yaml" - token="$(echo "$config" | grep 'codecov_token="' | sed -e 's/codecov_token="//' | sed -e 's/"\.*//')" - fi - - if [ "$(echo "$config" | grep 'codecov_url="')" != "" ] && [ "$url_o" = "" ]; - then - say "${e}-->${x} url set from yaml" - url_o="$(echo "$config" | grep 'codecov_url="' | sed -e 's/codecov_url="//' | sed -e 's/"\.*//')" - fi - - if [ "$(echo "$config" | grep 'codecov_slug="')" != "" ] && [ "$slug_o" = "" ]; - then - say "${e}-->${x} slug set from yaml" - slug_o="$(echo "$config" | grep 'codecov_slug="' | sed -e 's/codecov_slug="//' | sed -e 's/"\.*//')" - fi -else - say " ${g}Yaml not found, that's ok! 
Learn more at${x} ${b}http://docs.codecov.io/docs/codecov-yaml${x}" - -fi - -if [ "$branch_o" != "" ]; -then - branch=$(urlencode "$branch_o") -else - branch=$(urlencode "$branch") -fi - -query="branch=$branch\ - &commit=$commit\ - &build=$([ "$build_o" = "" ] && echo "$build" || echo "$build_o")\ - &build_url=$build_url\ - &name=$(urlencode "$name")\ - &tag=$([ "$tag_o" = "" ] && echo "$tag" || echo "$tag_o")\ - &slug=$([ "$slug_o" = "" ] && urlencode "$slug" || urlencode "$slug_o")\ - &service=$service\ - &flags=$flags\ - &pr=$([ "$pr_o" = "" ] && echo "${pr##\#}" || echo "${pr_o##\#}")\ - &job=$job" - -if [ "$ft_search" = "1" ]; -then - # detect bower comoponents location - bower_components="bower_components" - bower_rc=$(cd "$git_root" && cat .bowerrc 2>/dev/null || echo "") - if [ "$bower_rc" != "" ]; - then - bower_components=$(echo "$bower_rc" | tr -d '\n' | grep '"directory"' | cut -d'"' -f4 | sed -e 's/\/$//') - if [ "$bower_components" = "" ]; - then - bower_components="bower_components" - fi - fi - - # Swift Coverage - if [ "$ft_xcodellvm" = "1" ] && [ -d "$ddp" ]; - then - say "${e}==>${x} Processing Xcode reports via llvm-cov" - say " DerivedData folder: $ddp" - profdata_files=$(find "$ddp" -name '*.profdata' 2>/dev/null || echo '') - if [ "$profdata_files" != "" ]; - then - # xcode via profdata - if [ "$xp" = "" ]; - then - # xp=$(xcodebuild -showBuildSettings 2>/dev/null | grep -i "^\s*PRODUCT_NAME" | sed -e 's/.*= \(.*\)/\1/') - # say " ${e}->${x} Speed up Xcode processing by adding ${e}-J '$xp'${x}" - say " ${g}hint${x} Speed up Swift processing by using use ${g}-J 'AppName'${x} (regexp accepted)" - say " ${g}hint${x} This will remove Pods/ from your report. Also ${b}https://docs.codecov.io/docs/ignoring-paths${x}" - fi - while read -r profdata; - do - if [ "$profdata" != "" ]; - then - swiftcov "$profdata" "$xp" - fi - done <<< "$profdata_files" - else - say " ${e}->${x} No Swift coverage found" - fi - - # Obj-C Gcov Coverage - if [ "$ft_gcov" = "1" ]; - then - say " ${e}->${x} Running $gcov_exe for Obj-C" - bash -c "find $ddp -type f -name '*.gcda' $gcov_include $gcov_ignore -exec $gcov_exe -p $gcov_arg {} +" || true - fi - fi - - if [ "$ft_xcodeplist" = "1" ] && [ -d "$ddp" ]; - then - say "${e}==>${x} Processing Xcode plists" - plists_files=$(find "$ddp" -name '*.xccoverage' 2>/dev/null || echo '') - if [ "$plists_files" != "" ]; - then - while read -r plist; - do - if [ "$plist" != "" ]; - then - say " ${g}Found${x} plist file at $plist" - plutil -convert xml1 -o "$(basename "$plist").plist" -- $plist - fi - done <<< "$plists_files" - fi - fi - - # Gcov Coverage - if [ "$ft_gcov" = "1" ]; - then - say "${e}==>${x} Running gcov in $proj_root ${e}(disable via -X gcov)${x}" - bash -c "find $proj_root -type f -name '*.gcno' $gcov_include $gcov_ignore -exec $gcov_exe -pb $gcov_arg {} +" || true - else - say "${e}==>${x} gcov disabled" - fi - - # Python Coverage - if [ "$ft_coveragepy" = "1" ]; - then - if [ ! -f coverage.xml ]; - then - if which coverage >/dev/null 2>&1; - then - say "${e}==>${x} Python coveragepy exists ${e}disable via -X coveragepy${x}" - - dotcoverage=$(find "$git_root" -name '.coverage' -or -name '.coverage.*' | head -1 || echo '') - if [ "$dotcoverage" != "" ]; - then - cd "$(dirname "$dotcoverage")" - if [ ! -f .coverage ]; - then - say " ${e}->${x} Running coverage combine" - coverage combine -a - fi - say " ${e}->${x} Running coverage xml" - if [ "$(coverage xml -i)" != "No data to report." 
]; - then - files="$files -$PWD/coverage.xml" - else - say " ${r}No data to report.${x}" - fi - cd "$proj_root" - else - say " ${r}No .coverage file found.${x}" - fi - else - say "${e}==>${x} Python coveragepy not found" - fi - fi - else - say "${e}==>${x} Python coveragepy disabled" - fi - - if [ "$search_in_o" != "" ]; - then - # location override - search_in="$search_in_o" - fi - - say "$e==>$x Searching for coverage reports in:" - for _path in $search_in - do - say " ${g}+${x} $_path" - done - - patterns="find $search_in \( \ - -name vendor \ - -or -name htmlcov \ - -or -name virtualenv \ - -or -name js/generated/coverage \ - -or -name .virtualenv \ - -or -name virtualenvs \ - -or -name .virtualenvs \ - -or -name .env \ - -or -name .envs \ - -or -name env \ - -or -name .yarn-cache \ - -or -name envs \ - -or -name .venv \ - -or -name .venvs \ - -or -name venv \ - -or -name venvs \ - -or -name .git \ - -or -name .hg \ - -or -name .tox \ - -or -name __pycache__ \ - -or -name '.egg-info*' \ - -or -name '$bower_components' \ - -or -name node_modules \ - -or -name 'conftest_*.c.gcov' \ - \) -prune -or \ - -type f \( -name '*coverage*.*' \ - -or -name 'nosetests.xml' \ - -or -name 'jacoco*.xml' \ - -or -name 'clover.xml' \ - -or -name 'report.xml' \ - -or -name '*.codecov.*' \ - -or -name 'codecov.*' \ - -or -name 'cobertura.xml' \ - -or -name 'excoveralls.json' \ - -or -name 'luacov.report.out' \ - -or -name 'coverage-final.json' \ - -or -name 'naxsi.info' \ - -or -name 'lcov.info' \ - -or -name 'lcov.dat' \ - -or -name '*.lcov' \ - -or -name '*.clover' \ - -or -name 'cover.out' \ - -or -name 'gcov.info' \ - -or -name '*.gcov' \ - -or -name '*.lst' \ - $include_cov \) \ - $exclude_cov \ - -not -name '*.profdata' \ - -not -name 'coverage-summary.json' \ - -not -name 'phpunit-code-coverage.xml' \ - -not -name '*/classycle/report.xml' \ - -not -name 'remapInstanbul.coverage*.json' \ - -not -name 'phpunit-coverage.xml' \ - -not -name '*codecov.yml' \ - -not -name '*.serialized' \ - -not -name '.coverage*' \ - -not -name '.*coveragerc' \ - -not -name '*.sh' \ - -not -name '*.bat' \ - -not -name '*.ps1' \ - -not -name '*.env' \ - -not -name '*.cmake' \ - -not -name '*.dox' \ - -not -name '*.ec' \ - -not -name '*.rst' \ - -not -name '*.h' \ - -not -name '*.scss' \ - -not -name '*.o' \ - -not -name '*.proto' \ - -not -name '*.sbt' \ - -not -name '*.xcoverage.*' \ - -not -name '*.gz' \ - -not -name '*.conf' \ - -not -name '*.p12' \ - -not -name '*.csv' \ - -not -name '*.rsp' \ - -not -name '*.m4' \ - -not -name '*.pem' \ - -not -name '*~' \ - -not -name '*.exe' \ - -not -name '*.am' \ - -not -name '*.template' \ - -not -name '*.cp' \ - -not -name '*.bw' \ - -not -name '*.crt' \ - -not -name '*.log' \ - -not -name '*.cmake' \ - -not -name '*.pth' \ - -not -name '*.in' \ - -not -name '*.jar*' \ - -not -name '*.pom*' \ - -not -name '*.png' \ - -not -name '*.jpg' \ - -not -name '*.sql' \ - -not -name '*.jpeg' \ - -not -name '*.svg' \ - -not -name '*.gif' \ - -not -name '*.csv' \ - -not -name '*.snapshot' \ - -not -name '*.mak*' \ - -not -name '*.bash' \ - -not -name '*.data' \ - -not -name '*.py' \ - -not -name '*.class' \ - -not -name '*.xcconfig' \ - -not -name '*.ec' \ - -not -name '*.coverage' \ - -not -name '*.pyc' \ - -not -name '*.cfg' \ - -not -name '*.egg' \ - -not -name '*.ru' \ - -not -name '*.css' \ - -not -name '*.less' \ - -not -name '*.pyo' \ - -not -name '*.whl' \ - -not -name '*.html' \ - -not -name '*.ftl' \ - -not -name '*.erb' \ - -not -name '*.rb' \ - -not -name '*.js' \ - -not -name 
'*.jade' \ - -not -name '*.db' \ - -not -name '*.md' \ - -not -name '*.cpp' \ - -not -name '*.gradle' \ - -not -name '*.tar.tz' \ - -not -name '*.scss' \ - -not -name 'include.lst' \ - -not -name 'fullLocaleNames.lst' \ - -not -name 'inputFiles.lst' \ - -not -name 'createdFiles.lst' \ - -not -name 'scoverage.measurements.*' \ - -not -name 'test_*_coverage.txt' \ - -not -name 'testrunner-coverage*' \ - -print 2>/dev/null" - files=$(eval "$patterns" || echo '') - -elif [ "$include_cov" != "" ]; -then - files=$(eval "find $search_in -type f \( ${include_cov:5} \)$exclude_cov 2>/dev/null" || echo '') -fi - -num_of_files=$(echo "$files" | wc -l | tr -d ' ') -if [ "$num_of_files" != '' ] && [ "$files" != '' ]; -then - say " ${e}->${x} Found $num_of_files reports" -fi - -# no files found -if [ "$files" = "" ]; -then - say "${r}-->${x} No coverage report found." - say " Please visit ${b}http://docs.codecov.io/docs/supported-languages${x}" - exit ${exit_with}; -fi - -if [ "$ft_network" == "1" ]; -then - say "${e}==>${x} Detecting git/mercurial file structure" - network=$(cd "$git_root" && git ls-files 2>/dev/null || hg locate 2>/dev/null || echo "") - if [ "$network" = "" ]; - then - network=$(find "$git_root" \( \ - -name virtualenv \ - -name .virtualenv \ - -name virtualenvs \ - -name .virtualenvs \ - -name '*.png' \ - -name '*.gif' \ - -name '*.jpg' \ - -name '*.jpeg' \ - -name '*.md' \ - -name .env \ - -name .envs \ - -name env \ - -name envs \ - -name .venv \ - -name .venvs \ - -name venv \ - -name venvs \ - -name .git \ - -name .egg-info \ - -name shunit2-2.1.6 \ - -name vendor \ - -name __pycache__ \ - -name node_modules \ - -path '*/$bower_components/*' \ - -path '*/target/delombok/*' \ - -path '*/build/lib/*' \ - -path '*/js/generated/coverage/*' \ - \) -prune -or \ - -type f -print 2>/dev/null || echo '') - fi - - if [ "$prefix_o" != "" ]; - then - network=$(echo "$network" | awk "{print \"$prefix_o/\"\$0}") - fi -fi - -upload_file=`mktemp /tmp/codecov.XXXXXX` -adjustments_file=`mktemp /tmp/codecov.adjustments.XXXXXX` - -cleanup() { - rm -f $upload_file $adjustments_file $upload_file.gz -} - -trap cleanup INT ABRT TERM - -if [ "$env" != "" ]; -then - inc_env="" - say "${e}==>${x} Appending build variables" - for varname in $(echo "$env" | tr ',' ' ') - do - if [ "$varname" != "" ]; - then - say " ${g}+${x} $varname" - inc_env="${inc_env}${varname}=$(eval echo "\$${varname}") -" - fi - done - -echo "$inc_env<<<<<< ENV" >> $upload_file -fi - -# Append git file list -# write discovered yaml location -echo "$yaml" >> $upload_file -if [ "$ft_network" == "1" ]; -then - i="woff|eot|otf" # fonts - i="$i|gif|png|jpg|jpeg|psd" # images - i="$i|ptt|pptx|numbers|pages|md|txt|xlsx|docx|doc|pdf|html|csv" # docs - i="$i|yml|yaml|.gitignore" # supporting docs - echo "$network" | grep -vwE "($i)$" >> $upload_file -fi -echo "<<<<<< network" >> $upload_file - -fr=0 -say "${e}==>${x} Reading reports" -while IFS='' read -r file; -do - # read the coverage file - if [ "$(echo "$file" | tr -d ' ')" != '' ]; - then - if [ -f "$file" ]; - then - report_len=$(wc -c < "$file") - if [ "$report_len" -ne 0 ]; - then - say " ${g}+${x} $file ${e}bytes=$(echo "$report_len" | tr -d ' ')${x}" - # append to to upload - _filename=$(basename "$file") - if [ "${_filename##*.}" = 'gcov' ]; - then - echo "# path=$(echo "$file.reduced" | sed "s|^$git_root/||")" >> $upload_file - # get file name - head -1 $file >> $upload_file - # 1. remove source code - # 2. remove ending bracket lines - # 3. remove whitespace - # 4. 
remove contextual lines - # 5. remove function names - awk -F': *' '{print $1":"$2":"}' $file \ - | sed '\/: *} *$/d' \ - | sed 's/^ *//' \ - | sed '/^-/d' \ - | sed 's/^function.*/func/' >> $upload_file - else - echo "# path=$(echo "$file" | sed "s|^$git_root/||")" >> $upload_file - cat "$file" >> $upload_file - fi - echo "<<<<<< EOF" >> $upload_file - fr=1 - if [ "$clean" = "1" ]; - then - rm "$file" - fi - else - say " ${r}-${x} Skipping empty file $file" - fi - else - say " ${r}-${x} file not found at $file" - fi - fi -done <<< "$(echo -e "$files")" - -if [ "$fr" = "0" ]; -then - say "${r}-->${x} No coverage data found." - say " Please visit ${b}http://docs.codecov.io/docs/supported-languages${x}" - say " search for your projects language to learn how to collect reports." - exit ${exit_with}; -fi - -if [ "$ft_fix" = "1" ]; -then - say "${e}==>${x} Appending adjustments" - say " ${b}http://docs.codecov.io/docs/fixing-reports${x}" - - empty_line='^[[:space:]]*$' - # // - syntax_comment='^[[:space:]]*//.*' - # /* or */ - syntax_comment_block='^[[:space:]]*(\/\*|\*\/)[[:space:]]*$' - # { or } - syntax_bracket='^[[:space:]]*[\{\}][[:space:]]*(//.*)?$' - # [ or ] - syntax_list='^[[:space:]]*[][][[:space:]]*(//.*)?$' - - skip_dirs="-not -path '*/$bower_components/*' \ - -not -path '*/node_modules/*'" - - cut_and_join() { - awk 'BEGIN { FS=":" } - $3 ~ /\/\*/ || $3 ~ /\*\// { print $0 ; next } - $1!=key { if (key!="") print out ; key=$1 ; out=$1":"$2 ; next } - { out=out","$2 } - END { print out }' 2>/dev/null - } - - if echo "$network" | grep -m1 '.kt$' 1>/dev/null; - then - # skip brackets and comments - find "$git_root" -type f \ - -name '*.kt' \ - -exec \ - grep -nIHE -e $syntax_bracket \ - -e $syntax_comment_block {} \; \ - | cut_and_join \ - >> $adjustments_file \ - || echo '' - - # last line in file - find "$git_root" -type f \ - -name '*.kt' -exec \ - wc -l {} \; \ - | while read l; do echo "EOF: $l"; done \ - 2>/dev/null \ - >> $adjustments_file \ - || echo '' - - fi - - if echo "$network" | grep -m1 '.go$' 1>/dev/null; - then - # skip empty lines, comments, and brackets - find "$git_root" -not -path '*/vendor/*' \ - -type f \ - -name '*.go' \ - -exec \ - grep -nIHE \ - -e $empty_line \ - -e $syntax_comment \ - -e $syntax_comment_block \ - -e $syntax_bracket \ - {} \; \ - | cut_and_join \ - >> $adjustments_file \ - || echo '' - fi - - if echo "$network" | grep -m1 '.dart$' 1>/dev/null; - then - # skip brackets - find "$git_root" -type f \ - -name '*.dart' \ - -exec \ - grep -nIHE \ - -e $syntax_bracket \ - {} \; \ - | cut_and_join \ - >> $adjustments_file \ - || echo '' - fi - - if echo "$network" | grep -m1 '.php$' 1>/dev/null; - then - # skip empty lines, comments, and brackets - find "$git_root" -not -path "*/vendor/*" \ - -type f \ - -name '*.php' \ - -exec \ - grep -nIHE \ - -e $syntax_list \ - -e $syntax_bracket \ - -e '^[[:space:]]*\);[[:space:]]*(//.*)?$' \ - {} \; \ - | cut_and_join \ - >> $adjustments_file \ - || echo '' - fi - - if echo "$network" | grep -m1 '\(.cpp\|.h\|.cxx\|.c\|.hpp\|.m\)$' 1>/dev/null; - then - # skip brackets - find "$git_root" -type f \ - $skip_dirs \ - \( \ - -name '*.h' \ - -or -name '*.cpp' \ - -or -name '*.cxx' \ - -or -name '*.m' \ - -or -name '*.c' \ - -or -name '*.hpp' \ - \) -exec \ - grep -nIHE \ - -e $empty_line \ - -e $syntax_bracket \ - -e '// LCOV_EXCL' \ - {} \; \ - | cut_and_join \ - >> $adjustments_file \ - || echo '' - - # skip brackets - find "$git_root" -type f \ - $skip_dirs \ - \( \ - -name '*.h' \ - -or -name '*.cpp' \ - -or 
-name '*.cxx' \ - -or -name '*.m' \ - -or -name '*.c' \ - -or -name '*.hpp' \ - \) -exec \ - grep -nIH '// LCOV_EXCL' \ - {} \; \ - >> $adjustments_file \ - || echo '' - - fi - - found=$(cat $adjustments_file | tr -d ' ') - - if [ "$found" != "" ]; - then - say " ${g}+${x} Found adjustments" - echo "# path=fixes" >> $upload_file - cat $adjustments_file >> $upload_file - echo "<<<<<< EOF" >> $upload_file - rm -rf $adjustments_file - else - say " ${e}->${x} No adjustments found" - fi -fi - -if [ "$url_o" != "" ]; -then - url="$url_o" -fi - -if [ "$dump" != "0" ]; -then - # trim whitespace from query - say " ${e}->${x} Dumping upload file (no upload)" - echo "$url/upload/v4?$(echo "package=bash-$VERSION&token=$token&$query" | tr -d ' ')" - cat $upload_file -else - - say "${e}==>${x} Gzipping contents" - gzip -nf9 $upload_file - - query=$(echo "${query}" | tr -d ' ') - say "${e}==>${x} Uploading reports" - say " ${e}url:${x} $url" - say " ${e}query:${x} $query" - - # now add token to query - query=$(echo "package=bash-$VERSION&token=$token&$query" | tr -d ' ') - - if [ "$ft_s3" = "1" ]; - then - i="0" - while [ $i -lt 4 ] - do - i=$[$i+1] - say " ${e}->${x} Pinging Codecov" - res=$(curl $curl_s -X POST $curlargs $cacert \ - -H 'X-Reduced-Redundancy: false' \ - -H 'X-Content-Type: application/x-gzip' \ - "$url/upload/v4?$query" || true) - # a good replay is "https://codecov.io" + "\n" + "https://codecov.s3.amazonaws.com/..." - status=$(echo "$res" | head -1 | grep 'HTTP ' | cut -d' ' -f2) - if [ "$status" = "" ]; - then - s3target=$(echo "$res" | sed -n 2p) - say " ${e}->${x} Uploading" - s3=$(curl $curl_s -fiX PUT $curlawsargs \ - --data-binary @$upload_file.gz \ - -H 'Content-Type: application/x-gzip' \ - -H 'Content-Encoding: gzip' \ - -H 'x-amz-acl: public-read' \ - "$s3target" || true) - if [ "$s3" != "" ]; - then - say " ${g}->${x} View reports at ${b}$(echo "$res" | sed -n 1p)${x}" - exit 0 - else - say " ${r}X>${x} Failed to upload" - fi - elif [ "$status" = "400" ]; - then - # 400 Error - say "${g}${res}${x}" - exit ${exit_with} - fi - say " ${e}->${x} Sleeping for 30s and trying again..." - sleep 30 - done - fi - - say " ${e}->${x} Uploading to Codecov" - i="0" - while [ $i -lt 4 ] - do - i=$[$i+1] - - res=$(curl $curl_s -X POST $curlargs $cacert \ - --data-binary @$upload_file.gz \ - -H 'Content-Type: text/plain' \ - -H 'Content-Encoding: gzip' \ - -H 'X-Content-Encoding: gzip' \ - -H 'Accept: text/plain' \ - "$url/upload/v2?$query" || echo 'HTTP 500') - # HTTP 200 - # http://.... - status=$(echo "$res" | head -1 | cut -d' ' -f2) - if [ "$status" = "" ]; - then - say " View reports at ${b}$(echo "$res" | head -2 | tail -1)${x}" - exit 0 - - elif [ "${status:0:1}" = "5" ]; - then - say " ${e}->${x} Sleeping for 30s and trying again..." - sleep 30 - - else - say " ${g}${res}${x}" - exit 0 - exit ${exit_with} - fi - - done - - say " ${r}X> Failed to upload coverage reports${x}" -fi - -exit ${exit_with} diff --git a/.circleci/config.yml b/.circleci/config.yml index 5ba99e41c..a387846a8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -36,7 +36,6 @@ commands: name: "Running test" command: | bash << parameters.script_path >> - jobs: setup_dependencies: executor: golang @@ -73,109 +72,13 @@ jobs: paths: - "." 
-  test_persistence:
-    executor: golang
-    steps:
-      - run_test:
-          script_path: test/persist/test_failure_indices.sh
-
-  test_cover:
-    executor: golang
-    parallelism: 4
-    steps:
-      - restore_cache:
-          name: "Restore source code cache"
-          keys:
-            - go-src-v1-{{ .Revision }}
-      - checkout
-      - restore_cache:
-          name: "Restore go module cache"
-          keys:
-            - go-mod-v2-{{ checksum "go.sum" }}
-      - run:
-          name: "Run tests"
-          command: |
-            export VERSION="$(git describe --tags --long | sed 's/v\(.*\)/\1/')"
-            export GO111MODULE=on
-            mkdir -p /tmp/logs /tmp/workspace/profiles
-            for pkg in $(go list github.com/tendermint/tendermint/... | circleci tests split --split-by=timings); do
-              id=$(basename "$pkg")
-              go test -v -timeout 5m -mod=readonly -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
-            done
-      - persist_to_workspace:
-          root: /tmp/workspace
-          paths:
-            - "profiles/*"
-      - store_artifacts:
-          path: /tmp/logs
-
-  localnet:
-    working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
-    machine:
-      image: circleci/classic:latest
-    environment:
-      GOBIN: /home/circleci/.go_workspace/bin
-      GOPATH: /home/circleci/.go_workspace/
-      GOOS: linux
-      GOARCH: amd64
-    parallelism: 1
-    steps:
-      - checkout
-      - run:
-          name: run localnet and exit on failure
-          command: |
-            set -x
-            docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux
-            make localnet-start &
-            ./scripts/localnet-blocks-test.sh 40 5 10 localhost
-
-  test_p2p:
-    environment:
-      GOBIN: /home/circleci/.go_workspace/bin
-      GOPATH: /home/circleci/.go_workspace
-    machine:
-      image: circleci/classic:latest
-    parameters:
-      ipv:
-        type: integer
-        default: 4
-    steps:
-      - checkout
-      - run: mkdir -p $GOPATH/src/github.com/tendermint
-      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
-      - run: bash test/p2p/circleci.sh << parameters.ipv >>
-      - store_artifacts:
-          path: /home/circleci/project/test/p2p/logs
-
-  upload_coverage:
-    executor: golang
-    steps:
-      - attach_workspace:
-          at: /tmp/workspace
-      - restore_cache:
-          name: "Restore source code cache"
-          keys:
-            - go-src-v1-{{ .Revision }}
-      - checkout
-      - restore_cache:
-          name: "Restore go module cache"
-          keys:
-            - go-mod-v2-{{ checksum "go.sum" }}
-      - run:
-          name: gather
-          command: |
-            echo "mode: atomic" > coverage.txt
-            for prof in $(ls /tmp/workspace/profiles/); do
-              tail -n +2 /tmp/workspace/profiles/"$prof" >> coverage.txt
-            done
-      - run:
-          name: upload
-          command: bash .circleci/codecov.sh -f coverage.txt
-
   deploy_docs:
     executor: docs
     steps:
       - checkout
+      - run:
+          name: "Pull versions"
+          command: git fetch origin v0.32 v0.33
       - run:
           name: "Build docs"
           command: make build-docs
@@ -215,115 +118,6 @@ jobs:
       paths:
         - "/go/pkg/mod"
 
-  build_artifacts:
-    executor: golang
-    parallelism: 4
-    steps:
-      - restore_cache:
-          name: "Restore source code cache"
-          keys:
-            - go-src-v1-{{ .Revision }}
-      - checkout
-      - restore_cache:
-          name: "Restore release dependencies cache"
-          keys:
-            - v2-release-deps-{{ checksum "go.sum" }}
-      - attach_workspace:
-          at: /tmp/workspace
-      - run:
-          name: Build artifact
-          command: |
-            # Setting CIRCLE_TAG because we do not tag the release ourselves.
-            source /tmp/workspace/release-version.source
-            if test ${CIRCLE_NODE_INDEX:-0} == 0 ;then export GOOS=linux GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
-            if test ${CIRCLE_NODE_INDEX:-0} == 1 ;then export GOOS=darwin GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
-            if test ${CIRCLE_NODE_INDEX:-0} == 2 ;then export GOOS=windows GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
-            if test ${CIRCLE_NODE_INDEX:-0} == 3 ;then export GOOS=linux GOARCH=arm && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
-      - persist_to_workspace:
-          root: build
-          paths:
-            - "*.zip"
-            - "tendermint_linux_amd64"
-
-  release_artifacts:
-    executor: golang
-    steps:
-      - restore_cache:
-          name: "Restore source code cache"
-          keys:
-            - go-src-v1-{{ .Revision }}
-      - checkout
-      - attach_workspace:
-          at: /tmp/workspace
-      - run:
-          name: "Deploy to GitHub"
-          command: |
-            # Setting CIRCLE_TAG because we do not tag the release ourselves.
-            source /tmp/workspace/release-version.source
-            echo "---"
-            ls -la /tmp/workspace/*.zip
-            echo "---"
-            python -u scripts/release_management/sha-files.py
-            echo "---"
-            cat /tmp/workspace/SHA256SUMS
-            echo "---"
-            export RELEASE_ID="`python -u scripts/release_management/github-draft.py`"
-            echo "Release ID: ${RELEASE_ID}"
-            #Todo: Parallelize uploads
-            export GOOS=linux GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
-            export GOOS=darwin GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
-            export GOOS=windows GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
-            export GOOS=linux GOARCH=arm && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
-            python -u scripts/release_management/github-upload.py --file "/tmp/workspace/SHA256SUMS" --id "${RELEASE_ID}"
-            python -u scripts/release_management/github-publish.py --id "${RELEASE_ID}"
-
-  release_docker:
-    machine:
-      image: ubuntu-1604:201903-01
-    steps:
-      - checkout
-      - attach_workspace:
-          at: /tmp/workspace
-      - run:
-          name: "Deploy to Docker Hub"
-          command: |
-            # Setting CIRCLE_TAG because we do not tag the release ourselves.
-            source /tmp/workspace/release-version.source
-            cp /tmp/workspace/tendermint_linux_amd64 DOCKER/tendermint
-            docker build --label="tendermint" --tag="tendermint/tendermint:${CIRCLE_TAG}" --tag="tendermint/tendermint:latest" "DOCKER"
-            docker login -u "${DOCKERHUB_USER}" --password-stdin \<<< "${DOCKERHUB_PASS}"
-            docker push "tendermint/tendermint"
-            docker logout
-
-  reproducible_builds:
-    executor: golang
-    steps:
-      - attach_workspace:
-          at: /tmp/workspace
-      - checkout
-      - setup_remote_docker:
-          docker_layer_caching: true
-      - run:
-          name: Build tendermint
-          no_output_timeout: 20m
-          command: |
-            sudo apt-get update
-            sudo apt-get install -y ruby
-            bash -x ./scripts/gitian-build.sh all
-            for os in darwin linux windows; do
-              cp gitian-build-${os}/result/tendermint-${os}-res.yml .
-              cp gitian-build-${os}/build/out/tendermint-*.tar.gz .
-              rm -rf gitian-build-${os}/
-            done
-      - store_artifacts:
-          path: /go/src/github.com/tendermint/tendermint/tendermint-darwin-res.yml
-      - store_artifacts:
-          path: /go/src/github.com/tendermint/tendermint/tendermint-linux-res.yml
-      - store_artifacts:
-          path: /go/src/github.com/tendermint/tendermint/tendermint-windows-res.yml
-      - store_artifacts:
-          path: /go/src/github.com/tendermint/tendermint/tendermint-*.tar.gz
 
 # # Test RPC implementation against the swagger documented specs
 # contract_tests:
 #   working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
@@ -352,7 +146,7 @@ jobs:
 
 workflows:
   version: 2
-  test-suite:
+  docs:
     jobs:
       - deploy_docs:
           context: tendermint-docs
@@ -369,53 +163,6 @@ workflows:
             branches:
               only:
                 - docs-staging
-      - setup_dependencies
-      - test_cover:
-          requires:
-            - setup_dependencies
-      - test_persistence:
-          requires:
-            - setup_dependencies
-      - localnet:
-          requires:
-            - setup_dependencies
-      - test_p2p
-      - test_p2p:
-          name: test_p2p_ipv6
-          ipv: 6
-      - upload_coverage:
-          requires:
-            - test_cover
-      - reproducible_builds:
-          filters:
-            branches:
-              only:
-                - master
-                - /v[0-9]+\.[0-9]+/
 #      - contract_tests:
 #          requires:
 #            - setup_dependencies
-
-  release:
-    jobs:
-      - prepare_build
-      - build_artifacts:
-          requires:
-            - prepare_build
-      - release_artifacts:
-          requires:
-            - prepare_build
-            - build_artifacts
-          filters:
-            branches:
-              only:
-                - /v[0-9]+\.[0-9]+/
-      - release_docker:
-          requires:
-            - prepare_build
-            - build_artifacts
-          filters:
-            branches:
-              only:
-                - /v[0-9]+\.[0-9]+/
-                - master
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 000000000..dd819a18f
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,11 @@
+---
+Language: Proto
+BasedOnStyle: Google
+IndentWidth: 2
+ColumnLimit: 0
+AlignConsecutiveAssignments: true
+AlignConsecutiveDeclarations: true
+SpacesInSquareBrackets: true
+ReflowComments: true
+SortIncludes: true
+SortUsingDeclarations: true
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 000000000..a7ae6a5b0
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,5 @@
+build
+test/e2e/build
+test/e2e/networks
+test/logs
+test/p2p/data
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 2b513b158..c20235b33 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,9 +1,27 @@
 # CODEOWNERS: https://help.github.com/articles/about-codeowners/
 
-# Everything goes through Bucky, Anton, Tess. For now.
-* @ebuchman @melekes @tessr
+# Everything goes through the following "global owners" by default.
+# Unless a later match takes precedence, these owners will be
+# requested for review when someone opens a PR.
+# Note that the last matching pattern takes precedence, so
+# global owners are only requested if there isn't a more specific
+# codeowner specified below. For this reason, the global codeowners
+# are often repeated in package-level definitions.
+* @ebuchman @erikgrinaker @melekes @tessr
+
+# Overrides for tooling packages
+.circleci/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
+.github/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
+DOCKER/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
+
+# Overrides for core Tendermint packages
+abci/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
+evidence/ @cmwaters @ebuchman @erikgrinaker @melekes @tessr
+light/ @cmwaters @melekes @ebuchman
+
+# Overrides for docs
+*.md @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
+docs/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
+
+
 
-# Precious documentation
-/docs/README.md @zramsay
-/docs/DOCS_README.md @zramsay
-/docs/.vuepress/ @zramsay
diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md
index 9c56a364a..e99d57880 100644
--- a/.github/ISSUE_TEMPLATE/bug-report.md
+++ b/.github/ISSUE_TEMPLATE/bug-report.md
@@ -37,6 +37,6 @@ manner.
 
 We might ask you to provide additional logs and data (tendermint & app).
 
 **node command runtime flags**:
 
-**`/dump_consensus_state` output for consensus bugs**
+**Please provide the output from the `http://<host>:<port>/dump_consensus_state` RPC endpoint for consensus bugs**
 
 **Anything else we need to know**:
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 8590275a7..975ad1cf5 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,24 +1,7 @@
-
-
-Closes: #XXX
-
 ## Description
-
+_Please add a description of the changes that this PR introduces and the files that
+are the most critical to review._
 
+Closes: #XXX
-______
-
-For contributor use:
-
-- [ ] Wrote tests
-- [ ] Updated CHANGELOG_PENDING.md
-- [ ] Linked to Github issue with discussion and accepted design OR link to spec that describes this work.
-- [ ] Updated relevant documentation (`docs/`) and code comments
-- [ ] Re-reviewed `Files changed` in the Github PR explorer
diff --git a/.github/auto-comment.yml b/.github/auto-comment.yml
new file mode 100644
index 000000000..604c2f878
--- /dev/null
+++ b/.github/auto-comment.yml
@@ -0,0 +1,16 @@
+pullRequestOpened: |
+  :wave: Thanks for creating a PR!
+
+  Before we can merge this PR, please make sure that all the following items have been
+  checked off. If any of the checklist items are not applicable, please leave them but
+  write a little note why.
+
+  - [ ] Wrote tests
+  - [ ] Updated CHANGELOG_PENDING.md
+  - [ ] Linked to Github issue with discussion and accepted design OR link to spec that describes this work.
+  - [ ] Updated relevant documentation (`docs/`) and code comments
+  - [ ] Re-reviewed `Files changed` in the Github PR explorer
+  - [ ] Applied Appropriate Labels
+
+
+  Thank you for your contribution to Tendermint! :rocket:
:rocket: \ No newline at end of file diff --git a/codecov.yml b/.github/codecov.yml similarity index 81% rename from codecov.yml rename to .github/codecov.yml index 4b3d527fa..ca879ab64 100644 --- a/codecov.yml +++ b/.github/codecov.yml @@ -2,7 +2,6 @@ coverage: precision: 2 round: down range: "70...100" - status: project: default: @@ -10,6 +9,9 @@ coverage: patch: on changes: off +github_checks: + annotations: false + comment: layout: "diff, files" behavior: default @@ -22,3 +24,4 @@ ignore: - "DOCKER" - "scripts" - "**/*.pb.go" + - "libs/pubsub/query/query.peg.go" diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..2cdbb1e67 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,28 @@ +version: 2 +updates: + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: daily + time: "11:00" + open-pull-requests-limit: 10 + - package-ecosystem: npm + directory: "/docs" + schedule: + interval: daily + time: "11:00" + open-pull-requests-limit: 10 + reviewers: + - fadeev + - package-ecosystem: gomod + directory: "/" + schedule: + interval: daily + time: "11:00" + open-pull-requests-limit: 10 + reviewers: + - melekes + - tessr + - erikgrinaker + labels: + - T:dependencies diff --git a/.github/linter/markdownlint.yml b/.github/linter/markdownlint.yml new file mode 100644 index 000000000..1637001cc --- /dev/null +++ b/.github/linter/markdownlint.yml @@ -0,0 +1,8 @@ +default: true, +MD007: { "indent": 4 } +MD013: false +MD024: { siblings_only: true } +MD025: false +MD033: { no-inline-html: false } +no-hard-tabs: false +whitespace: false diff --git a/.mergify.yml b/.github/mergify.yml similarity index 100% rename from .mergify.yml rename to .github/mergify.yml diff --git a/.github/stale.yml b/.github/stale.yml deleted file mode 100644 index 63c6e0f1f..000000000 --- a/.github/stale.yml +++ /dev/null @@ -1,47 +0,0 @@ -# Configuration for probot-stale - https://github.com/probot/stale - -# Number of days of inactivity before an Issue or Pull Request becomes stale -daysUntilStale: 60 - -# Number of days of inactivity before an Issue or Pull Request with the stale label is closed. -# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. -daysUntilClose: 9 - -# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled) -onlyLabels: [] - -# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable -exemptLabels: - - major-release - -# Set to true to ignore issues in a project (defaults to false) -exemptProjects: true - -# Set to true to ignore issues in a milestone (defaults to false) -exemptMilestones: true - -# Set to true to ignore issues with an assignee (defaults to false) -exemptAssignees: false - -# Label to use when marking as stale -staleLabel: stale - -# Comment to post when marking as stale. Set to `false` to disable -markComment: > - This issue has been automatically marked as stale because it has not had - recent activity. It will be closed if no further activity occurs. Thank you - for your contributions. - -# Limit the number of actions per hour, from 1-30. Default is 30 -limitPerRun: 30 - -Limit to only `issues` or `pulls` -only: pulls - -Optionally, specify configuration settings that are specific to just 'issues' or 'pulls': -pulls: - daysUntilStale: 30 - markComment: > - This pull request has been automatically marked as stale because it has not had - recent activity. 
It will be closed if no further activity occurs. Thank you - for your contributions. \ No newline at end of file diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml new file mode 100644 index 000000000..0861ce402 --- /dev/null +++ b/.github/workflows/coverage.yml @@ -0,0 +1,100 @@ +name: Test Coverage +on: + pull_request: + push: + branches: + - master + - release/** + +jobs: + split-test-files: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Create a file with all the pkgs + run: go list ./... > pkgs.txt + - name: Split pkgs into 4 files + run: split -d -n l/4 pkgs.txt pkgs.txt.part. + # cache multiple + - uses: actions/upload-artifact@v2 + with: + name: "${{ github.sha }}-00" + path: ./pkgs.txt.part.00 + - uses: actions/upload-artifact@v2 + with: + name: "${{ github.sha }}-01" + path: ./pkgs.txt.part.01 + - uses: actions/upload-artifact@v2 + with: + name: "${{ github.sha }}-02" + path: ./pkgs.txt.part.02 + - uses: actions/upload-artifact@v2 + with: + name: "${{ github.sha }}-03" + path: ./pkgs.txt.part.03 + + tests: + runs-on: ubuntu-latest + needs: split-test-files + strategy: + fail-fast: false + matrix: + part: ["00", "01", "02", "03"] + steps: + - uses: actions/setup-go@v2 + with: + go-version: '^1.15.4' + - uses: actions/checkout@v2 + - uses: technote-space/get-diff-action@v4 + with: + PATTERNS: | + **/**.go + go.mod + go.sum + - uses: actions/download-artifact@v2 + with: + name: "${{ github.sha }}-${{ matrix.part }}" + if: env.GIT_DIFF + - name: test & coverage report creation + run: | + cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic + if: env.GIT_DIFF + - uses: actions/upload-artifact@v2 + with: + name: "${{ github.sha }}-${{ matrix.part }}-coverage" + path: ./${{ matrix.part }}profile.out + + upload-coverage-report: + runs-on: ubuntu-latest + needs: tests + steps: + - uses: actions/checkout@v2 + - uses: technote-space/get-diff-action@v4 + with: + PATTERNS: | + **/**.go + go.mod + go.sum + - uses: actions/download-artifact@v2 + with: + name: "${{ github.sha }}-00-coverage" + if: env.GIT_DIFF + - uses: actions/download-artifact@v2 + with: + name: "${{ github.sha }}-01-coverage" + if: env.GIT_DIFF + - uses: actions/download-artifact@v2 + with: + name: "${{ github.sha }}-02-coverage" + if: env.GIT_DIFF + - uses: actions/download-artifact@v2 + with: + name: "${{ github.sha }}-03-coverage" + if: env.GIT_DIFF + - run: | + cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt + if: env.GIT_DIFF + - uses: codecov/codecov-action@v1.0.15 + with: + file: ./coverage.txt + if: env.GIT_DIFF diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 000000000..0a1dab464 --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,59 @@ +name: Build & Push +# Build & Push rebuilds the tendermint docker image on every push to master and creation of tags +# and pushes the image to https://hub.docker.com/r/interchainio/simapp/tags +on: + pull_request: + push: + branches: + - master + tags: + - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 + - "v[0-9]+.[0-9]+.[0-9]+-rc*" # Push events to matching v*, i.e. 
v1.0-rc1, v20.15.10-rc5 + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v2 + with: + go-version: "^1.15.4" + - uses: actions/checkout@master + - name: Prepare + id: prep + run: | + DOCKER_IMAGE=tendermint/tendermint + VERSION=noop + if [[ $GITHUB_REF == refs/tags/* ]]; then + VERSION=${GITHUB_REF#refs/tags/} + elif [[ $GITHUB_REF == refs/heads/* ]]; then + VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g') + if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then + VERSION=latest + fi + fi + TAGS="${DOCKER_IMAGE}:${VERSION}" + if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then + TAGS="$TAGS,${DOCKER_IMAGE}:latest" + fi + echo ::set-output name=tags::${TAGS} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build Tendermint + run: | + make build-linux && cp build/tendermint DOCKER/tendermint + + - name: Publish to Docker Hub + uses: docker/build-push-action@v2 + with: + context: ./DOCKER + file: ./DOCKER/Dockerfile + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.prep.outputs.tags }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 000000000..f80af1588 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,31 @@ +name: Documentation +# This job builds and deploys documentation to GitHub Pages. +# It runs on every push to master. +on: + push: + branches: + - master + +jobs: + build-and-deploy: + runs-on: ubuntu-latest + container: + image: tendermintdev/docker-website-deployment + steps: + - name: Checkout 🛎️ + uses: actions/checkout@v2.3.1 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Install and Build 🔧 + run: | + apk add rsync + make build-gh-docs + + - name: Deploy 🚀 + uses: JamesIves/github-pages-deploy-action@3.7.1 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + BRANCH: gh-pages + FOLDER: ~/output diff --git a/.github/workflows/e2e-nightly.yml b/.github/workflows/e2e-nightly.yml new file mode 100644 index 000000000..b21a482c0 --- /dev/null +++ b/.github/workflows/e2e-nightly.yml @@ -0,0 +1,53 @@ +# Runs randomly generated E2E testnets nightly. +name: e2e-nightly +on: + workflow_dispatch: # allow running workflow manually + schedule: + - cron: '0 2 * * *' + +jobs: + e2e-nightly-test: + # Run parallel jobs for the listed testnet groups (must match the + # ./build/generator -g flag) + strategy: + fail-fast: false + matrix: + group: ['00', '01', '02', '03'] + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/setup-go@v2 + with: + go-version: '^1.15.4' + + - uses: actions/checkout@v2 + + - name: Build + working-directory: test/e2e + # Run make jobs in parallel, since we can't run steps in parallel.
+ run: make -j2 docker generator runner + + - name: Generate testnets + working-directory: test/e2e + # When changing -g, also change the matrix groups above + run: ./build/generator -g 4 -d networks/nightly + + - name: Run testnets in group ${{ matrix.group }} + working-directory: test/e2e + run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml + + e2e-nightly-fail: + needs: e2e-nightly-test + if: ${{ failure() }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack on failure + uses: rtCamp/action-slack-notify@ecc1353ce30ef086ce3fc3d1ea9ac2e32e150402 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: tendermint-internal + SLACK_USERNAME: Nightly E2E Tests + SLACK_ICON_EMOJI: ':skull:' + SLACK_COLOR: danger + SLACK_MESSAGE: Nightly E2E tests failed + SLACK_FOOTER: '' diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml new file mode 100644 index 000000000..6ac2077ac --- /dev/null +++ b/.github/workflows/e2e.yml @@ -0,0 +1,41 @@ +name: e2e +# Runs the CI end-to-end test network on all pushes to master or release branches +# and every pull request, but only if any Go files have been changed. +on: + pull_request: + push: + branches: + - master + - release/** + +jobs: + e2e-test: + runs-on: ubuntu-latest + timeout-minutes: 15 + steps: + - uses: actions/setup-go@v2 + with: + go-version: '^1.15.4' + - uses: actions/checkout@v2 + - uses: technote-space/get-diff-action@v4 + with: + PATTERNS: | + **/**.go + go.mod + go.sum + + - name: Build + working-directory: test/e2e + # Run two make jobs in parallel, since we can't run steps in parallel. + run: make -j2 docker runner + if: "env.GIT_DIFF != ''" + + - name: Run CI testnet + working-directory: test/e2e + run: ./build/runner -f networks/ci.toml + if: "env.GIT_DIFF != ''" + + - name: Emit logs on failure + if: ${{ failure() }} + working-directory: test/e2e + run: ./build/runner -f networks/ci.toml logs diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml index 5b449cffd..f7c5b3e7b 100644 --- a/.github/workflows/linkchecker.yml +++ b/.github/workflows/linkchecker.yml @@ -7,6 +7,6 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@master - - uses: gaurav-nelson/github-action-markdown-link-check@0.6.0 + - uses: gaurav-nelson/github-action-markdown-link-check@1.0.8 with: folder-path: "docs" diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index de3126982..2dec8f34c 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -1,12 +1,29 @@ name: Lint -on: [pull_request] +# Lint runs golangci-lint over the entire Tendermint repository +# This workflow is run on every pull request and push to master +# The `golangci` job will pass without running if no *.{go, mod, sum} files have been modified. +on: + pull_request: + push: + branches: + - master jobs: - golangci-lint: + golangci: + name: golangci-lint runs-on: ubuntu-latest + timeout-minutes: 4 steps: - - uses: actions/checkout@master - - name: golangci-lint - uses: reviewdog/action-golangci-lint@v1 + - uses: actions/checkout@v2 + - uses: technote-space/get-diff-action@v4 with: - github_token: ${{ secrets.github_token }} - reporter: github-pr-review + PATTERNS: | + **/**.go + go.mod + go.sum + - uses: golangci/golangci-lint-action@v2.3.0 + with: + # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. 
+ version: v1.31 + args: --timeout 10m + github-token: ${{ secrets.github_token }} + if: env.GIT_DIFF diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml new file mode 100644 index 000000000..c4098557e --- /dev/null +++ b/.github/workflows/linter.yml @@ -0,0 +1,31 @@ +name: Lint +on: + push: + branches: + - master + paths: + - "**.md" + - "**.yml" + - "**.yaml" + pull_request: + branches: [master] + paths: + - "**.md" + +jobs: + build: + name: Super linter + runs-on: ubuntu-latest + steps: + - name: Checkout Code + uses: actions/checkout@v2 + - name: Lint Code Base + uses: docker://github/super-linter:v3 + env: + LINTER_RULES_PATH: . + VALIDATE_ALL_CODEBASE: true + DEFAULT_BRANCH: master + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + VALIDATE_MD: true + VALIDATE_OPENAPI: true + VALIDATE_YAML: true diff --git a/.github/workflows/proto.yml b/.github/workflows/proto.yml index ddc9ee4c4..faf663f9d 100644 --- a/.github/workflows/proto.yml +++ b/.github/workflows/proto.yml @@ -1,12 +1,22 @@ -name: Proto check -on: [pull_request] +name: Protobuf +# Protobuf runs buf (https://buf.build/) lint and check-breakage +# This workflow is only run when a .proto file has been modified +on: + pull_request: + paths: + - "**.proto" jobs: - proto-checks: + proto-lint: runs-on: ubuntu-latest + timeout-minutes: 4 steps: - uses: actions/checkout@master - - uses: docker-practice/actions-setup-docker@master - name: lint run: make proto-lint + proto-breakage: + runs-on: ubuntu-latest + timeout-minutes: 4 + steps: + - uses: actions/checkout@master - name: check-breakage run: make proto-check-breaking-ci diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 000000000..6411a2d3f --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,29 @@ +name: "Release" + +on: + push: + tags: + - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 + +jobs: + goreleaser: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - uses: actions/setup-go@v2 + with: + go-version: '^1.15.4' + + - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md + + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v2 + with: + version: latest + args: release --rm-dist --release-notes=../release_notes.md + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 000000000..610e2e29e --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,18 @@ +name: "Close stale pull requests" +on: + schedule: + - cron: "0 0 * * *" + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-pr-message: "This pull request has been automatically marked as stale because it has not had + recent activity. It will be closed if no further activity occurs. Thank you + for your contributions."
+ days-before-stale: 10 + days-before-close: 4 + exempt-pr-labels: "S:wip" diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 75ca8b4a0..abd79aad4 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -1,4 +1,7 @@ name: Tests +# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps) +# This workflow runs on every push to master or release branch and every pull request +# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified on: pull_request: push: @@ -14,69 +17,130 @@ jobs: env: GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/master'" + build: name: Build runs-on: ubuntu-latest + timeout-minutes: 5 steps: - - uses: actions/setup-go@v2-beta - - name: Set GOBIN - run: | - echo "::add-path::$(go env GOPATH)/bin" + - uses: actions/setup-go@v2 + with: + go-version: "^1.15.4" - uses: actions/checkout@v2 + - uses: technote-space/get-diff-action@v4 + with: + PATTERNS: | + **/**.go + go.mod + go.sum - name: install run: make install install_abci - # Cache bin - - uses: actions/cache@v1 + if: "env.GIT_DIFF != ''" + - uses: actions/cache@v2.1.3 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + if: env.GIT_DIFF + # Cache binaries for use by other jobs + - uses: actions/cache@v2.1.3 with: path: ~/go/bin - key: ${{ runner.os }}-go-tm-binary + key: ${{ runner.os }}-${{ github.sha }}-tm-binary + if: env.GIT_DIFF test_abci_apps: runs-on: ubuntu-latest needs: Build + timeout-minutes: 5 steps: - - uses: actions/setup-go@v2-beta - - name: Set GOBIN - run: | - echo "::add-path::$(go env GOPATH)/bin" + - uses: actions/setup-go@v2 + with: + go-version: "^1.15.4" - uses: actions/checkout@v2 - - uses: actions/cache@v1 + - uses: technote-space/get-diff-action@v4 + with: + PATTERNS: | + **/**.go + go.mod + go.sum + - uses: actions/cache@v2.1.3 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + if: env.GIT_DIFF + - uses: actions/cache@v2.1.3 with: path: ~/go/bin - key: ${{ runner.os }}-go-tm-binary + key: ${{ runner.os }}-${{ github.sha }}-tm-binary + if: env.GIT_DIFF - name: test_abci_apps run: abci/tests/test_app/test.sh shell: bash + if: env.GIT_DIFF test_abci_cli: runs-on: ubuntu-latest needs: Build + timeout-minutes: 5 steps: - - uses: actions/setup-go@v2-beta - - name: Set GOBIN - run: | - echo "::add-path::$(go env GOPATH)/bin" + - uses: actions/setup-go@v2 + with: + go-version: "^1.15.4" - uses: actions/checkout@v2 - - uses: actions/cache@v1 + - uses: technote-space/get-diff-action@v4 + with: + PATTERNS: | + **/**.go + go.mod + go.sum + - uses: actions/cache@v2.1.3 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + if: env.GIT_DIFF + - uses: actions/cache@v2.1.3 with: path: ~/go/bin - key: ${{ runner.os }}-go-tm-binary + key: ${{ runner.os }}-${{ github.sha }}-tm-binary + if: env.GIT_DIFF - run: abci/tests/test_cli/test.sh shell: bash + if: env.GIT_DIFF test_apps: runs-on: ubuntu-latest needs: Build + timeout-minutes: 5 steps: - - uses: actions/setup-go@v2-beta - - name: Set GOBIN - run: | - echo "::add-path::$(go env GOPATH)/bin" + - uses: actions/setup-go@v2 + with: + go-version: "^1.15.4" - uses: actions/checkout@v2 - - uses: actions/cache@v1 + - uses: technote-space/get-diff-action@v4 + with: + PATTERNS: |
**/**.go + go.mod + go.sum + - uses: actions/cache@v2.1.3 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + if: env.GIT_DIFF + - uses: actions/cache@v2.1.3 with: path: ~/go/bin - key: ${{ runner.os }}-go-tm-binary + key: ${{ runner.os }}-${{ github.sha }}-tm-binary + if: env.GIT_DIFF - name: test_apps run: test/app/test.sh shell: bash + if: env.GIT_DIFF diff --git a/.gitignore b/.gitignore index dfca4da95..d5442f35d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,48 +1,43 @@ -*.swp -*.swo -.bak *.bak +*.iml +*.log +*.swo +*.swp +*/.glide +*/vendor .DS_Store -build/* -rpc/test/.tendermint -.tendermint -remote_dump +.bak +.idea/ .revision -vendor +.tendermint +.tendermint-lite +.terraform .vagrant -test/p2p/data/ -test/logs +.vendor-new/ +.vscode/ +abci-cli +addrbook.json +artifacts/* +build/* coverage.txt +docs/.vuepress/dist docs/_build docs/dist -docs/.vuepress/dist -*.log -abci-cli docs/node_modules/ index.html.md - -scripts/wal2json/wal2json -scripts/cutWALUntil/cutWALUntil - -.idea/ -*.iml - -.vscode/ - libs/pubsub/query/fuzz_test/output +profile\.out +remote_dump +rpc/test/.tendermint +scripts/cutWALUntil/cutWALUntil +scripts/wal2json/wal2json shunit2 - -.tendermint-lite -addrbook.json - -*/vendor -.vendor-new/ -*/.glide -.terraform terraform.tfstate terraform.tfstate.backup terraform.tfstate.d - -.vscode - -profile\.out +test/e2e/build +test/e2e/networks/*/ +test/logs +test/maverick/maverick +test/p2p/data/ +vendor \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml index dc934f43d..6840a3307 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -5,7 +5,7 @@ linters: - depguard - dogsled - dupl - # - errcheck + - errcheck # - funlen # - gochecknoglobals # - gochecknoinits @@ -20,7 +20,7 @@ linters: - gosimple - govet - ineffassign - - interfacer + # - interfacer - lll - misspell # - maligned @@ -38,14 +38,17 @@ linters: # - whitespace # - wsl # - gocognit - disable: - - errcheck + - nolintlint issues: exclude-rules: + - path: _test\.go + linters: + - gosec - linters: - lll source: "https://" + max-same-issues: 50 linters-settings: dogsled: @@ -56,15 +59,3 @@ linters-settings: # check-shadowing: true golint: min-confidence: 0 -# gocyclo: -# min-complexity: 10 -# misspell: -# locale: US -# gocritic: -# enabled-tags: -# - performance -# - style -# - experimental -# disabled-checks: -# - wrapperFunc -# - commentFormatting # https://github.com/go-critic/go-critic/issues/755 diff --git a/.goreleaser.yml b/.goreleaser.yml new file mode 100644 index 000000000..494f7c245 --- /dev/null +++ b/.goreleaser.yml @@ -0,0 +1,27 @@ +project_name: Tendermint + +env: + # Require use of Go modules. 
- GO111MODULE=on + +builds: + - id: "Tendermint" + main: ./cmd/tendermint/main.go + ldflags: + - -s -w -X github.com/tendermint/tendermint/version.TMCoreSemVer={{ .Version }} + env: + - CGO_ENABLED=0 + goos: + - darwin + - linux + - windows + goarch: + - amd64 + - arm64 + +checksum: + name_template: SHA256SUMS-{{.Version}}.txt + algorithm: sha256 + +release: + name_template: "{{.Version}} (WARNING: BETA SOFTWARE)" diff --git a/.markdownlintignore b/.markdownlintignore new file mode 100644 index 000000000..12b20d6bb --- /dev/null +++ b/.markdownlintignore @@ -0,0 +1,6 @@ +docs/node_modules +CHANGELOG.md +docs/architecture/* +crypto/secp256k1/** +scripts/* +.github diff --git a/CHANGELOG.md b/CHANGELOG.md index e3f7df149..fe534485a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,328 @@ # Changelog +## v0.34.0 + +*November 19, 2020* + +Holy smokes, this is a big one! For a more reader-friendly overview of the changes in 0.34.0 +(and of the changes you need to accommodate as a user), check out [UPGRADING.md](UPGRADING.md). + +Special thanks to external contributors on this release: @james-ray, @fedekunze, @favadi, @alessio, +@joe-bowman, @cuonglm, @SadPencil and @dongsam. + +And as always, friendly reminder that we have a [bug bounty program](https://hackerone.com/tendermint). + +### BREAKING CHANGES + +- CLI/RPC/Config + + - [config] [\#5315](https://github.com/tendermint/tendermint/pull/5315) Rename `prof_laddr` to `pprof_laddr` and move it to `rpc` section (@melekes) + - [evidence] [\#4959](https://github.com/tendermint/tendermint/pull/4959) Add JSON tags to `DuplicateVoteEvidence` (@marbar3778) + - [light] [\#4946](https://github.com/tendermint/tendermint/pull/4946) `tendermint lite` command has been renamed to `tendermint light` (@marbar3778) + - [privval] [\#4582](https://github.com/tendermint/tendermint/pull/4582) `round` in private_validator_state.json is no longer a JSON string; instead it is a number (@marbar3778) + - [rpc] [\#4792](https://github.com/tendermint/tendermint/pull/4792) `/validators` are now sorted by voting power (@melekes) + - [rpc] [\#4947](https://github.com/tendermint/tendermint/pull/4947) Return an error when `page` pagination param is 0 in `/validators`, `tx_search` (@melekes) + - [rpc] [\#5137](https://github.com/tendermint/tendermint/pull/5137) JSON tags of `gasWanted` and `gasUsed` in `ResponseCheckTx` and `ResponseDeliverTx` have been made snake_case (`gas_wanted` and `gas_used`) (@marbar3778) + - [rpc] [\#5315](https://github.com/tendermint/tendermint/pull/5315) Remove `/unsafe_start_cpu_profiler`, `/unsafe_stop_cpu_profiler` and `/unsafe_write_heap_profile`. Please use pprof functionality instead (@melekes) + - [rpc/client, rpc/jsonrpc/client] [\#5347](https://github.com/tendermint/tendermint/pull/5347) All client methods now accept `context.Context` as 1st param (@melekes) +
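As a concrete illustration of that last entry, client code after \#5347 looks roughly like this (a minimal sketch against the v0.34 `rpc/client/http` package; the node address is a placeholder and error handling is compressed):

```go
package main

import (
	"context"
	"fmt"
	"time"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// New takes the remote address and the websocket endpoint.
	c, err := rpchttp.New("tcp://127.0.0.1:26657", "/websocket")
	if err != nil {
		panic(err)
	}

	// Every client method now takes a context.Context as its first
	// parameter, so callers control timeouts and cancellation per call.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	status, err := c.Status(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(status.NodeInfo.Network)
}
```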
+- Apps + + - [abci] [\#4704](https://github.com/tendermint/tendermint/pull/4704) Add ABCI methods `ListSnapshots`, `LoadSnapshotChunk`, `OfferSnapshot`, and `ApplySnapshotChunk` for state sync snapshots. `ABCIVersion` bumped to 0.17.0. (@erikgrinaker) + - [abci] [\#4989](https://github.com/tendermint/tendermint/pull/4989) `Proof` within `ResponseQuery` has been renamed to `ProofOps` (@marbar3778) + - [abci] [\#5096](https://github.com/tendermint/tendermint/pull/5096) `CheckTxType` Protobuf enum names are now uppercase, to follow Protobuf style guide (@erikgrinaker) + - [abci] [\#5324](https://github.com/tendermint/tendermint/pull/5324) ABCI evidence type is now an enum with two types of possible evidence (@cmwaters) + +- P2P Protocol + + - [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778) + - [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778) + - [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool to Protobuf encoding (@marbar3778) + - [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes) + - `MaxBatchBytes` new config setting defines the max size of one batch. + - [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778) + - [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778) + +- Blockchain Protocol + + - [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778) + - [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supersedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters) + - [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker) + - [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes) + - [types] [\#4792](https://github.com/tendermint/tendermint/pull/4792) Sort validators by voting power to enable faster commit verification (@melekes) + +- On-disk serialization + + - [state] [\#4679](https://github.com/tendermint/tendermint/pull/4679) Migrate state module to Protobuf encoding (@marbar3778) + - `BlockStoreStateJSON` is now `BlockStoreState` and is encoded as binary in the database + - [store] [\#4778](https://github.com/tendermint/tendermint/pull/4778) Migrate store module to Protobuf encoding (@marbar3778) + +- Light client, private validator + + - [light] [\#4964](https://github.com/tendermint/tendermint/pull/4964) Migrate light module to Protobuf encoding (@marbar3778) + - [privval] [\#4985](https://github.com/tendermint/tendermint/pull/4985) Migrate `privval` module to Protobuf encoding (@marbar3778) + +- Go API + + - [consensus] [\#4582](https://github.com/tendermint/tendermint/pull/4582) RoundState: `Round`, `LockedRound` & `CommitRound` are now `int32` (@marbar3778) + - [consensus] [\#4582](https://github.com/tendermint/tendermint/pull/4582) HeightVoteSet: `round` is now `int32` (@marbar3778) + - [crypto] [\#4721](https://github.com/tendermint/tendermint/pull/4721) Remove `SimpleHashFromMap()` and `SimpleProofsFromMap()` (@erikgrinaker) + - [crypto] [\#4940](https://github.com/tendermint/tendermint/pull/4940) All keys have become `[]byte` instead of `[<size>]byte`.
The byte method no longer returns the marshaled value but just the `[]byte` form of the data. (@marbar3778) + - [crypto] [\#4988](https://github.com/tendermint/tendermint/pull/4988) Removal of key type multisig (@marbar3778) + - The key has been moved to the [Cosmos-SDK](https://github.com/cosmos/cosmos-sdk/blob/master/crypto/types/multisig/multisignature.go) + - [crypto] [\#4989](https://github.com/tendermint/tendermint/pull/4989) Remove `Simple` prefixes from `SimpleProof`, `SimpleValueOp` & `SimpleProofNode`. (@marbar3778) + - `merkle.Proof` has been renamed to `ProofOps`. + - Protobuf messages `Proof` & `ProofOp` have been moved to `proto/crypto/merkle` + - `SimpleHashFromByteSlices` has been renamed to `HashFromByteSlices` + - `SimpleHashFromByteSlicesIterative` has been renamed to `HashFromByteSlicesIterative` + - `SimpleProofsFromByteSlices` has been renamed to `ProofsFromByteSlices` + - [crypto] [\#4941](https://github.com/tendermint/tendermint/pull/4941) Remove suffixes from all keys. (@marbar3778) + - ed25519: type `PrivKeyEd25519` is now `PrivKey` + - ed25519: type `PubKeyEd25519` is now `PubKey` + - secp256k1: type `PrivKeySecp256k1` is now `PrivKey` + - secp256k1: type `PubKeySecp256k1` is now `PubKey` + - sr25519: type `PrivKeySr25519` is now `PrivKey` + - sr25519: type `PubKeySr25519` is now `PubKey` + - [crypto] [\#5214](https://github.com/tendermint/tendermint/pull/5214) Change `GenPrivKeySecp256k1` to `GenPrivKeyFromSecret` to be consistent with other keys (@marbar3778) + - [crypto] [\#5236](https://github.com/tendermint/tendermint/pull/5236) `VerifyBytes` is now `VerifySignature` on the `crypto.PubKey` interface (@marbar3778)
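Taken together, the key-type renames and the `VerifyBytes` to `VerifySignature` change look roughly like this in calling code (a sketch against the v0.34 `crypto/ed25519` package, not text from the original release notes):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/ed25519"
)

func main() {
	// Formerly ed25519.PrivKeyEd25519; now simply ed25519.PrivKey (#4941).
	priv := ed25519.GenPrivKey()
	pub := priv.PubKey()

	msg := []byte("sign me")
	sig, err := priv.Sign(msg)
	if err != nil {
		panic(err)
	}

	// Formerly pub.VerifyBytes(msg, sig); renamed in #5236.
	fmt.Println(pub.VerifySignature(msg, sig)) // true
}
```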
+ - [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and change evidence interface (@cmwaters) + - [libs] [\#4831](https://github.com/tendermint/tendermint/pull/4831) Remove `Bech32` pkg from Tendermint. This pkg now lives in the [cosmos-sdk](https://github.com/cosmos/cosmos-sdk/tree/4173ea5ebad906dd9b45325bed69b9c655504867/types/bech32) (@marbar3778) + - [light] [\#4946](https://github.com/tendermint/tendermint/pull/4946) Rename `lite2` pkg to `light`. Remove `lite` implementation. (@marbar3778) + - [light] [\#5347](https://github.com/tendermint/tendermint/pull/5347) `NewClient`, `NewHTTPClient`, `VerifyHeader` and `VerifyLightBlockAtHeight` now accept `context.Context` as 1st param (@melekes) + - [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) `HashFromByteSlices` and `ProofsFromByteSlices` now return a hash for empty inputs, following RFC6962 (@erikgrinaker) + - [proto] [\#5025](https://github.com/tendermint/tendermint/pull/5025) All proto files have been moved to `/proto` directory. (@marbar3778) + - Using the recommended file layout from buf, [see here for more info](https://buf.build/docs/lint-checkers#file_layout) + - [rpc/client] [\#4947](https://github.com/tendermint/tendermint/pull/4947) `Validators`, `TxSearch` `page`/`per_page` params become pointers (@melekes) + - `UnconfirmedTxs` `limit` param is a pointer + - [rpc/jsonrpc/server] [\#5141](https://github.com/tendermint/tendermint/pull/5141) Remove `WriteRPCResponseArrayHTTP` (use `WriteRPCResponseHTTP` instead) (@melekes) + - [state] [\#4679](https://github.com/tendermint/tendermint/pull/4679) `TxResult` is a Protobuf type defined in `abci` types directory (@marbar3778) + - [state] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `State.InitialHeight` field to record initial block height, must be `1` (not `0`) to start from 1 (@erikgrinaker) + - [state] [\#5231](https://github.com/tendermint/tendermint/pull/5231) `LoadStateFromDBOrGenesisFile()` and `LoadStateFromDBOrGenesisDoc()` no longer save the state in the database if not found; the genesis state is simply returned (@erikgrinaker) + - [state] [\#5348](https://github.com/tendermint/tendermint/pull/5348) Define an Interface for the state store. (@marbar3778) + - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `SignedMsgType` has moved to a Protobuf enum (@marbar3778) + - [types] [\#4962](https://github.com/tendermint/tendermint/pull/4962) `ConsensusParams`, `BlockParams`, `EvidenceParams`, `ValidatorParams` & `HashedParams` are now Protobuf types (@marbar3778) + - [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778) + - [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes) + - [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes) + - [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778) + - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from an `int` to a `uint32` (@marbar3778) + - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778) + - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778) + - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Block: `Round` is now `int32` (@marbar3778) +
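To see a few of the `types` changes above in one place, a hedged sketch (field values are arbitrary and purely illustrative):

```go
package main

import (
	"fmt"

	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	"github.com/tendermint/tendermint/types"
)

func main() {
	vote := &types.Vote{
		Type:           tmproto.PrevoteType, // SignedMsgType is now a Protobuf enum (#4939)
		Height:         10,
		Round:          0, // Round is now an int32 (#4939)
		ValidatorIndex: 0, // ValidatorIndex is now an int32 (#4939)
	}

	// SignBytes is no longer a method on Vote: the package-level
	// VoteSignBytes takes the Protobuf form of the vote (#4852).
	sb := types.VoteSignBytes("test-chain", vote.ToProto())
	fmt.Printf("%X\n", sb)
}
```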
+### FEATURES + +- [abci] [\#5031](https://github.com/tendermint/tendermint/pull/5031) Add `AppVersion` to consensus parameters (@james-ray) + - This makes it possible to update your ABCI application version via `EndBlock` response +- [abci] [\#5174](https://github.com/tendermint/tendermint/pull/5174) Remove `MockEvidence` in favor of testing with actual evidence types (`DuplicateVoteEvidence` & `LightClientAttackEvidence`) (@cmwaters) +- [abci] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `InitChain.InitialHeight` field giving the initial block height (@erikgrinaker) +- [abci] [\#5227](https://github.com/tendermint/tendermint/pull/5227) Add `ResponseInitChain.app_hash` which is recorded in genesis block (@erikgrinaker) +- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam) +- [db] [\#5233](https://github.com/tendermint/tendermint/pull/5233) Add support for `badgerdb` database backend (@erikgrinaker) +- [evidence] [\#4532](https://github.com/tendermint/tendermint/pull/4532) Handle evidence from light clients (@melekes) +- [evidence] [\#4821](https://github.com/tendermint/tendermint/pull/4821) Amnesia (light client attack) evidence can be detected, verified and committed (@cmwaters) +- [genesis] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `initial_height` field to specify the initial chain height (defaults to `1`) (@erikgrinaker) +- [libs/math] [\#5665](https://github.com/tendermint/tendermint/pull/5665) Make fractions unsigned integers (uint64) (@cmwaters) +- [light] [\#5298](https://github.com/tendermint/tendermint/pull/5298) Morph validator set and signed header into light block (@cmwaters) +- [p2p] [\#4981](https://github.com/tendermint/tendermint/pull/4981) Expose `SaveAs` func on NodeKey (@melekes) +- [privval] [\#5239](https://github.com/tendermint/tendermint/pull/5239) Add `chainID` to requests from client. (@marbar3778) +- [rpc] [\#4923](https://github.com/tendermint/tendermint/pull/4923) Support `BlockByHash` query (@fedekunze) +- [rpc] [\#4979](https://github.com/tendermint/tendermint/pull/4979) Support EXISTS operator in `/tx_search` query (@melekes) +- [rpc] [\#5017](https://github.com/tendermint/tendermint/pull/5017) Add `/check_tx` endpoint to check transactions without executing them or adding them to the mempool (@melekes) +- [rpc] [\#5108](https://github.com/tendermint/tendermint/pull/5108) Subscribe using the websocket for new evidence events (@cmwaters) +- [statesync] Add state sync support, where a new node can be rapidly bootstrapped by fetching state snapshots from peers instead of replaying blocks. See the `[statesync]` config section. +- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters) +
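For instance, the new `InitChain.InitialHeight` and `ResponseInitChain.app_hash` fields above surface in an ABCI application roughly as follows (a sketch; the `App` type and its fields are hypothetical, only the field names come from the entries above):

```go
package app

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// App is a hypothetical ABCI application; only InitChain is shown.
type App struct {
	abci.BaseApplication

	initialHeight int64
	appHash       []byte
}

func (app *App) InitChain(req abci.RequestInitChain) abci.ResponseInitChain {
	// New in #5191: chains may start at a height other than 1.
	app.initialHeight = req.InitialHeight

	// New in #5227: the app hash returned here is recorded in the
	// genesis block rather than being assumed empty.
	return abci.ResponseInitChain{AppHash: app.appHash}
}
```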
+### IMPROVEMENTS + +- [blockchain] [\#5278](https://github.com/tendermint/tendermint/pull/5278) Verify only +2/3 of the signatures in a block when fast syncing. (@marbar3778) +- [consensus] [\#4578](https://github.com/tendermint/tendermint/pull/4578) Attempt to repair the consensus WAL file (`data/cs.wal/wal`) automatically in case of corruption (@alessio) + - The original WAL file will be backed up to `data/cs.wal/wal.CORRUPTED`. +- [consensus] [\#5143](https://github.com/tendermint/tendermint/pull/5143) Only call `privValidator.GetPubKey` once per block (@melekes) +- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters) +- [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters) +- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters) +- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778) +- [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes) +- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes) +- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters) +- [mempool] Add RemoveTxByKey() exported function for custom mempool cleaning (@p4u) +- [p2p/conn] [\#4795](https://github.com/tendermint/tendermint/pull/4795) Return err on `signChallenge()` instead of panic +- [privval] [\#5437](https://github.com/tendermint/tendermint/pull/5437) `NewSignerDialerEndpoint` can now be given `SignerServiceEndpointOption` (@erikgrinaker) +- [rpc] [\#4968](https://github.com/tendermint/tendermint/pull/4968) JSON encoding is now handled by `libs/json`, not Amino (@erikgrinaker) +- [rpc] [\#5293](https://github.com/tendermint/tendermint/pull/5293) `/dial_peers` has added `private` and `unconditional` as parameters. (@marbar3778) +- [state] [\#4781](https://github.com/tendermint/tendermint/pull/4781) Export `InitStateVersion` for the initial state version (@erikgrinaker) +- [txindex] [\#4466](https://github.com/tendermint/tendermint/pull/4466) Allow indexing an event at runtime (@favadi) + - `abci.EventAttribute` replaces `KV.Pair` +- [types] [\#4905](https://github.com/tendermint/tendermint/pull/4905) Add `ValidateBasic` to validator and validator set (@cmwaters) +- [types] [\#5340](https://github.com/tendermint/tendermint/pull/5340) Add check in `Header.ValidateBasic()` for block protocol version (@marbar3778) +- [types] [\#5490](https://github.com/tendermint/tendermint/pull/5490) Use `Commit` and `CommitSig` max sizes instead of vote max size to calculate the maximum block size. (@cmwaters) + + +### BUG FIXES + +- [abci/grpc] [\#5520](https://github.com/tendermint/tendermint/pull/5520) Return async responses in order, to avoid mempool panics.
(@erikgrinaker) +- [blockchain/v2] [\#4971](https://github.com/tendermint/tendermint/pull/4971) Correctly set block store base in status responses (@erikgrinaker) +- [blockchain/v2] [\#5499](https://github.com/tendermint/tendermint/pull/5499) Fix "duplicate block enqueued by processor" panic (@melekes) +- [blockchain/v2] [\#5530](https://github.com/tendermint/tendermint/pull/5530) Fix out of order block processing panic (@melekes) +- [blockchain/v2] [\#5553](https://github.com/tendermint/tendermint/pull/5553) Make the removal of an already removed peer a noop (@melekes) +- [consensus] [\#4895](https://github.com/tendermint/tendermint/pull/4895) Cache the address of the validator to reduce querying a remote KMS (@joe-bowman) +- [consensus] [\#4970](https://github.com/tendermint/tendermint/pull/4970) Don't allow `LastCommitRound` to be negative (@cuonglm) +- [consensus] [\#5329](https://github.com/tendermint/tendermint/pull/5329) Fix wrong proposer schedule for validators returned by `InitChain` (@erikgrinaker) +- [docker] [\#5385](https://github.com/tendermint/tendermint/pull/5385) Fix incorrect `time_iota_ms` default setting causing block timestamp drift (@erikgrinaker) +- [evidence] [\#5170](https://github.com/tendermint/tendermint/pull/5170) Change ABCI evidence time to the time the infraction happened, not the time the evidence was committed to the block (@cmwaters) +- [evidence] [\#5610](https://github.com/tendermint/tendermint/pull/5610) Make it possible for ABCI evidence to be formed from Tendermint evidence (@cmwaters) +- [libs/rand] [\#5215](https://github.com/tendermint/tendermint/pull/5215) Fix out-of-memory error on unexpected argument of Str() (@SadPencil) +- [light] [\#5307](https://github.com/tendermint/tendermint/pull/5307) Persist correct proposer priority in light client validator sets (@cmwaters) +- [p2p] [\#5136](https://github.com/tendermint/tendermint/pull/5136) Fix error for peer with the same ID but different IPs (@valardragon) +- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash) +- [proxy] [\#5078](https://github.com/tendermint/tendermint/pull/5078) Force Tendermint to exit when ABCI app crashes (@melekes) +- [rpc] [\#5660](https://github.com/tendermint/tendermint/pull/5660) Set `application/json` as the `Content-Type` header in RPC responses. (@alexanderbez) +- [store] [\#5382](https://github.com/tendermint/tendermint/pull/5382) Fix race conditions when loading/saving/pruning blocks (@erikgrinaker) + +## v0.33.8 + +*August 11, 2020* + +### Go security update + +Go reported a security vulnerability that affected the `encoding/binary` package. The most recent Tendermint binary is built with Go 1.14.6; for this +reason, the Tendermint engineering team has opted to conduct a release to aid users in moving to the correct version of Go. Read more about the security issue [here](https://github.com/golang/go/issues/40618). + + +## v0.33.7 + + *August 4, 2020* + + ### BUG FIXES: + + - [go] Build release binary using Go 1.14.4, to avoid halt caused by Go 1.14.1 (https://github.com/golang/go/issues/38223) + - [privval] [\#5140](https://github.com/tendermint/tendermint/pull/5140) Errors of type `RemoteSignerError` from remote signers are no longer retried (@melekes) + + +## v0.33.6 + +*July 2, 2020* + +This security release fixes: + +### Denial of service + +Tendermint 0.33.0 and above allow block proposers to include signatures for the +wrong block.
This may happen naturally if you start a network, have it run for +some time and restart it **without changing the chainID**. (It is a +[misconfiguration](https://docs.tendermint.com/master/tendermint-core/using-tendermint.html) +to reuse chainIDs.) Correct block proposers will accidentally include signatures +for the wrong block if they see these signatures, and then commits won't validate, +making all proposed blocks invalid. A malicious validator (even with a minimal +amount of stake) can use this vulnerability to completely halt the network. + +Tendermint 0.33.6 checks all the signatures are for the block with +2/3 +majority before creating a commit. + +### False Witness + +Tendermint 0.33.1 and above are no longer fully verifying commit signatures +during block execution - they stop after +2/3. This means proposers can propose +blocks that contain valid +2/3 signatures and then the rest of the signatures +can be whatever they want. They can claim that all the other validators signed +just by including a CommitSig with arbitrary signature data. While this doesn't +seem to impact safety of Tendermint per se, it means that Commits may contain a +lot of invalid data. + +_This was already true of blocks, since they could include invalid txs filled +with garbage, but in that case the application knew that they are invalid and +could punish the proposer. But since applications didn't--and don't-- +verify commit signatures directly (they trust Tendermint to do that), +they won't be able to detect it._ + +This can impact incentivization logic in the application that depends on the +LastCommitInfo sent in BeginBlock, which includes which validators signed. For +instance, Gaia incentivizes proposers with a bonus for including more than +2/3 +of the signatures. But a proposer can now claim that bonus just by including +arbitrary data for the final -1/3 of validators without actually waiting for +their signatures. There may be other tricks that can be played because of this. + +Tendermint 0.33.6 verifies all the signatures during block execution. + +_Please note that the light client does not check nil votes and exits as soon +as 2/3+ of the signatures are checked._ + +**All clients are recommended to upgrade.** + +Special thanks to @njmurarka at Bluzelle Networks for reporting this. + +Friendly reminder, we have a [bug bounty +program](https://hackerone.com/tendermint). + +### SECURITY: + +- [consensus] Do not allow signatures for a wrong block in commits (@ebuchman) +- [consensus] Verify all the signatures during block execution (@melekes) + +**Please note that the fix for the False Witness issue renames the `VerifyCommitTrusting` +function to `VerifyCommitLightTrusting`. If you were relying on the light client, you may +need to update your code.** + +## v0.33.5 + +*May 28, 2020* + +Special thanks to external contributors on this release: @tau3, + +Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint). + +### BREAKING CHANGES: + +- Go API + + - [privval] [\#4744](https://github.com/tendermint/tendermint/pull/4744) Remove deprecated `OldFilePV` (@melekes) + - [mempool] [\#4759](https://github.com/tendermint/tendermint/pull/4759) Modify `Mempool#InitWAL` to return an error (@melekes) + - [node] [\#4832](https://github.com/tendermint/tendermint/pull/4832) `ConfigureRPC` returns an error (@melekes) + - [rpc] [\#4836](https://github.com/tendermint/tendermint/pull/4836) Overhaul `lib` folder (@melekes) + Move lib/ folder to jsonrpc/. 
+ Rename: + rpc package -> jsonrpc package + rpcclient package -> client package + rpcserver package -> server package + JSONRPCClient to Client + JSONRPCRequestBatch to RequestBatch + JSONRPCCaller to Caller + StartHTTPServer to Serve + StartHTTPAndTLSServer to ServeTLS + NewURIClient to NewURI + NewJSONRPCClient to New + NewJSONRPCClientWithHTTPClient to NewWithHTTPClient + NewWSClient to NewWS + Unexpose ResponseWriterWrapper + Remove unused http_params.go + + +### FEATURES: + +- [pex] [\#4439](https://github.com/tendermint/tendermint/pull/4439) Use highwayhash for pex buckets (@tau3) + +### IMPROVEMENTS: + +- [abci/server] [\#4719](https://github.com/tendermint/tendermint/pull/4719) Print panic & stack trace to STDERR if logger is not set (@melekes) +- [types] [\#4638](https://github.com/tendermint/tendermint/pull/4638) Implement `Header#ValidateBasic` (@alexanderbez) +- [buildsystem] [\#4738](https://github.com/tendermint/tendermint/pull/4738) Replace build_c and install_c with TENDERMINT_BUILD_OPTIONS parsing. The following options are available: + - nostrip: don't strip debugging symbols nor DWARF tables. + - cleveldb: use cleveldb as db backend instead of goleveldb. + - race: pass -race to go build and enable data race detection. +- [mempool] [\#4759](https://github.com/tendermint/tendermint/pull/4759) Allow ReapX and CheckTx functions to run in parallel (@melekes) +- [rpc/core] [\#4844](https://github.com/tendermint/tendermint/pull/4844) Do not lock consensus state in `/validators`, `/consensus_params` and `/status` (@melekes) + +### BUG FIXES: + +- [blockchain/v2] [\#4761](https://github.com/tendermint/tendermint/pull/4761) Fix excessive CPU usage caused by spinning on closed channels (@erikgrinaker) +- [blockchain/v2] Respect `fast_sync` option (@erikgrinaker) +- [light] [\#4741](https://github.com/tendermint/tendermint/pull/4741) Correctly return `ErrSignedHeaderNotFound` and `ErrValidatorSetNotFound` on corresponding RPC errors (@erikgrinaker) +- [rpc] [\#4805](https://github.com/tendermint/tendermint/issues/4805) Attempt to handle panics during panic recovery (@erikgrinaker) +- [types] [\#4764](https://github.com/tendermint/tendermint/pull/4764) Return an error if voting power overflows in `VerifyCommitTrusting` (@melekes) +- [privval] [\#4812](https://github.com/tendermint/tendermint/pull/4812) Retry `GetPubKey/SignVote/SignProposal` a few times before returning an error (@melekes) +- [p2p] [\#4847](https://github.com/tendermint/tendermint/pull/4847) Return masked IP (not the actual IP) in addrbook#groupKey (@melekes) + ## v0.33.4 - Nodes are no longer guaranteed to contain all blocks up to the latest height. The ABCI app can now control which blocks to retain through the ABCI field `ResponseCommit.retain_height`; all blocks and associated data below this height will be removed. @@ -42,7 +365,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi ### BUG FIXES: - [rpc] [\#4568](https://github.com/tendermint/tendermint/issues/4568) Fix panic when `Subscribe` is called, but HTTP client is not running. `Subscribe`, `Unsubscribe(All)` methods return an error now (@melekes). - ## v0.33.3 @@ -339,6 +661,30 @@ subjectivity interface.
Refer to the [spec](https://github.com/tendermint/spec/b - [consensus/types] [\#4243](https://github.com/tendermint/tendermint/issues/4243) fix BenchmarkRoundStateDeepCopy panics (@cuonglm) - [rpc] [\#4256](https://github.com/tendermint/tendermint/issues/4256) Pass `outCapacity` to `eventBus#Subscribe` when subscribing using a local client +## v0.32.13 + +*August 5, 2020* + + ### BUG FIXES + + - [privval] [\#5112](https://github.com/tendermint/tendermint/issues/5112) If remote signer errors, don't retry (@melekes) + +## v0.32.12 + +*May 19, 2020* + +### BUG FIXES + +- [p2p] [\#4847](https://github.com/tendermint/tendermint/pull/4847) Return masked IP (not the actual IP) in addrbook#groupKey (@melekes) + +## v0.32.11 + +*April 29, 2020* + +### BUG FIXES: + +- [privval] [\#4275](https://github.com/tendermint/tendermint/issues/4275) Fix consensus failure when remote signer drops (@melekes) + ## v0.32.10 *April 6, 2020* @@ -421,7 +767,7 @@ program](https://hackerone.com/tendermint). ### BUG FIXES: -- [rpc/lib] [\#4051](https://github.com/tendermint/tendermint/pull/4131) Fix RPC client, which was previously resolving https protocol to http (@yenkhoon) +- [rpc/lib] [\#4131](https://github.com/tendermint/tendermint/pull/4131) Fix RPC client, which was previously resolving https protocol to http (@yenkhoon) - [cs] [\#4069](https://github.com/tendermint/tendermint/issues/4069) Don't panic when block meta is not found in store (@gregzaitsev) ## v0.32.8 diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 2f02d180f..7d2b43718 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,25 +1,38 @@ -## v0.33.5 +# Unreleased Changes -\*\* +## vX.X Special thanks to external contributors on this release: Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint). -### BREAKING CHANGES: +### BREAKING CHANGES - CLI/RPC/Config + - [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker) - Apps - P2P Protocol - Go API + - [p2p] Removed unused function `MakePoWTarget`. (@erikgrinaker) -### FEATURES: +- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio) +- Blockchain Protocol -### IMPROVEMENTS: +### FEATURES -### BUG FIXES: + +### IMPROVEMENTS + +- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778) +- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys. + +### BUG FIXES + +- [types] \#5523 Change json naming of `PartSetHeader` within `BlockID` from `parts` to `part_set_header` (@marbar3778) +- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash) +- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 7088fca48..8c5a99203 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,4 +1,5 @@ # The Tendermint Code of Conduct + This code of conduct applies to all projects run by the Tendermint/COSMOS team and hence to tendermint. 
@@ -6,6 +7,7 @@ This code of conduct applies to all projects run by the Tendermint/COSMOS team a # Conduct + ## Contact: conduct@tendermint.com * We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic. @@ -29,6 +31,7 @@ This code of conduct applies to all projects run by the Tendermint/COSMOS team a # Moderation + These are the policies for upholding our community’s standards of conduct. If you feel that a thread needs moderation, please contact the above mentioned person. 1. Remarks that violate the Tendermint/COSMOS standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a972e9c3b..b51d35ae2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,7 +5,7 @@ contributing, it may be helpful to understand the goal of the project. The goal of Tendermint is to develop a BFT consensus engine robust enough to support permissionless value-carrying networks. While all contributions are welcome, contributors should bear this goal in mind in deciding if they should -target the main tendermint project or a potential fork. When targeting the +target the main Tendermint project or a potential fork. When targeting the main Tendermint project, the following process leads to the best chance of landing changes in master. @@ -30,14 +30,17 @@ Comment](https://github.com/tendermint/spec/tree/master/rfc). Discussion at the RFC stage will build collective understanding of the dimensions of the problems and help structure conversations around trade-offs. -When the problem is well understood but the solution leads to large -structural changes to the code base, these changes should be proposed in -the form of an [Architectural Decision Record -(ADR)](./docs/architecture/). The ADR will help build consensus on an -overall strategy to ensure the code base maintains coherence -in the larger context. If you are not comfortable with writing an ADR, -you can open a less-formal issue and the maintainers will help you -turn it into an ADR. ADR numbers can be registered [here](https://github.com/tendermint/tendermint/issues/2313). +When the problem is well understood but the solution leads to large structural +changes to the code base, these changes should be proposed in the form of an +[Architectural Decision Record (ADR)](./docs/architecture/). The ADR will help +build consensus on an overall strategy to ensure the code base maintains +coherence in the larger context. If you are not comfortable with writing an +ADR, you can open a less-formal issue and the maintainers will help you turn it +into an ADR. + +> How to pick a number for the ADR? + +Find the largest existing ADR number and bump it by 1. When the problem as well as proposed solution are well understood, changes should start with a [draft @@ -63,12 +66,12 @@ Instead, we use `git remote` to add the fork as a new remote for the original re For instance, to create a fork and work on a branch of it, I would: -- Create the fork on github, using the fork button. +- Create the fork on GitHub, using the fork button. - Go to the original repo checked out locally (i.e. 
`$GOPATH/src/github.com/tendermint/tendermint`) - `git remote rename origin upstream` - `git remote add origin git@github.com:ebuchman/basecoin.git` -Now `origin` refers to my fork and `upstream` refers to the tendermint version. +Now `origin` refers to my fork and `upstream` refers to the Tendermint version. So I can `git push -u origin master` to update my fork, and make pull requests to tendermint from there. Of course, replace `ebuchman` with your git handle. @@ -103,39 +106,40 @@ specify exactly the dependency you want to update, eg. We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core. -For linting and checking breaking changes, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`. +For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`. There are two ways to generate your proto stubs. 1. Use Docker, pull an image that will generate your proto stubs with no need to install anything. `make proto-gen-docker` -2. Run `make proto-gen` after installing `protoc` and gogoproto. +2. Run `make proto-gen` after installing `buf` and `gogoproto`; you can do this by running `make protobuf`. ### Installation Instructions -To install `protoc`, download an appropriate release (https://github.com/protocolbuffers/protobuf) and then move the provided binaries into your PATH (follow instructions in README included with the download). +To install `protoc`, download an appropriate release (<https://github.com/protocolbuffers/protobuf>) and then move the provided binaries into your PATH (follow instructions in README included with the download). To install `gogoproto`, do the following: ```sh -$ go get github.com/gogo/protobuf/gogoproto -$ cd $GOPATH/pkg/mod/github.com/gogo/protobuf@v1.3.1 # or wherever go get installs things -$ make install +go get github.com/gogo/protobuf/gogoproto +cd $GOPATH/pkg/mod/github.com/gogo/protobuf@v1.3.1 # or wherever go get installs things +make install ``` You should now be able to run `make proto-gen` from inside the root Tendermint directory to generate new files from proto files. -## Vagrant - -If you are a [Vagrant](https://www.vagrantup.com/) user, you can get started -hacking Tendermint with the commands below. +### Visual Studio Code -NOTE: In case you installed Vagrant in 2017, you might need to run -`vagrant box update` to upgrade to the latest `ubuntu/xenial64`. +If you are a VS Code user, you may want to add the following to your `.vscode/settings.json`: -``` -vagrant up -vagrant ssh -make test +```json +{ + "protoc": { + "options": [ + "--proto_path=${workspaceRoot}/proto", + "--proto_path=${workspaceRoot}/third_party/proto" + ] + } +} ``` ## Changelog @@ -145,7 +149,7 @@ pull-request that includes an update to the `CHANGELOG_PENDING.md` file.
Changelog entries should be formatted as follows: -``` +```md - [module] \#xxx Some description about the change (@contributor) ``` Here, `module` is the part of the code that changed (typically a top-level Go package), `xxx` is the pull-request number, and `contributor` is the author/s of the change. -It's also acceptable for `xxx` to refer to the relevent issue number, but pull-request +It's also acceptable for `xxx` to refer to the relevant issue number, but pull-request numbers are preferred. Note this means pull-requests should be opened first so the changelog can then be updated with the pull-request's number. @@ -170,7 +174,7 @@ Breaking changes are further subdivided according to the APIs/users they impact. Any change that affects multiple APIs/users should be recorded multiple times - for instance, a change to the `Blockchain Protocol` that removes a field from the header should also be recorded under `CLI/RPC/Config` since the field will be -removed from the header in rpc responses as well. +removed from the header in RPC responses as well. ## Branching Model and Release @@ -178,38 +182,64 @@ The main development branch is master. Every release is maintained in a release branch named `vX.Y.Z`. +Pending minor releases have long-lived release candidate ("RC") branches. Minor release changes should be merged to these long-lived RC branches at the same time that the changes are merged to master. + Note all pull requests should be squash merged except for merging to a release branch (named `vX.Y`). This keeps the commit history clean and makes it easy to reference the pull request where a change was introduced. ### Development Procedure -- the latest state of development is on `master` -- `master` must never fail `make test` -- never --force onto `master` (except when reverting a broken commit, which should seldom happen) -- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git remote add origin`) -- make changes and update the `CHANGELOG_PENDING.md` to record your change -- before submitting a pull request, run `git rebase` on top of the latest `master` +The latest state of development is on `master`, which must never fail `make test`. _Never_ force push `master`, unless fixing broken git history (which we rarely do anyway). + +To begin contributing, create a development branch either on `github.com/tendermint/tendermint`, or your fork (using `git remote add origin`). + +Make changes, and before submitting a pull request, update the `CHANGELOG_PENDING.md` to record your change. Also, run either `git rebase` or `git merge` on top of the latest `master`. (Since pull requests are squash-merged, either is fine!) + +Update `UPGRADING.md` if the change you've made is breaking, so that instructions are in place for users on how they can upgrade their software (ABCI application, Tendermint-based blockchain, light client, wallet). + +Once you have submitted a pull request, label it with either `R:minor`, if the change should be included in the next minor release, or `R:major`, if the change is meant for a major release. -When you have submitted a pull request label the pull request with either `R:minor`, if the change can be accepted in a minor release, or `R:major`, if the change is meant for a major release. +Sometimes (often!) pull requests get out-of-date with master, as other people merge different pull requests to master.
It is our convention that pull request authors are responsible for updating their branches with master. (This also means that you shouldn't update someone else's branch for them; even if it seems like you're doing them a favor, you may be interfering with their git flow in some way!) -### Pull Merge Procedure +#### Merging Pull Requests + +It is also our convention that authors merge their own pull requests, when possible. External contributors may not have the necessary permissions to do this, in which case, a member of the core team will merge the pull request once it's been approved. + +Before merging a pull request: + +- Ensure pull branch is up-to-date with a recent `master` (GitHub won't let you merge without this!) +- Run `make test` to ensure that all tests pass +- [Squash](https://stackoverflow.com/questions/5189560/squash-my-last-x-commits-together-using-git) merge pull request + +#### Pull Requests for Minor Releases + +If your change should be included in a minor release, please also open a PR against the long-lived minor release candidate branch (e.g., `rc1/v0.33.5`) _immediately after your change has been merged to master_. + +You can do this by cherry-picking your commit off master: + +```sh +$ git checkout rc1/v0.33.5 +$ git checkout -b {new branch name} +$ git cherry-pick {commit SHA from master} +# may need to fix conflicts, and then use git add and git cherry-pick --continue +$ git push origin {new branch name} +``` -- ensure pull branch is based on a recent `master` -- run `make test` to ensure that all tests pass -- [squash](https://stackoverflow.com/questions/5189560/squash-my-last-x-commits-together-using-git) merge pull request -- the `unstable` branch may be used to aggregate pull merges before fixing tests +After this, you can open a PR. Please note in the PR body if there were merge conflicts so that reviewers can be sure to take a thorough look. ### Git Commit Style We follow the [Go style guide on commit messages](https://tip.golang.org/doc/contribute.html#commit_messages). Write concise commits that start with the package name and have a description that finishes the sentence "This change modifies Tendermint to...". For example, -\``` +```sh cmd/debug: execute p.Signal only when p is not nil [potentially longer description in the body] Fixes #nnnn -\``` +``` Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though! @@ -217,69 +247,113 @@ Each PR should have one commit once it lands on `master`; this can be accomplish #### Major Release -1. start on `master` -2. run integration tests (see `test_integrations` in Makefile) -3. prepare release in a pull request against `master` (to be squash merged): - - copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md` - - run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for +This major release process assumes that this release was preceded by release candidates. +If there were no release candidates, and you'd like to cut a major release directly from master, see below. + +1. Start on the latest RC branch (`RCx/vX.X.0`). +2. Run integration tests. +3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release: + - "Squash" changes from the changelog entries for the RCs into a single entry, + and add all changes included in `CHANGELOG_PENDING.md`. + (Squashing includes both combining all entries, as well as removing or simplifying + any intra-RC changes. 
It may also help to alphabetize the entries by package name.) + - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for + all PRs + - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes + or other upgrading flows. + - Bump P2P and block protocol versions in `version.go`, if necessary + - Bump ABCI protocol version in `version.go`, if necessary + - Add any release notes you would like to be added to the body of the release to `release_notes.md`. +4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`). +5. Once these changes are on the RC branch, branch off of the RC branch again to create a release branch: + - `git checkout RCx/vX.X.0` + - `git checkout -b release/vX.X.0` +6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`. + - `git tag -a vX.X.0 -m 'Release vX.X.0'` + - `git push origin vX.X.0` +7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`. +8. Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this + new major release series. + +##### Major Release (from `master`) + +1. Start on `master` +2. Run integration tests (see `test_integrations` in Makefile) +3. Prepare release in a pull request against `master` (to be squash merged): + - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release + had release candidates, squash all the RC updates into one + - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues - - run `bash ./scripts/authors.sh` to get a list of authors since the latest + - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the github aliases of external contributors to the top of the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>` - - reset the `CHANGELOG_PENDING.md` - - bump the appropriate versions in `version.go` -4. push your changes with prepared release details to `vX.X` (this will trigger the release `vX.X.0`) -5. merge back to master (don't squash merge!) + - Reset the `CHANGELOG_PENDING.md` + - Bump P2P and block protocol versions in `version.go`, if necessary + - Bump ABCI protocol version in `version.go`, if necessary + - Make sure all significant breaking changes are covered in `UPGRADING.md` + - Add any release notes you would like to be added to the body of the release to `release_notes.md`. +4. Push a tag with prepared release details (this will trigger the release `vX.X.0`) + - `git tag -a vX.X.x -m 'Release vX.X.x'` + - `git push origin vX.X.x` +5. Update the `CHANGELOG.md` file on master with the release's changelog. +6. Delete any RC branches and tags for this release (if applicable) #### Minor Release -Minor releases are done differently from major releases. Minor release pull requests should be labeled with `R:minor` if they are to be included. - -1. Checkout the last major release, `vX.X`. - - - `git checkout vX.X` - -2. Create a release candidate branch off the most recent major release with your upcoming version specified, `rc1/vX.X.x`, and push the branch. - - - `git checkout -b rc1/vX.X.x` - - `git push -u origin rc1/vX.X.x` - -3. Create a cherry-picking branch, and make a pull request into the release candidate. - - - `git checkout -b cherry-picks/rc1/vX.X.x` - - - This is for devs to approve the commits that are entering the release candidate. - - There may be merge conflicts. - -4. Begin cherry-picking.
- - - `git cherry-pick {PR commit from master you wish to cherry pick}` - - Fix conflicts - - `git cherry-pick --continue` - - `git push cherry-picks/rc1/vX.X.x` - - > Once all commits are included and CI/tests have passed, then it is ready for a release. - -5. Create a release branch `release/vX.X.x` off the release candidate branch. +Minor releases are done differently from major releases: They are built off of long-lived release candidate branches, rather than from master. +1. Checkout the long-lived release candidate branch: `git checkout rcX/vX.X.X` +2. Run integration tests: `make test_integrations` +3. Prepare the release: + - copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md` + - run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues + - run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh ` + - reset the `CHANGELOG_PENDING.md` + - bump P2P and block protocol versions in `version.go`, if necessary + - bump ABCI protocol version in `version.go`, if necessary + - make sure all significant breaking changes are covered in `UPGRADING.md` + - Add any release notes you would like to be added to the body of the release to `release_notes.md`. +4. Create a release branch `release/vX.X.x` off the release candidate branch: - `git checkout -b release/vX.X.x` - `git push -u origin release/vX.X.x` - > Note this Branch is protected once pushed, you will need admin help to make any change merges into the branch. - -6. Merge Commit the release branch into the latest major release branch `vX.X`, this will start the release process. - -7. Create a Pull Request back to master with the CHANGELOG & version changes from the latest release. + - Note that all branches prefixed with `release` are protected once pushed. You will need admin help to make any changes to the branch. +5. Once the release branch has been approved, make sure to pull it locally, then push a tag. + - `git tag -a vX.X.x -m 'Release vX.X.x'` + - `git push origin vX.X.x` +6. Create a pull request back to master with the CHANGELOG & version changes from the latest release. - Remove all `R:minor` labels from the pull requests that were included in the release. - > Note: Do not merge the release branch into master. + - Do not merge the release branch into master. +7. Delete the former long lived release candidate branch once the release has been made. +8. Create a new release candidate branch to be used for the next release. #### Backport Release -1. start from the existing release branch you want to backport changes to (e.g. v0.30) +1. Start from the existing release branch you want to backport changes to (e.g. v0.30) Branch to a release/vX.X.X branch locally (e.g. release/v0.30.7) -2. cherry pick the commit(s) that contain the changes you want to backport (usually these commits are from squash-merged PRs which were already reviewed) -3. steps 2 and 3 from [Major Release](#major-release) -4. push changes to release/vX.X.X branch -5. open a PR against the existing vX.X branch +2. Cherry pick the commit(s) that contain the changes you want to backport (usually these commits are from squash-merged PRs which were already reviewed) +3. Follow steps 2 and 3 from [Major Release](#major-release) +4. Push changes to release/vX.X.X branch +5. 
Open a PR against the existing vX.X branch + +#### Release Candidates + +Before creating an official release, especially a major release, we may want to create a +release candidate (RC) for our friends and partners to test out. We use git tags to +create RCs, and we build them off of RC branches. RC branches typically have names formatted +like `RCX/vX.X.X` (or, concretely, `RC0/v0.34.0`), while the tags themselves follow +the "standard" release naming conventions, with `-rcX` at the end (`vX.X.X-rcX`). + +(Note that branches and tags _cannot_ have the same names, so it's important that these branches +have distinct names from the tags/release names.) + +1. Start from the RC branch (e.g. `RC0/v0.34.0`). +2. Create the new tag, specifying a name and a tag "message": + `git tag -a v0.34.0-rc0 -m "Release Candidate v0.34.0-rc0"` +3. Push the tag back up to origin: + `git push origin v0.34.0-rc0` + Now the tag should be available on the repo's releases page. +4. Create a new release candidate branch for any possible updates to the RC: + `git checkout -b RC1/v0.34.0; git push origin RC1/v0.34.0` ## Testing @@ -292,7 +366,7 @@ includes its continuous integration status using a badge in the `README.md`. ### RPC Testing -If you contribute to the RPC endpoints it's important to document your changes in the [Swagger file](./rpc/swagger/swagger.yaml) +If you contribute to the RPC endpoints, it's important to document your changes in the [OpenAPI file](./rpc/openapi/openapi.yaml). To test your changes you should install `nodejs` and run: ```bash diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile index e3a918749..67aa3663f 100644 --- a/DOCKER/Dockerfile +++ b/DOCKER/Dockerfile @@ -27,28 +27,21 @@ WORKDIR $TMHOME # p2p, rpc and prometheus port EXPOSE 26656 26657 26660 -ENTRYPOINT ["/usr/bin/tendermint"] -CMD ["node"] STOPSIGNAL SIGTERM ARG BINARY=tendermint COPY $BINARY /usr/bin/tendermint -# Create default configuration for docker run. -RUN /usr/bin/tendermint init && \ - sed -i \ - -e 's/^proxy_app\s*=.*/proxy_app = "kvstore"/' \ - -e 's/^moniker\s*=.*/moniker = "dockernode"/' \ - -e 's/^addr_book_strict\s*=.*/addr_book_strict = false/' \ - -e 's/^timeout_commit\s*=.*/timeout_commit = "500ms"/' \ - -e 's/^index_all_tags\s*=.*/index_all_tags = true/' \ - -e 's,^laddr = "tcp://127.0.0.1:26657",laddr = "tcp://0.0.0.0:26657",' \ - -e 's/^prometheus\s*=.*/prometheus = true/' \ - $TMHOME/config/config.toml && \ - sed -i \ - -e 's/^\s*"chain_id":.*/ "chain_id": "dockerchain",/' \ - $TMHOME/config/genesis.json +# You can overwrite these before the first run to influence +# config.toml and genesis.json. Additionally, you can override +# CMD to add parameters to `tendermint node`.
+ENV PROXY_APP=kvstore MONIKER=dockernode CHAIN_ID=dockerchain + +COPY ./docker-entrypoint.sh /usr/local/bin/ + +ENTRYPOINT ["docker-entrypoint.sh"] +CMD ["node"] # Expose the data directory as a volume since there's mutable state in there -VOLUME [ $TMHOME ] +VOLUME [ "$TMHOME" ] diff --git a/DOCKER/Dockerfile.build_c-amazonlinux b/DOCKER/Dockerfile.build_c-amazonlinux index 64babe3ae..05bc7e265 100644 --- a/DOCKER/Dockerfile.build_c-amazonlinux +++ b/DOCKER/Dockerfile.build_c-amazonlinux @@ -24,5 +24,5 @@ ENV GOPATH=/go/src RUN mkdir -p /tendermint WORKDIR /tendermint -CMD ["/usr/bin/make", "build_c"] +CMD ["/usr/bin/make", "build", "TENDERMINT_BUILD_OPTIONS=cleveldb"] diff --git a/DOCKER/README.md b/DOCKER/README.md index e60303c68..5cd39446f 100644 --- a/DOCKER/README.md +++ b/DOCKER/README.md @@ -8,12 +8,12 @@ Official releases can be found [here](https://github.com/tendermint/tendermint/r The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/master/DOCKER/Dockerfile). -Respective versioned files can be found https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile (replace the Xs with the version number). +Respective versioned files can be found at <https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile> (replace the Xs with the version number). ## Quick reference -- **Where to get help:** https://tendermint.com/ -- **Where to file issues:** https://github.com/tendermint/tendermint/issues +- **Where to get help:** <https://tendermint.com/> +- **Where to file issues:** <https://github.com/tendermint/tendermint/issues> - **Supported Docker versions:** [the latest release](https://github.com/moby/moby/releases) (down to 1.6 on a best-effort basis) ## Tendermint @@ -30,7 +30,7 @@ To get started developing applications, see the [application developers guide](h A quick example of a built-in app and Tendermint core in one container. -``` +```sh docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=kvstore ``` @@ -39,7 +39,7 @@ To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/master/Makefile) and run: -``` +```sh make build-linux make build-docker-localnode make localnet-start diff --git a/DOCKER/docker-entrypoint.sh b/DOCKER/docker-entrypoint.sh new file mode 100755 index 000000000..0cb45c58a --- /dev/null +++ b/DOCKER/docker-entrypoint.sh @@ -0,0 +1,23 @@ +#!/bin/bash +set -e + +if [ ! -d "$TMHOME/config" ]; then + echo "Running tendermint init to create (default) configuration for docker run."
+ tendermint init + + sed -i \ + -e "s/^proxy_app\s*=.*/proxy_app = \"$PROXY_APP\"/" \ + -e "s/^moniker\s*=.*/moniker = \"$MONIKER\"/" \ + -e 's/^addr_book_strict\s*=.*/addr_book_strict = false/' \ + -e 's/^timeout_commit\s*=.*/timeout_commit = "500ms"/' \ + -e 's/^index_all_tags\s*=.*/index_all_tags = true/' \ + -e 's,^laddr = "tcp://127.0.0.1:26657",laddr = "tcp://0.0.0.0:26657",' \ + -e 's/^prometheus\s*=.*/prometheus = true/' \ + "$TMHOME/config/config.toml" + + jq ".chain_id = \"$CHAIN_ID\" | .consensus_params.block.time_iota_ms = \"500\"" \ + "$TMHOME/config/genesis.json" > "$TMHOME/config/genesis.json.new" + mv "$TMHOME/config/genesis.json.new" "$TMHOME/config/genesis.json" +fi + +exec tendermint "$@" diff --git a/Makefile b/Makefile index eb9626bf4..b73a51467 100644 --- a/Makefile +++ b/Makefile @@ -1,42 +1,74 @@ +#!/usr/bin/make -f + PACKAGES=$(shell go list ./...) -OUTPUT?=build/tendermint +BUILDDIR ?= $(CURDIR)/build -BUILD_TAGS?='tendermint' -LD_FLAGS = -X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD` -s -w +BUILD_TAGS?=tendermint +VERSION := $(shell git describe --always) +LD_FLAGS = -X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION) BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)" HTTPS_GIT := https://github.com/tendermint/tendermint.git DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf +CGO_ENABLED ?= 0 + +# handle nostrip +ifeq (,$(findstring nostrip,$(TENDERMINT_BUILD_OPTIONS))) + BUILD_FLAGS += -trimpath + LD_FLAGS += -s -w +endif + +# handle race +ifeq (race,$(findstring race,$(TENDERMINT_BUILD_OPTIONS))) + CGO_ENABLED=1 + BUILD_FLAGS += -race +endif + +# handle cleveldb +ifeq (cleveldb,$(findstring cleveldb,$(TENDERMINT_BUILD_OPTIONS))) + CGO_ENABLED=1 + BUILD_TAGS += cleveldb +endif + +# handle badgerdb +ifeq (badgerdb,$(findstring badgerdb,$(TENDERMINT_BUILD_OPTIONS))) + BUILD_TAGS += badgerdb +endif + +# handle rocksdb +ifeq (rocksdb,$(findstring rocksdb,$(TENDERMINT_BUILD_OPTIONS))) + CGO_ENABLED=1 + BUILD_TAGS += rocksdb +endif + +# handle boltdb +ifeq (boltdb,$(findstring boltdb,$(TENDERMINT_BUILD_OPTIONS))) + BUILD_TAGS += boltdb +endif + +# allow users to pass additional flags via the conventional LDFLAGS variable +LD_FLAGS += $(LDFLAGS) all: check build test install .PHONY: all # The below include contains the tools. 
-include tools.mk -include tests.mk +include tools/Makefile +include test/Makefile ############################################################################### ### Build Tendermint ### ############################################################################### -build: - CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ +build: $(BUILDDIR)/ + CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(BUILDDIR)/ ./cmd/tendermint/ .PHONY: build -build_c: - CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" -o $(OUTPUT) ./cmd/tendermint/ -.PHONY: build_c - -build_race: - CGO_ENABLED=1 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint -.PHONY: build_race - install: - CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint + CGO_ENABLED=$(CGO_ENABLED) go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint .PHONY: install -install_c: - CGO_ENABLED=1 go install $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" ./cmd/tendermint -.PHONY: install_c +$(BUILDDIR)/: + mkdir -p $@ ############################################################################### ### Protobuf ### @@ -63,6 +95,11 @@ proto-lint: @$(DOCKER_BUF) check lint --error-format=json .PHONY: proto-lint +proto-format: + @echo "Formatting Protobuf files" + docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \; +.PHONY: proto-format + proto-check-breaking: @$(DOCKER_BUF) check breaking --against-input .git#branch=master .PHONY: proto-check-breaking @@ -111,7 +148,7 @@ draw_deps: get_deps_bin_size: @# Copy of build recipe with additional flags to perform binary size analysis - $(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ 2>&1)) + $(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(BUILDDIR)/ ./cmd/tendermint/ 2>&1)) @find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log @echo "Results can be found here: $(CURDIR)/deps_bin_size.log" .PHONY: get_deps_bin_size @@ -125,15 +162,15 @@ gen_certs: clean_certs certstrap init --common-name "tendermint.com" --passphrase "" certstrap request-cert --common-name "server" -ip "127.0.0.1" --passphrase "" certstrap sign "server" --CA "tendermint.com" --passphrase "" - mv out/server.crt rpc/lib/server/test.crt - mv out/server.key rpc/lib/server/test.key + mv out/server.crt rpc/jsonrpc/server/test.crt + mv out/server.key rpc/jsonrpc/server/test.key rm -rf out .PHONY: gen_certs # deletes generated certificates clean_certs: - rm -f rpc/lib/server/test.crt - rm -f rpc/lib/server/test.key + rm -f rpc/jsonrpc/server/test.crt + rm -f rpc/jsonrpc/server/test.key .PHONY: clean_certs ############################################################################### @@ -155,17 +192,28 @@ DESTINATION = ./index.html.md ############################################################################### ### Documentation ### ############################################################################### - +# todo remove once tendermint.com DNS is solved build-docs: - cd docs && \ - while read p; do \ - (git checkout $${p} && npm install && VUEPRESS_BASE="/$${p}/" npm run build) ; \ - mkdir -p ~/output/$${p} ; \ - cp -r .vuepress/dist/* ~/output/$${p}/ ; \ - cp ~/output/$${p}/index.html ~/output ; \ + @cd docs && \ + while read -r branch path_prefix; 
 do \ + (git checkout $${branch} && npm install && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \ + mkdir -p ~/output/$${path_prefix} ; \ + cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \ + cp ~/output/$${path_prefix}/index.html ~/output ; \ done < versions ; .PHONY: build-docs +build-gh-docs: + @cd docs && \ + while read -r branch path_prefix; do \ + (git checkout $${branch} && npm install && VUEPRESS_BASE="/tendermint/$${path_prefix}/" npm run build) ; \ + mkdir -p ~/output/$${path_prefix} ; \ + cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \ + cp ~/output/$${path_prefix}/index.html ~/output ; \ + done < versions ; +.PHONY: build-gh-docs + +# todo remove once tendermint.com DNS is solved sync-docs: cd ~/output && \ echo "role_arn = ${DEPLOYMENT_ROLE_ARN}" >> /root/.aws/config ; \ @@ -178,8 +226,8 @@ sync-docs: ### Docker image ### ############################################################################### -build-docker: - cp $(OUTPUT) DOCKER/tendermint +build-docker: build-linux + cp $(BUILDDIR)/tendermint DOCKER/tendermint docker build --label=tendermint --tag="tendermint/tendermint" DOCKER rm -rf DOCKER/tendermint .PHONY: build-docker @@ -197,9 +245,9 @@ build-docker-localnode: @cd networks/local && make .PHONY: build-docker-localnode -# Runs `make build_c` from within an Amazon Linux (v2)-based Docker build -# container in order to build an Amazon Linux-compatible binary. Produces a -# compatible binary at ./build/tendermint +# Runs `make build TENDERMINT_BUILD_OPTIONS=cleveldb` from within an Amazon +# Linux (v2)-based Docker build container in order to build an Amazon +# Linux-compatible binary. Produces a compatible binary at ./build/tendermint build_c-amazonlinux: $(MAKE) -C ./DOCKER build_amazonlinux_buildimage docker run --rm -it -v `pwd`:/tendermint tendermint/tendermint:build_c-amazonlinux @@ -207,7 +255,7 @@ build_c-amazonlinux: # Run a 4-node testnet locally localnet-start: localnet-stop build-docker-localnode - @if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --config /etc/tendermint/config-template.toml --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2; fi + @if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --config /etc/tendermint/config-template.toml --o . 
--starting-ip-address 192.167.10.2; fi docker-compose up .PHONY: localnet-start @@ -233,3 +281,17 @@ endif contract-tests: dredd .PHONY: contract-tests + +clean: + rm -rf $(CURDIR)/artifacts/ $(BUILDDIR)/ + +build-reproducible: + docker rm latest-build || true + docker run --volume=$(CURDIR):/sources:ro \ + --env TARGET_PLATFORMS='linux/amd64 linux/arm64 darwin/amd64 windows/amd64' \ + --env APP=tendermint \ + --env COMMIT=$(shell git rev-parse --short=8 HEAD) \ + --env VERSION=$(shell git describe --tags) \ + --name latest-build cosmossdk/rbuilder:latest + docker cp -a latest-build:/home/builder/artifacts/ $(CURDIR)/ +.PHONY: build-reproducible diff --git a/PHILOSOPHY.md b/PHILOSOPHY.md deleted file mode 100644 index 9be7a56cb..000000000 --- a/PHILOSOPHY.md +++ /dev/null @@ -1,158 +0,0 @@ -## Design goals - -The design goals for Tendermint (and the SDK and related libraries) are: - - * Simplicity and Legibility - * Parallel performance, namely ability to utilize multicore architecture - * Ability to evolve the codebase bug-free - * Debuggability - * Complete correctness that considers all edge cases, esp in concurrency - * Future-proof modular architecture, message protocol, APIs, and encapsulation - - -### Justification - -Legibility is key to maintaining bug-free software as it evolves toward more -optimizations, more ease of debugging, and additional features. - -It is too easy to introduce bugs over time by replacing lines of code with -those that may panic, which means ideally locks are unlocked by defer -statements. - -For example, - -```go -func (obj *MyObj) something() { - mtx.Lock() - obj.something = other - mtx.Unlock() -} -``` - -It is too easy to refactor the codebase in the future to replace `other` with -`other.String()` for example, and this may introduce a bug that causes a -deadlock. So as much as reasonably possible, we need to be using defer -statements, even though it introduces additional overhead. - -If it is necessary to optimize the unlocking of mutex locks, the solution is -more modularity via smaller functions, so that defer'd unlocks are scoped -within a smaller function. - -Similarly, idiomatic for-loops should always be preferred over those that use -custom counters, because it is too easy to evolve the body of a for-loop to -become more complicated over time, and it becomes more and more difficult to -assess the correctness of such a for-loop by visual inspection. - - -### On performance - -It doesn't matter whether there are alternative implementations that are 2x or -3x more performant, when the software doesn't work, deadlocks, or if bugs -cannot be debugged. By taking advantage of multicore concurrency, the -Tendermint implementation will at least be an order of magnitude within the -range of what is theoretically possible. The design philosophy of Tendermint, -and the choice of Go as implementation language, is designed to make Tendermint -implementation the standard specification for concurrent BFT software. - -By focusing on the message protocols (e.g. ABCI, p2p messages), and -encapsulation e.g. IAVL module, (relatively) independent reactors, we are both -implementing a standard implementation to be used as the specification for -future implementations in more optimizable languages like Rust, Java, and C++; -as well as creating sufficiently performant software. Tendermint Core will -never be as fast as future implementations of the Tendermint Spec, because Go -isn't designed to be as fast as possible. 
The advantage of using Go is that we -can develop the whole stack of modular components **faster** than in other -languages. - -Furthermore, the real bottleneck is in the application layer, and it isn't -necessary to support more than a sufficiently decentralized set of validators -(e.g. 100 ~ 300 validators is sufficient, with delegated bonded PoS). - -Instead of optimizing Tendermint performance down to the metal, lets focus on -optimizing on other matters, namely ability to push feature complete software -that works well enough, can be debugged and maintained, and can serve as a spec -for future implementations. - - -### On encapsulation - -In order to create maintainable, forward-optimizable software, it is critical -to develop well-encapsulated objects that have well understood properties, and -to re-use these easy-to-use-correctly components as building blocks for further -encapsulated meta-objects. - -For example, mutexes are cheap enough for Tendermint's design goals when there -isn't goroutine contention, so it is encouraged to create concurrency safe -structures with struct-level mutexes. If they are used in the context of -non-concurrent logic, then the performance is good enough. If they are used in -the context of concurrent logic, then it will still perform correctly. - -Examples of this design principle can be seen in the types.ValidatorSet struct, -and the rand.Rand struct. It's one single struct declaration that can be used -in both concurrent and non-concurrent logic, and due to its well encapsulation, -it's easy to get the usage of the mutex right. - -#### example: rand.Rand: - -`The default Source is safe for concurrent use by multiple goroutines, but -Sources created by NewSource are not`. The reason why the default -package-level source is safe for concurrent use is because it is protected (see -`lockedSource` in https://golang.org/src/math/rand/rand.go). - -But we shouldn't rely on the global source, we should be creating our own -Rand/Source instances and using them, especially for determinism in testing. -So it is reasonable to have rand.Rand be protected by a mutex. Whether we want -our own implementation of Rand is another question, but the answer there is -also in the affirmative. Sometimes you want to know where Rand is being used -in your code, so it becomes a simple matter of dropping in a log statement to -inject inspectability into Rand usage. Also, it is nice to be able to extend -the functionality of Rand with custom methods. For these reasons, and for the -reasons which is outlined in this design philosophy document, we should -continue to use the rand.Rand object, with mutex protection. - -Another key aspect of good encapsulation is the choice of exposed vs unexposed -methods. It should be clear to the reader of the code, which methods are -intended to be used in what context, and what safe usage is. Part of this is -solved by hiding methods via unexported methods. Another part of this is -naming conventions on the methods (e.g. underscores) with good documentation, -and code organization. If there are too many exposed methods and it isn't -clear what methods have what side effects, then there is something wrong about -the design of abstractions that should be revisited. - - -### On concurrency - -In order for Tendermint to remain relevant in the years to come, it is vital -for Tendermint to take advantage of multicore architectures. 
Due to the nature -of the problem, namely consensus across a concurrent p2p gossip network, and to -handle RPC requests for a large number of consuming subscribers, it is -unavoidable for Tendermint development to require expertise in concurrency -design, especially when it comes to the reactor design, and also for RPC -request handling. - - -## Guidelines - -Here are some guidelines for designing for (sufficient) performance and concurrency: - - * Mutex locks are cheap enough when there isn't contention. - * Do not optimize code without analytical or observed proof that it is in a hot path. - * Don't over-use channels when mutex locks w/ encapsulation are sufficient. - * The need to drain channels are often a hint of unconsidered edge cases. - * The creation of O(N) one-off goroutines is generally technical debt that - needs to get addressed sooner than later. Avoid creating too many -goroutines as a patch around incomplete concurrency design, or at least be -aware of the debt and do not invest in the debt. On the other hand, Tendermint -is designed to have a limited number of peers (e.g. 10 or 20), so the creation -of O(C) goroutines per O(P) peers is still O(C\*P=constant). - * Use defer statements to unlock as much as possible. If you want to unlock sooner, - try to create more modular functions that do make use of defer statements. - -## Matras - -* Premature optimization kills -* Readability is paramount -* Beautiful is better than fast. -* In the face of ambiguity, refuse the temptation to guess. -* In the face of bugs, refuse the temptation to cover the bug. -* There should be one-- and preferably only one --obvious way to do it. diff --git a/README.md b/README.md index 79db68ca6..efb4f13c4 100644 --- a/README.md +++ b/README.md @@ -7,15 +7,16 @@ Or [Blockchain](), for short. 
[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest) -[![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://godoc.org/github.com/tendermint/tendermint) -[![Go version](https://img.shields.io/badge/go-1.13-blue.svg)](https://github.com/moovweb/gvm) -[![Discord](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/AzefAFd) +[![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://pkg.go.dev/github.com/tendermint/tendermint) +[![Go version](https://img.shields.io/badge/go-1.15-blue.svg)](https://github.com/moovweb/gvm) +[![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/AzefAFd) [![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE) -[![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint) +[![tendermint/tendermint](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint) +[![Sourcegraph](https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg)](https://sourcegraph.com/github.com/tendermint/tendermint?badge) -| Branch | Tests | Coverage | -| ------ | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | -| master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | +| Branch | Tests | Coverage | Linting | +| ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------- | +| master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master)
![Tests](https://github.com/tendermint/tendermint/workflows/Tests/badge.svg?branch=master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | ![Lint](https://github.com/tendermint/tendermint/workflows/Lint/badge.svg) | Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines. @@ -27,29 +28,27 @@ see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/ab ## Releases -NOTE: The master branch is now an active development branch (starting with `v0.32`). Please, do not depend on it and -use [releases](https://github.com/tendermint/tendermint/releases) instead. +Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead. Tendermint is being used in production in both private and public environments, most notably the blockchains of the [Cosmos Network](https://cosmos.network/). However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0. See below for more details about [versioning](#versioning). -In any case, if you intend to run Tendermint in production, -please [contact us](mailto:partners@tendermint.com) and [join the chat](https://riot.im/app/#/room/#tendermint:matrix.org). +In any case, if you intend to run Tendermint in production, we're happy to help. You can +contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/AzefAFd). ## Security To report a security vulnerability, see our [bug bounty -program](https://hackerone.com/tendermint) - -For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.md) +program](https://hackerone.com/tendermint). +For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md) ## Minimum requirements | Requirement | Notes | | ----------- | ---------------- | -| Go version | Go1.13 or higher | +| Go version | Go1.15 or higher | ## Documentation @@ -57,13 +56,13 @@ Complete documentation can be found on the [website](https://docs.tendermint.com ### Install -See the [install instructions](/docs/introduction/install.md) +See the [install instructions](/docs/introduction/install.md). ### Quick Start - [Single node](/docs/introduction/quick-start.md) - [Local cluster using docker-compose](/docs/networks/docker-compose.md) -- [Remote cluster using terraform and ansible](/docs/networks/terraform-and-ansible.md) +- [Remote cluster using Terraform and Ansible](/docs/networks/terraform-and-ansible.md) - [Join the Cosmos testnet](https://cosmos.network/testnet) ## Contributing @@ -71,12 +70,9 @@ See the [install instructions](/docs/introduction/install.md) Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions. Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md) -and the [style guide](STYLE_GUIDE.md). - -To get more active, Join the wider community at [Discord](https://discord.gg/AzefAFd) or jump onto the [Forum](https://forum.cosmos.network/). - -Learn more by reading the code and the -[specifications](https://github.com/tendermint/spec) or watch the [Developer Sessions](/docs/DEV_SESSIONS.md) and read up on the +and the [style guide](STYLE_GUIDE.md). 
You may also find it helpful to read the +[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md), +and familiarize yourself with our [Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture). ## Versioning @@ -89,45 +85,48 @@ According to SemVer, anything in the public API can change at any time before ve To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used to signal breaking changes across a subset of the total public API. This subset includes all interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not -include the in-process Go APIs. +include the Go APIs. That said, breaking changes in the following packages will be documented in the CHANGELOG even if they don't lead to MINOR version bumps: - crypto -- types -- rpc/client - config -- node - libs - - bech32 - - common - - db - - errors - - log - -Exported objects in these packages that are not covered by the versioning scheme -are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any -time without notice. Functions, types, and values in any other package may also change at any time. + - bech32 + - bits + - bytes + - json + - log + - math + - net + - os + - protoio + - rand + - sync + - strings + - service +- node +- rpc/client +- types ### Upgrades In an effort to avoid accumulating technical debt prior to 1.0.0, we do not guarantee that breaking changes (ie. bumps in the MINOR version) -will work with existing tendermint blockchains. In these cases you will +will work with existing Tendermint blockchains. In these cases you will have to start a new blockchain, or write something custom to get the old -data into the new chain. +data into the new chain. However, any bump in the PATCH version should be +compatible with existing blockchain histories. -However, any bump in the PATCH version should be compatible with existing histories -(if not please open an [issue](https://github.com/tendermint/tendermint/issues)). For more information on upgrading, see [UPGRADING.md](./UPGRADING.md). ### Supported Versions -Because we are a small core team, we only ship patch updates, including security updates, -to the most recent minor release and the second-most recent minor release. Consequently, -we strongly recommend keeping Tendermint up-to-date. Upgrading instructions can be found +Because we are a small core team, we only ship patch updates, including security updates, +to the most recent minor release and the second-most recent minor release. Consequently, +we strongly recommend keeping Tendermint up-to-date. Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md). ## Resources @@ -138,20 +137,12 @@ For details about the blockchain data structures and the p2p protocols, see the [Tendermint specification](https://docs.tendermint.com/master/spec/). For details on using the software, see the [documentation](/docs/) which is also -hosted at: https://docs.tendermint.com/master/ +hosted at: ### Tools -Benchmarking is provided by `tm-load-test`. -The code for `tm-load-test` can be found [here](https://github.com/informalsystems/tm-load-test) this binary needs to be built separately. -Additional documentation is found [here](/docs/tools). 
- -### Sub-projects - -- [Amino](http://github.com/tendermint/go-amino), reflection-based proto3, with - interfaces -- [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation -- [Tm-db](http://github.com/tendermint/tm-db), Data Base abstractions to be used in applications. +Benchmarking is provided by [`tm-load-test`](https://github.com/informalsystems/tm-load-test). +Additional tooling can be found in [/docs/tools](/docs/tools). ### Applications diff --git a/SECURITY.md b/SECURITY.md index 8a373a290..351f5606c 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,72 +1,154 @@ # Security +## Reporting a Bug + As part of our [Coordinated Vulnerability Disclosure Policy](https://tendermint.com/security), we operate a [bug bounty](https://hackerone.com/tendermint). -See the policy for more details on submissions and rewards. +See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in. -Here is a list of examples of the kinds of bugs we're most interested in: +### Guidelines -## Specification +We require that all researchers: -- Conceptual flaws -- Ambiguities, inconsistencies, or incorrect statements -- Mis-match between specification and implementation of any component +* Use the bug bounty to disclose all vulnerabilities, and avoid posting vulnerability information in public places, including Github Issues, Discord channels, and Telegram groups +* Make every effort to avoid privacy violations, degradation of user experience, disruption to production systems (including but not limited to the Cosmos Hub), and destruction of data +* Keep any information about vulnerabilities that you’ve discovered confidential between yourself and the Tendermint Core engineering team until the issue has been resolved and disclosed +* Avoid posting personally identifiable information, privately or publicly -## Consensus +If you follow these guidelines when reporting an issue to us, we commit to: -Assuming less than 1/3 of the voting power is Byzantine (malicious): +* Not pursue or support any legal action related to your research on this vulnerability +* Work with you to understand, resolve and ultimately disclose the issue in a timely fashion + +## Disclosure Process + +Tendermint Core uses the following disclosure process: + +1. Once a security report is received, the Tendermint Core team works to verify the issue and confirm its severity level using CVSS. +2. The Tendermint Core team collaborates with the Gaia team to determine the vulnerability’s potential impact on the Cosmos Hub. +3. Patches are prepared for eligible releases of Tendermint in private repositories. See “Supported Releases” below for more information on which releases are considered eligible. +4. If it is determined that a CVE-ID is required, we request a CVE through a CVE Numbering Authority. +5. We notify the community that a security release is coming, to give users time to prepare their systems for the update. Notifications can include forum posts, tweets, and emails to partners and validators, including emails sent to the [Tendermint Security Mailing List](https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc). +6. 24 hours following this notification, the fixes are applied publicly and new releases are issued. +7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these releases, and then themselves issue new releases. +8. 
Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we notify the community, again, through the same channels as above. We also publish a Security Advisory on Github and publish the CVE, as long as neither the Security Advisory nor the CVE include any information on how to exploit these vulnerabilities beyond what information is already available in the patch itself. +9. Once the community is notified, we will pay out any relevant bug bounties to submitters. +10. One week after the releases go out, we will publish a post with further details on the vulnerability as well as our response to it. + +This process can take some time. Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow the process described above to ensure that disclosures are handled consistently and to keep Tendermint Core and its downstream dependent projects--including but not limited to Gaia and the Cosmos Hub--as secure as possible. + +### Example Timeline + +The following is an example timeline for the triage and response. The required roles and team members are described in parentheses after each task; however, multiple people can play each role and each person may play multiple roles. + +#### > 24 Hours Before Release Time + +1. Request CVE number (ADMIN) +2. Gather emails and other contact info for validators (COMMS LEAD) +3. Test fixes on a testnet (TENDERMINT ENG, COSMOS ENG) +4. Write “Security Advisory” for forum (TENDERMINT LEAD) + +#### 24 Hours Before Release Time + +1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD) +2. Post Tweet linking to forum post (COMMS LEAD) +3. Announce security advisory/link to post in various other social channels (Telegram, Discord) (COMMS LEAD) +4. Send emails to validators or other users (PARTNERSHIPS LEAD) + +#### Release Time + +1. Cut Tendermint releases for eligible versions (TENDERMINT ENG, TENDERMINT LEAD) +2. Cut Cosmos SDK release for eligible versions (COSMOS ENG) +3. Cut Gaia release for eligible versions (GAIA ENG) +4. Post “Security releases” on forum (TENDERMINT LEAD) +5. Post new Tweet linking to forum post (COMMS LEAD) +6. Remind everyone via social channels (Telegram, Discord) that the release is out (COMMS LEAD) +7. Send emails to validators or other users (COMMS LEAD) +8. Publish Security Advisory and CVE, if CVE has no sensitive information (ADMIN) + +#### After Release Time -- Validation of blockchain data structures, including blocks, block parts, - votes, and so on -- Execution of blocks -- Validator set changes -- Proposer round robin -- Two nodes committing conflicting blocks for the same height (safety failure) -- A correct node signing conflicting votes -- A node halting (liveness failure) -- Syncing new and old nodes +1. Write forum post with exploit details (TENDERMINT LEAD) +2. Approve pay-out on HackerOne for submitter (ADMIN) + +#### 7 Days After Release Time + +1. Publish CVE if it has not yet been published (ADMIN) +2. Publish forum post with exploit details (TENDERMINT ENG, TENDERMINT LEAD) + +## Supported Releases + +The Tendermint Core team commits to releasing security patch releases for both the latest minor release as well for the major/minor release that the Cosmos Hub is running. + +If you are running older versions of Tendermint Core, we encourage you to upgrade at your earliest opportunity so that you can receive security patches directly from the Tendermint repo. 
While you are welcome to backport security patches to older versions for your own use, we will not publish or promote these backports. + +## Scope + +The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope: + +* Any third-party services +* Findings from physical testing, such as office access +* Findings derived from social engineering (e.g., phishing) + +## Example Vulnerabilities + +The following is a list of examples of the kinds of vulnerabilities that we’re most interested in. It is not exhaustive: there are other kinds of issues we may also be interested in! + +### Specification + +* Conceptual flaws +* Ambiguities, inconsistencies, or incorrect statements +* Mis-match between specification and implementation of any component + +### Consensus + +Assuming less than 1/3 of the voting power is Byzantine (malicious): -## Networking +* Validation of blockchain data structures, including blocks, block parts, votes, and so on +* Execution of blocks +* Validator set changes +* Proposer round robin +* Two nodes committing conflicting blocks for the same height (safety failure) +* A correct node signing conflicting votes +* A node halting (liveness failure) +* Syncing new and old nodes -- Authenticated encryption (MITM, information leakage) -- Eclipse attacks -- Sybil attacks -- Long-range attacks -- Denial-of-Service -## RPC +### Networking -- Write-access to anything besides sending transactions -- Denial-of-Service -- Leakage of secrets +* Authenticated encryption (MITM, information leakage) +* Eclipse attacks +* Sybil attacks +* Long-range attacks +* Denial-of-Service -## Denial-of-Service +### RPC -Attacks may come through the P2P network or the RPC: +* Write-access to anything besides sending transactions +* Denial-of-Service +* Leakage of secrets -- Amplification attacks -- Resource abuse -- Deadlocks and race conditions -- Panics and unhandled errors +### Denial-of-Service -## Libraries +Attacks may come through the P2P network or the RPC layer: -- Serialization (Amino) -- Reading/Writing files and databases -- Logging and monitoring +* Amplification attacks +* Resource abuse +* Deadlocks and race conditions -## Cryptography +### Libraries -- Elliptic curves for validator signatures -- Hash algorithms and Merkle trees for block validation -- Authenticated encryption for P2P connections +* Serialization (Amino) +* Reading/Writing files and databases -## Light Client +### Cryptography -- Validation of blockchain data structures -- Correctly validating an incorrect proof -- Incorrectly validating a correct proof -- Syncing validator set changes +* Elliptic curves for validator signatures +* Hash algorithms and Merkle trees for block validation +* Authenticated encryption for P2P connections +### Light Client +* Core verification +* Bisection/sequential algorithms diff --git a/STYLE_GUIDE.md b/STYLE_GUIDE.md index 6d0deda6d..98e81d723 100644 --- a/STYLE_GUIDE.md +++ b/STYLE_GUIDE.md @@ -15,6 +15,7 @@ We expect all contributors to be familiar with [Effective Go](https://golang.org Perhaps more key for code readability than good commenting is having the right structure. As a rule of thumb, try to write in a logical order of importance, taking a little time to think how to order and divide the code such that someone could scroll down and understand the functionality of it just as well as you do. 
A loose example of such order would be: + * Constants, global and package-level variables * Main Struct * Options (only if they are seen as critical to the struct else they should be placed in another file) * Initialization / Start and stop of the service * Msgs/Events * Public Functions (In order of most important) * Private/helper functions * Auxiliary structs and function (can also be above private functions or in a separate file) ## General - * Use `gofmt` (or `goimport`) to format all code upon saving it. (If you use VIM, check out vim-go). - * Use a linter (see below) and generally try to keep the linter happy (where it makes sense). - * Think about documentation, and try to leave godoc comments, when it will help new developers. - * Every package should have a high level doc.go file to describe the purpose of that package, its main functions, and any other relevant information. - * `TODO` should not be used. If important enough should be recorded as an issue. - * `BUG` / `FIXME` should be used sparingly to guide future developers on some of the vulnerabilities of the code. - * `XXX` can be used in work-in-progress (prefixed with "WIP:" on github) branches but they must be removed before approving a PR. - * Applications (e.g. clis/servers) *should* panic on unexpected unrecoverable errors and print a stack trace. +* Use `gofmt` (or `goimport`) to format all code upon saving it. (If you use VIM, check out vim-go). +* Use a linter (see below) and generally try to keep the linter happy (where it makes sense). +* Think about documentation, and try to leave godoc comments, when it will help new developers. +* Every package should have a high level doc.go file to describe the purpose of that package, its main functions, and any other relevant information. +* `TODO` should not be used. If important enough, it should be recorded as an issue. +* `BUG` / `FIXME` should be used sparingly to guide future developers on some of the vulnerabilities of the code. +* `XXX` can be used in work-in-progress (prefixed with "WIP:" on github) branches but they must be removed before approving a PR. +* Applications (e.g. clis/servers) *should* panic on unexpected unrecoverable errors and print a stack trace. ## Comments - * Use a space after comment deliminter (ex. `// your comment`). - * Many comments are not sentences. These should begin with a lower case letter and end without a period. - * Conversely, sentences in comments should be sentenced-cased and end with a period. +* Use a space after comment delimiter (ex. `// your comment`). +* Many comments are not sentences. These should begin with a lower case letter and end without a period. +* Conversely, sentences in comments should be sentence-cased and end with a period. ## Linters These must be applied to all (Go) repos. - * [shellcheck](https://github.com/koalaman/shellcheck) - * [golangci-lint](https://github.com/golangci/golangci-lint) (covers all important linters) - - See the `.golangci.yml` file in each repo for linter configuration. +* [shellcheck](https://github.com/koalaman/shellcheck) +* [golangci-lint](https://github.com/golangci/golangci-lint) (covers all important linters) + * See the `.golangci.yml` file in each repo for linter configuration. ## Various - * Reserve "Save" and "Load" for long-running persistence operations.
When parsing bytes, use "Encode" or "Decode". +* Maintain consistency across the codebase. +* Functions that return functions should have the suffix `Fn` +* Names should not [stutter](https://blog.golang.org/package-names). For example, a struct generally shouldn’t have a field named after itself; e.g., this shouldn't occur: + ``` golang type middleware struct { middleware Middleware } ``` - * In comments, use "iff" to mean, "if and only if". - * Product names are capitalized, like "Tendermint", "Basecoin", "Protobuf", etc except in command lines: `tendermint --help` - * Acronyms are all capitalized, like "RPC", "gRPC", "API". "MyID", rather than "MyId". - * Prefer errors.New() instead of fmt.Errorf() unless you're actually using the format feature with arguments. + +* In comments, use "iff" to mean, "if and only if". +* Product names are capitalized, like "Tendermint", "Basecoin", "Protobuf", etc except in command lines: `tendermint --help` +* Acronyms are all capitalized, like "RPC", "gRPC", "API". "MyID", rather than "MyId". +* Prefer errors.New() instead of fmt.Errorf() unless you're actually using the format feature with arguments. ## Importing Libraries Sometimes it's necessary to rename libraries to avoid naming collisions or ambiguity. - * Use [goimports](https://godoc.org/golang.org/x/tools/cmd/goimports) - * Separate imports into blocks - one for the standard lib, one for external libs and one for application libs. - * Here are some common library labels for consistency: - - dbm "github.com/tendermint/tm-db" - - tmcmd "github.com/tendermint/tendermint/cmd/tendermint/commands" - - tmcfg "github.com/tendermint/tendermint/config/tendermint" - - tmtypes "github.com/tendermint/tendermint/types" - * Never use anonymous imports (the `.`), for example, `tmlibs/common` or anything else. - * When importing a pkg from the `tendermint/libs` directory, prefix the pkg alias with tm. - - tmbits "github.com/tendermint/tendermint/libs/bits" - * tip: Use the `_` library import to import a library for initialization effects (side effects) +* Use [goimports](https://godoc.org/golang.org/x/tools/cmd/goimports) +* Separate imports into blocks - one for the standard lib, one for external libs and one for application libs. +* Here are some common library labels for consistency: + * dbm "github.com/tendermint/tm-db" + * tmcmd "github.com/tendermint/tendermint/cmd/tendermint/commands" + * tmcfg "github.com/tendermint/tendermint/config/tendermint" + * tmtypes "github.com/tendermint/tendermint/types" +* Never use anonymous imports (the `.`), for example, `tmlibs/common` or anything else. +* When importing a pkg from the `tendermint/libs` directory, prefix the pkg alias with tm. + * tmbits "github.com/tendermint/tendermint/libs/bits" +* tip: Use the `_` library import to import a library for initialization effects (side effects) ## Dependencies - * Dependencies should be pinned by a release tag, or specific commit, to avoid breaking `go get` when external dependencies are updated. - * Refer to the [contributing](CONTRIBUTING.md) document for more details +* Dependencies should be pinned by a release tag, or specific commit, to avoid breaking `go get` when external dependencies are updated. 

## Testing

- * The first rule of testing is: we add tests to our code
- * The second rule of testing is: we add tests to our code
- * For Golang testing:
- * Make use of table driven testing where possible and not-cumbersome
- - [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go)
- * Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require)
- * When using mocks, it is recommended to use Testify [mock] (https://pkg.go.dev/github.com/stretchr/testify/mock
+* The first rule of testing is: we add tests to our code
+* The second rule of testing is: we add tests to our code
+* For Golang testing:
+  * Make use of table-driven testing where possible and not cumbersome; see the sketch after this section
+    * [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go)
+  * Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require)
+* When using mocks, it is recommended to use Testify [mock](https://pkg.go.dev/github.com/stretchr/testify/mock)
+  along with [Mockery](https://github.com/vektra/mockery) for autogeneration
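
As referenced above, a minimal sketch of a table-driven test using `require` (the `Sum` function and its cases are hypothetical, purely for illustration):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Sum is a hypothetical function under test.
func Sum(a, b int) int { return a + b }

func TestSum(t *testing.T) {
	testCases := []struct {
		name string
		a, b int
		want int
	}{
		{"zeros", 0, 0, 0},
		{"positive", 2, 3, 5},
		{"negative", -2, -3, -5},
	}

	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.want, Sum(tc.a, tc.b))
		})
	}
}
```
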
## Errors

- * Ensure that errors are concise, clear and traceable.
- * Use stdlib errors package.
- * For wrapping errors, use `fmt.Errorf()` with `%w`.
- * Panic is appropriate when an internal invariant of a system is broken, while all other cases (in particular,
+* Ensure that errors are concise, clear, and traceable.
+* Use the stdlib errors package.
+* For wrapping errors, use `fmt.Errorf()` with `%w`; see the sketch after this section.
+* Panic is appropriate when an internal invariant of a system is broken, while all other cases (in particular,
incorrect or invalid usage) should return errors.
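
A minimal sketch of the guidance above, combining `errors.New()` for sentinel errors with `%w` wrapping (all names are hypothetical):

```go
package example

import (
	"errors"
	"fmt"
)

// ErrBlockNotFound is a sentinel error; errors.New suffices since no
// formatting is needed.
var ErrBlockNotFound = errors.New("block not found")

func loadBlock(height int64) error {
	// Wrap with %w so callers can still match the underlying error.
	return fmt.Errorf("load block at height %d: %w", height, ErrBlockNotFound)
}

func handle() {
	// errors.Is sees through the %w wrapping.
	if err := loadBlock(10); errors.Is(err, ErrBlockNotFound) {
		fmt.Println("missing block:", err)
	}
}
```
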
## Config

- * Currently the TOML filetype is being used for config files
- * A good practice is to store per-user config files under `~/.[yourAppName]/config.toml`
+* Currently the TOML filetype is being used for config files.
+* A good practice is to store per-user config files under `~/.[yourAppName]/config.toml`.

## CLI

- * When implementing a CLI use [Cobra](https://github.com/spf13/cobra) and [Viper](https://github.com/spf13/viper).
- * Helper messages for commands and flags must be all lowercase.
- * Instead of using pointer flags (eg. `FlagSet().StringVar`) use Viper to retrieve flag values (eg. `viper.GetString`)
- - The flag key used when setting and getting the flag should always be stored in a
+* When implementing a CLI, use [Cobra](https://github.com/spf13/cobra) and [Viper](https://github.com/spf13/viper).
+* Help messages for commands and flags must be all lowercase.
+* Instead of using pointer flags (e.g. `FlagSet().StringVar`), use Viper to retrieve flag values (e.g. `viper.GetString`).
+  * The flag key used when setting and getting the flag should always be stored in a
variable taking the form `FlagXxx` or `flagXxx`.
- - Flag short variable descriptions should always start with a lower case character as to remain consistent with
+  * Flag short variable descriptions should always start with a lower-case character, so as to remain consistent with
the description provided in the default `--help` flag.

## Version

- * Every repo should have a version/version.go file that mimics the Tendermint Core repo
- * We read the value of the constant version in our build scripts and hence it has to be a string
+* Every repo should have a version/version.go file that mimics the Tendermint Core repo.
+* We read the value of the version constant in our build scripts, and hence it has to be a string.

## Non-Go Code

- * All non-Go code (`*.proto`, `Makefile`, `*.sh`), where there is no common
+* All non-Go code (`*.proto`, `Makefile`, `*.sh`), where there is no common
agreement on style, should be formatted according to [EditorConfig](http://editorconfig.org/) config:

-  ```
+  ```toml
  # top-most EditorConfig file
  root = true

diff --git a/UPGRADING.md b/UPGRADING.md
index df3d7101d..065e9822d 100644
--- a/UPGRADING.md
+++ b/UPGRADING.md
@@ -1,52 +1,307 @@
# Upgrading Tendermint Core

-This guide provides steps to be followed when you upgrade your applications to
-a newer version of Tendermint Core.
+This guide provides instructions for upgrading to specific versions of Tendermint Core.

-## Unreleased
-
+## v0.34.0
+
+**Upgrading to Tendermint 0.34 requires a blockchain restart.**
+This release is not compatible with previous blockchains due to changes to
+the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol").
+
+Note that Tendermint 0.34 also requires Go 1.15 or higher.
+
+### ABCI Changes
+
+* The `ABCIVersion` is now `0.17.0`.
+
+* New ABCI methods (`ListSnapshots`, `LoadSnapshotChunk`, `OfferSnapshot`, and `ApplySnapshotChunk`)
+  were added to support the new State Sync feature.
+  Previously, syncing a new node to a preexisting network could take days, but with State Sync,
+  new nodes are able to join a network in a matter of seconds.
+  Read [the spec](https://docs.tendermint.com/master/spec/abci/apps.html#state-sync)
+  if you want to learn more about State Sync, or if you'd like your application to use it.
+  (If you don't want to support State Sync in your application, you can just implement these new
+  ABCI methods as no-ops, leaving them empty; see the sketch at the end of this section.)
+
+* `KV.Pair` has been replaced with `abci.EventAttribute`. The `EventAttribute.Index` field
+  allows ABCI applications to dictate which events should be indexed.
+
+* The blockchain can now start from an arbitrary initial height,
+  provided to the application via `RequestInitChain.InitialHeight`.
+
+* The ABCI evidence type is now an enum with two recognized types of evidence:
+  `DUPLICATE_VOTE` and `LIGHT_CLIENT_ATTACK`.
+  Applications should be able to handle these evidence types
+  (i.e., through slashing or other accountability measures).
+
+* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/crypto/keys.proto#L13-L15)
+  (used in ABCI as part of `ValidatorUpdate`) now uses a `oneof` protobuf type.
+  Note that since Tendermint only supports ed25519 validator keys, there's only one
+  option in the `oneof`. For more, see "Protocol Buffers," below.
+
+* The field `Proof`, on the ABCI type `ResponseQuery`, is now named `ProofOps`.
+  For more, see "Crypto," below.
+
+* The method `SetOption` has been removed from the ABCI.Client interface. This feature was only used in early ABCI implementations.
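
If your application opts out of State Sync, a minimal sketch of the no-op implementations might look like this (the `App` type and package name are hypothetical; embedding `types.BaseApplication` already provides these methods as no-ops):

```go
package app

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// App is a hypothetical application. Embedding BaseApplication provides
// default no-op implementations of every ABCI method; the four State Sync
// methods are spelled out below only for illustration.
type App struct {
	abci.BaseApplication
}

// Returning empty responses means the node neither serves snapshots nor
// accepts offered ones.

func (App) ListSnapshots(abci.RequestListSnapshots) abci.ResponseListSnapshots {
	return abci.ResponseListSnapshots{}
}

func (App) OfferSnapshot(abci.RequestOfferSnapshot) abci.ResponseOfferSnapshot {
	return abci.ResponseOfferSnapshot{}
}

func (App) LoadSnapshotChunk(abci.RequestLoadSnapshotChunk) abci.ResponseLoadSnapshotChunk {
	return abci.ResponseLoadSnapshotChunk{}
}

func (App) ApplySnapshotChunk(abci.RequestApplySnapshotChunk) abci.ResponseApplySnapshotChunk {
	return abci.ResponseApplySnapshotChunk{}
}
```
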
+### P2P Protocol
+
+The default codec is now proto3, not amino. The schema files can be found in the `/proto`
+directory. For more, see "Protobuf," below.
+
+### Blockchain Protocol
+
+* `Header#LastResultsHash`, which is the root hash of a Merkle tree built from
+  `ResponseDeliverTx(Code, Data)`, as of v0.34 also includes the `GasWanted` and `GasUsed`
+  fields.
+
+* Merkle hashes of empty trees previously returned nothing, but now return the hash of an empty input,
+  to conform with [RFC-6962](https://tools.ietf.org/html/rfc6962).
+  This mainly affects `Header#DataHash`, `Header#LastResultsHash`, and
+  `Header#EvidenceHash`, which are often empty. Non-empty hashes can also be affected, e.g. if their
+  inputs depend on other (empty) Merkle hashes, giving different results.
+
+### Transaction Indexing
+
+Tendermint now relies on the application to tell it which transactions to index. This means that
+in the `config.toml`, generated by Tendermint, there is no longer a way to specify which
+transactions to index. `tx.height` and `tx.hash` will always be indexed when using the `kv` indexer.
+
+Applications must now choose to either a) enable indexing for all transactions, or
+b) allow node operators to decide which transactions to index.
+Applications can notify Tendermint to index a specific transaction by setting
+`Index: bool` to `true` in the Event Attribute:
+
+```go
+[]types.Event{
+	{
+		Type: "app",
+		Attributes: []types.EventAttribute{
+			{Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko"), Index: true},
+		},
+	},
+}
+```
+
+### Protocol Buffers
+
+Tendermint 0.34 replaces Amino with Protocol Buffers for encoding.
+This migration is extensive and results in a number of changes; however,
+Tendermint only uses the types generated from Protocol Buffers for disk and
+wire serialization.
+**This means that these changes should not affect you as a Tendermint user.**
+
+However, Tendermint users and contributors may note the following changes:
+
+* Directory layout changes: All proto files have been moved under one directory, `/proto`.
+  This is in line with the recommended file layout by [Buf](https://buf.build).
+  For more, see the [Buf documentation](https://buf.build/docs/lint-checkers#file_layout).
+* ABCI Changes: As noted in the "ABCI Changes" section above, the `PublicKey` type now uses
+  a `oneof` type.
+
+For more on the Protobuf changes, please see our [blog post on this migration](https://medium.com/tendermint/tendermint-0-34-protocol-buffers-and-you-8c40558939ae).
+
+### Consensus Parameters
+
+Tendermint 0.34 includes new and updated consensus parameters.
+
+#### Version Parameters (New)
+
+* `AppVersion`, which is the version of the ABCI application.
+
+#### Evidence Parameters
+
+* `MaxBytes`, which caps the total amount of evidence. The default is 1048576 (1 MB).
+
+### Crypto
+
+#### Keys
+
+* Keys no longer include a type prefix. For example, ed25519 pubkeys have been renamed from
+  `PubKeyEd25519` to `PubKey`. This reduces stutter (e.g., `ed25519.PubKey`).
+* Keys are now byte slices (`[]byte`) instead of byte arrays (`[<size>]byte`).
+* The multisig functionality that was previously in Tendermint now has
+  a new home within the Cosmos SDK:
+  [`cosmos/cosmos-sdk/types/multisig`](https://github.com/cosmos/cosmos-sdk/blob/master/crypto/types/multisig/multisignature.go).
+
+#### `merkle` Package
+
+* `SimpleHashFromMap()` and `SimpleProofsFromMap()` were removed.
+* The prefix `Simple` has been removed. (For example, `SimpleProof` is now called `Proof`.)
+* All protobuf messages have been moved to the `/proto` directory.
+* The protobuf message `Proof` that contained multiple `ProofOp`s has been renamed to `ProofOps`.
+  As noted above, this affects the ABCI type `ResponseQuery`:
+  the field that was named `Proof` is now named `ProofOps`.
+* `HashFromByteSlices` and `ProofsFromByteSlices` now return a hash for empty inputs, to conform with
+  [RFC-6962](https://tools.ietf.org/html/rfc6962).
+
+### `libs` Package
+
+The `bech32` package has moved to the Cosmos SDK:
+[`cosmos/cosmos-sdk/types/bech32`](https://github.com/cosmos/cosmos-sdk/tree/4173ea5ebad906dd9b45325bed69b9c655504867/types/bech32).
+
+### CLI
+
+The `tendermint lite` command has been renamed to `tendermint light` and has a slightly different API.
+See [the docs](https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html#http-proxy) for details.
+
+### Light Client
+
+We have a new, rewritten light client! You can
+[read more](https://medium.com/tendermint/everything-you-need-to-know-about-the-tendermint-light-client-f80d03856f98)
+about the justifications and details behind this change.
+
+Other user-relevant changes include:
+
+* The old `lite` package was removed; the new light client uses the `light` package.
+* The `Verifier` was broken up into two pieces:
+  * Core verification logic (pure `VerifyX` functions)
+  * `Client` object, which represents the complete light client
+* The new light client stores headers and validator sets as `LightBlock`s.
+* The RPC client can be found in the `/rpc` directory.
+* The HTTP(S) proxy is located in the `/proxy` directory.
+
+### `state` Package
+
+* A new field `State.InitialHeight` has been added to record the initial chain height, which must be `1`
+  (not `0`) if starting from height `1`. This can be configured via the genesis field `initial_height`.
+* The `state` package now has a `Store` interface. All functions in
+  [state/store.go](https://github.com/tendermint/tendermint/blob/56911ee35298191c95ef1c7d3d5ec508237aaff4/state/store.go#L42-L42)
+  are now part of the interface. The interface returns errors on all methods and can be created by calling `state.NewStore(dbm.DB)`.
+
+### `privval` Package
+
+All requests are now accompanied by the chain ID from the network.
+This is an optional field and can be ignored by key management systems;
+however, if you are using the same key management system for multiple different
+blockchains, we recommend that you check the chain ID.
+
+### RPC
+
+* `/unsafe_start_cpu_profiler`, `/unsafe_stop_cpu_profiler` and
+  `/unsafe_write_heap_profile` were removed.
+  For profiling, please use the pprof server, which can
+  be enabled through the `--rpc.pprof_laddr=X` flag or the `pprof_laddr=X` config setting
+  in the rpc section.
+* The `Content-Type` header returned on RPC calls is now (correctly) set as `application/json`.
+
+### Version
+
+Version is now set through the Go linker flags `ld_flags`. Applications that are using Tendermint as a library should set this at compile time.
+
+Example:
+
+```sh
+go install -mod=readonly -ldflags "-X github.com/tendermint/tendermint/version.TMCoreSemVer=$(go list -m github.com/tendermint/tendermint | sed 's/ /\@/g') -s -w " -trimpath ./cmd
+```
+
+Additionally, the exported constant `version.Version` is now `version.TMCoreSemVer`.

## v0.33.4

### Go API

-- `rpc/client` HTTP and local clients have been moved into `http` and `local` subpackages, and their constructors have been renamed to `New()`.
+* `rpc/client` HTTP and local clients have been moved into `http` and `local`
+  subpackages, and their constructors have been renamed to `New()`; see the sketch after this section.
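
For example, constructing the relocated HTTP client might look like this (a sketch against the v0.33.4 API; the address and endpoint are illustrative):

```go
package main

import (
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// The HTTP client now lives in the rpc/client/http subpackage and is
	// constructed with New.
	c, err := rpchttp.New("tcp://127.0.0.1:26657", "/websocket")
	if err != nil {
		panic(err)
	}

	// Status uses the v0.33 signature here (no context argument).
	status, err := c.Status()
	if err != nil {
		panic(err)
	}
	fmt.Println("latest height:", status.SyncInfo.LatestBlockHeight)
}
```
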
### Protobuf Changes

-When upgrading to version 0.33.4 you will have to fetch the `third_party` directory along with the updated proto files.
+When upgrading to version 0.33.4 you will have to fetch the `third_party`
+directory along with the updated proto files.
+
+### Block Retention
+
+`ResponseCommit` gained a field for block retention, which the application can
+use to tell Tendermint how to prune blocks. If an application would like to not
+prune any blocks, it should pass `0` in this field.
+
+```proto
+message ResponseCommit {
+  // reserve 1
+  bytes data = 2; // the Merkle root hash
+
++ uint64 retain_height = 3; // the oldest block height to retain
+}
+```

## v0.33.0

-This release is not compatible with previous blockchains due to commit becoming signatures only and fields in the header have been removed.
+This release is not compatible with previous blockchains, as commits now consist of
+signatures only and several fields have been removed from the header.
+
+### Blockchain Protocol
+
+`TotalTxs` and `NumTxs` were removed from the header. `Commit` now consists
+mostly of just signatures.
+
+```go
+type Commit struct {
+	Height     int64
+	Round      int
+	BlockID    BlockID
+	Signatures []CommitSig
+}
+```
+
+```go
+type BlockIDFlag byte
+
+const (
+	// BlockIDFlagAbsent - no vote was received from a validator.
+	BlockIDFlagAbsent BlockIDFlag = 0x01
+	// BlockIDFlagCommit - voted for the Commit.BlockID.
+	BlockIDFlagCommit = 0x02
+	// BlockIDFlagNil - voted for nil.
+	BlockIDFlagNil = 0x03
+)
+
+type CommitSig struct {
+	BlockIDFlag      BlockIDFlag
+	ValidatorAddress Address
+	Timestamp        time.Time
+	Signature        []byte
+}
+```
+
+See [\#63](https://github.com/tendermint/spec/pull/63) for the complete spec
+change.
+
+### P2P Protocol
+
+The secret connection now includes transcript hashing. If you want to
+implement a handshake (or otherwise have an existing implementation), you'll
+need to make the same changes that were made
+[here](https://github.com/tendermint/tendermint/pull/3668).

### Config Changes

You will need to generate a new config if you have used a prior version of tendermint.

-- Tags have been entirely renamed throughout the codebase to events and there keys are called [compositeKeys](https://github.com/tendermint/tendermint/blob/6d05c531f7efef6f0619155cf10ae8557dd7832f/docs/app-dev/indexing-transactions.md).
-- Evidence Params has been changed to include duration.
- - `consensus_params.evidence.max_age_duration`.
- - Renamed `consensus_params.evidence.max_age` to `max_age_num_blocks`.
+Tags have been entirely renamed throughout the codebase to events, and their
+keys are called
+[compositeKeys](https://github.com/tendermint/tendermint/blob/6d05c531f7efef6f0619155cf10ae8557dd7832f/docs/app-dev/indexing-transactions.md).
+
+Evidence Params have been changed to include a duration:
+
+* `consensus_params.evidence.max_age_duration`.
+* Renamed `consensus_params.evidence.max_age` to `max_age_num_blocks`.

### Go API

-- `libs/common` has been removed in favor of specific pkgs.
- - `async`
- - `service`
- - `rand`
- - `net`
- - `strings`
- - `cmap`
-- removal of `errors` pkg
+* `libs/common` has been removed in favor of specific pkgs.
+  * `async`
+  * `service`
+  * `rand`
+  * `net`
+  * `strings`
+  * `cmap`
+* removal of `errors` pkg

### RPC Changes

-- `/validators` is now paginated (default: 30 vals per page)
-- `/block_results` response format updated [see RPC docs for details](https://docs.tendermint.com/master/rpc/#/Info/block_results)
-- Event suffix has been removed from the ID in event responses
-- IDs are now integers not `json-client-XYZ`
+* `/validators` is now paginated (default: 30 vals per page)
+* `/block_results` response format updated [see RPC docs for details](https://docs.tendermint.com/master/rpc/#/Info/block_results)
+* Event suffix has been removed from the ID in event responses
+* IDs are now integers, not `json-client-XYZ`

## v0.32.0

@@ -163,7 +418,7 @@ the compilation tag:

Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
use `make build_c` / `make install_c` (full instructions can be found at
-https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support)
+)

## v0.31.0

@@ -238,14 +493,14 @@ due to changes in how various data structures are hashed.

Any implementations of Tendermint blockchain verification, including lite clients,
will need to be updated. For specific details:

-- [Merkle tree](https://github.com/tendermint/spec/blob/master/spec/blockchain/encoding.md#merkle-trees)
-- [ConsensusParams](https://github.com/tendermint/spec/blob/master/spec/blockchain/state.md#consensusparams)
+* [Merkle tree](https://github.com/tendermint/spec/blob/master/spec/blockchain/encoding.md#merkle-trees)
+* [ConsensusParams](https://github.com/tendermint/spec/blob/master/spec/blockchain/state.md#consensusparams)

There was also a small change to field ordering in the vote struct. Any
implementations of an out-of-process validator (like a Key-Management Server)
will need to be updated. For specific details:

-- [Vote](https://github.com/tendermint/spec/blob/master/spec/consensus/signing.md#votes)
+* [Vote](https://github.com/tendermint/spec/blob/master/spec/consensus/signing.md#votes)

Finally, the proposer selection algorithm continues to evolve. See the
[work-in-progress

@@ -281,7 +536,7 @@ To upgrade manually, use the provided `privValUpgrade.go` script, with exact pat

to use the default paths, of `config/priv_validator_key.json` and
`data/priv_validator_state.json`, respectively:

-```
+```sh
go run scripts/privValUpgrade.go
```

@@ -351,8 +606,8 @@ old data to be compatible with the new version.

To reset the state do:

-```
-$ tendermint unsafe_reset_all
+```sh
+tendermint unsafe_reset_all
```

Here we summarize some other notable changes to be mindful of.

@@ -389,7 +644,7 @@ the root of another.
If you don't need this functionality, and you used to return `` here, you should instead return a single `ProofOp` with just the `Data` field set: -``` +```go []ProofOp{ ProofOp{ Data: , @@ -399,10 +654,10 @@ just the `Data` field set: For more information, see: -- [ADR-026](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/docs/architecture/adr-026-general-merkle-proof.md) -- [Relevant ABCI +* [ADR-026](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/docs/architecture/adr-026-general-merkle-proof.md) +* [Relevant ABCI documentation](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/docs/spec/abci/apps.md#query-proofs) -- [Description of +* [Description of keys](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/crypto/merkle/proof_key_path.go#L14) ### Go API Changes @@ -437,8 +692,8 @@ old data to be compatible with the new version. To reset the state do: -``` -$ tendermint unsafe_reset_all +```sh +tendermint unsafe_reset_all ``` Here we summarize some other notable changes to be mindful of. @@ -448,7 +703,7 @@ Here we summarize some other notable changes to be mindful of. `p2p.max_num_peers` was removed in favor of `p2p.max_num_inbound_peers` and `p2p.max_num_outbound_peers`. -``` +```toml # Maximum number of inbound peers max_num_inbound_peers = 40 diff --git a/Vagrantfile b/Vagrantfile deleted file mode 100644 index 695d1333c..000000000 --- a/Vagrantfile +++ /dev/null @@ -1,62 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -Vagrant.configure("2") do |config| - config.vm.box = "ubuntu/xenial64" - - config.vm.provider "virtualbox" do |v| - v.memory = 4096 - v.cpus = 2 - end - - config.vm.provision "shell", inline: <<-SHELL - apt-get update - - # install base requirements - apt-get install -y --no-install-recommends wget curl jq zip \ - make shellcheck bsdmainutils psmisc - apt-get install -y language-pack-en - - # install docker - apt-get install -y --no-install-recommends apt-transport-https \ - ca-certificates curl software-properties-common - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ - $(lsb_release -cs) \ - stable" - apt-get install -y docker-ce - usermod -a -G docker vagrant - - # install go - wget -q https://dl.google.com/go/go1.13.linux-amd64.tar.gz - tar -xvf go1.13.linux-amd64.tar.gz - mv go /usr/local - rm -f go1.13.linux-amd64.tar.gz - - # install nodejs (for docs) - curl -sL https://deb.nodesource.com/setup_11.x | bash - - apt-get install -y nodejs - - # cleanup - apt-get autoremove -y - - # set env variables - echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile - echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile - echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile - echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile - echo 'cd go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile - - mkdir -p /home/vagrant/go/bin - mkdir -p /home/vagrant/go/src/github.com/tendermint - ln -s /vagrant /home/vagrant/go/src/github.com/tendermint/tendermint - - chown -R vagrant:vagrant /home/vagrant/go - chown vagrant:vagrant /home/vagrant/.bash_profile - - # get all deps and tools, ready to install/test - su - vagrant -c 'source /home/vagrant/.bash_profile' - su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && 
make tools' - SHELL -end diff --git a/abci/README.md b/abci/README.md index cd0e9bbd9..6399f5901 100644 --- a/abci/README.md +++ b/abci/README.md @@ -27,11 +27,10 @@ A detailed description of the ABCI methods and message types is contained in: To compile the protobuf file, run (from the root of the repo): -``` +```sh make protoc_abci ``` See `protoc --help` and [the Protocol Buffers site](https://developers.google.com/protocol-buffers) for details on compiling for other languages. Note we also include a [GRPC](https://www.grpc.io/docs) service definition. - diff --git a/abci/client/client.go b/abci/client/client.go index 4f7c7b69a..ede559fdc 100644 --- a/abci/client/client.go +++ b/abci/client/client.go @@ -6,6 +6,7 @@ import ( "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/service" + tmsync "github.com/tendermint/tendermint/libs/sync" ) const ( @@ -13,6 +14,8 @@ const ( echoRetryIntervalSeconds = 1 ) +//go:generate mockery --case underscore --name Client + // Client defines an interface for an ABCI client. // All `Async` methods return a `ReqRes` object. // All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error. @@ -27,7 +30,6 @@ type Client interface { FlushAsync() *ReqRes EchoAsync(msg string) *ReqRes InfoAsync(types.RequestInfo) *ReqRes - SetOptionAsync(types.RequestSetOption) *ReqRes DeliverTxAsync(types.RequestDeliverTx) *ReqRes CheckTxAsync(types.RequestCheckTx) *ReqRes QueryAsync(types.RequestQuery) *ReqRes @@ -35,11 +37,14 @@ type Client interface { InitChainAsync(types.RequestInitChain) *ReqRes BeginBlockAsync(types.RequestBeginBlock) *ReqRes EndBlockAsync(types.RequestEndBlock) *ReqRes + ListSnapshotsAsync(types.RequestListSnapshots) *ReqRes + OfferSnapshotAsync(types.RequestOfferSnapshot) *ReqRes + LoadSnapshotChunkAsync(types.RequestLoadSnapshotChunk) *ReqRes + ApplySnapshotChunkAsync(types.RequestApplySnapshotChunk) *ReqRes FlushSync() error EchoSync(msg string) (*types.ResponseEcho, error) InfoSync(types.RequestInfo) (*types.ResponseInfo, error) - SetOptionSync(types.RequestSetOption) (*types.ResponseSetOption, error) DeliverTxSync(types.RequestDeliverTx) (*types.ResponseDeliverTx, error) CheckTxSync(types.RequestCheckTx) (*types.ResponseCheckTx, error) QuerySync(types.RequestQuery) (*types.ResponseQuery, error) @@ -47,6 +52,10 @@ type Client interface { InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error) BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error) EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error) + ListSnapshotsSync(types.RequestListSnapshots) (*types.ResponseListSnapshots, error) + OfferSnapshotSync(types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) + LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) + ApplySnapshotChunkSync(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) } //---------------------------------------- @@ -76,7 +85,7 @@ type ReqRes struct { *sync.WaitGroup *types.Response // Not set atomically, so be sure to use WaitGroup. - mtx sync.Mutex + mtx tmsync.Mutex done bool // Gets set to true once *after* WaitGroup.Done(). cb func(*types.Response) // A single callback that may be set. 
} diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 01583bc1f..98ae9073f 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -12,6 +12,7 @@ import ( "github.com/tendermint/tendermint/abci/types" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" + tmsync "github.com/tendermint/tendermint/libs/sync" ) var _ Client = (*grpcClient)(nil) @@ -22,10 +23,11 @@ type grpcClient struct { service.BaseService mustConnect bool - client types.ABCIApplicationClient - conn *grpc.ClientConn + client types.ABCIApplicationClient + conn *grpc.ClientConn + chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool - mtx sync.Mutex + mtx tmsync.Mutex addr string err error resCb func(*types.Request, *types.Response) // listens to all callbacks @@ -35,6 +37,13 @@ func NewGRPCClient(addr string, mustConnect bool) Client { cli := &grpcClient{ addr: addr, mustConnect: mustConnect, + // Buffering the channel is needed to make calls appear asynchronous, + // which is required when the caller makes multiple async calls before + // processing callbacks (e.g. due to holding locks). 64 means that a + // caller can make up to 64 async calls before a callback must be + // processed (otherwise it deadlocks). It also means that we can make 64 + // gRPC calls while processing a slow callback at the channel head. + chReqRes: make(chan *ReqRes, 64), } cli.BaseService = *service.NewBaseService(nil, "grpcClient", cli) return cli @@ -48,6 +57,37 @@ func (cli *grpcClient) OnStart() error { if err := cli.BaseService.OnStart(); err != nil { return err } + + // This processes asynchronous request/response messages and dispatches + // them to callbacks. + go func() { + // Use a separate function to use defer for mutex unlocks (this handles panics) + callCb := func(reqres *ReqRes) { + cli.mtx.Lock() + defer cli.mtx.Unlock() + + reqres.SetDone() + reqres.Done() + + // Notify client listener if set + if cli.resCb != nil { + cli.resCb(reqres.Request, reqres.Response) + } + + // Notify reqRes listener if set + if cb := reqres.GetCallback(); cb != nil { + cb(reqres.Response) + } + } + for reqres := range cli.chReqRes { + if reqres != nil { + callCb(reqres) + } else { + cli.Logger.Error("Received nil reqres") + } + } + }() + RETRY_LOOP: for { conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) @@ -85,6 +125,7 @@ func (cli *grpcClient) OnStop() { if cli.conn != nil { cli.conn.Close() } + close(cli.chReqRes) } func (cli *grpcClient) StopForError(err error) { @@ -99,7 +140,9 @@ func (cli *grpcClient) StopForError(err error) { cli.mtx.Unlock() cli.Logger.Error(fmt.Sprintf("Stopping abci.grpcClient for error: %v", err.Error())) - cli.Stop() + if err := cli.Stop(); err != nil { + cli.Logger.Error("Error stopping abci.grpcClient", "err", err) + } } func (cli *grpcClient) Error() error { @@ -151,15 +194,6 @@ func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes { return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{Info: res}}) } -func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes { - req := types.ToRequestSetOption(params) - res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.WaitForReady(true)) - if err != nil { - cli.StopForError(err) - } - return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{SetOption: res}}) -} - func (cli *grpcClient) DeliverTxAsync(params 
types.RequestDeliverTx) *ReqRes { req := types.ToRequestDeliverTx(params) res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.WaitForReady(true)) @@ -223,31 +257,79 @@ func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes { return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}}) } -func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes { - reqres := NewReqRes(req) - reqres.Response = res // Set response - reqres.Done() // Release waiters - reqres.SetDone() // so reqRes.SetCallback will run the callback +func (cli *grpcClient) ListSnapshotsAsync(params types.RequestListSnapshots) *ReqRes { + req := types.ToRequestListSnapshots(params) + res, err := cli.client.ListSnapshots(context.Background(), req.GetListSnapshots(), grpc.WaitForReady(true)) + if err != nil { + cli.StopForError(err) + } + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}}) +} - // goroutine for callbacks - go func() { - cli.mtx.Lock() - defer cli.mtx.Unlock() +func (cli *grpcClient) OfferSnapshotAsync(params types.RequestOfferSnapshot) *ReqRes { + req := types.ToRequestOfferSnapshot(params) + res, err := cli.client.OfferSnapshot(context.Background(), req.GetOfferSnapshot(), grpc.WaitForReady(true)) + if err != nil { + cli.StopForError(err) + } + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}}) +} - // Notify client listener if set - if cli.resCb != nil { - cli.resCb(reqres.Request, res) - } +func (cli *grpcClient) LoadSnapshotChunkAsync(params types.RequestLoadSnapshotChunk) *ReqRes { + req := types.ToRequestLoadSnapshotChunk(params) + res, err := cli.client.LoadSnapshotChunk(context.Background(), req.GetLoadSnapshotChunk(), grpc.WaitForReady(true)) + if err != nil { + cli.StopForError(err) + } + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}}) +} - // Notify reqRes listener if set - if cb := reqres.GetCallback(); cb != nil { - cb(res) - } - }() +func (cli *grpcClient) ApplySnapshotChunkAsync(params types.RequestApplySnapshotChunk) *ReqRes { + req := types.ToRequestApplySnapshotChunk(params) + res, err := cli.client.ApplySnapshotChunk(context.Background(), req.GetApplySnapshotChunk(), grpc.WaitForReady(true)) + if err != nil { + cli.StopForError(err) + } + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}}) +} +// finishAsyncCall creates a ReqRes for an async call, and immediately populates it +// with the response. We don't complete it until it's been ordered via the channel. +func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes { + reqres := NewReqRes(req) + reqres.Response = res + cli.chReqRes <- reqres // use channel for async responses, since they must be ordered return reqres } +// finishSyncCall waits for an async call to complete. It is necessary to call all +// sync calls asynchronously as well, to maintain call and response ordering via +// the channel, and this method will wait until the async call completes. +func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response { + // It's possible that the callback is called twice, since the callback can + // be called immediately on SetCallback() in addition to after it has been + // set. 
This is because completing the ReqRes happens in a separate critical + // section from the one where the callback is called: there is a race where + // SetCallback() is called between completing the ReqRes and dispatching the + // callback. + // + // We also buffer the channel with 1 response, since SetCallback() will be + // called synchronously if the reqres is already completed, in which case + // it will block on sending to the channel since it hasn't gotten around to + // receiving from it yet. + // + // ReqRes should really handle callback dispatch internally, to guarantee + // that it's only called once and avoid the above race conditions. + var once sync.Once + ch := make(chan *types.Response, 1) + reqres.SetCallback(func(res *types.Response) { + once.Do(func() { + ch <- res + }) + }) + return <-ch +} + //---------------------------------------- func (cli *grpcClient) FlushSync() error { @@ -257,50 +339,67 @@ func (cli *grpcClient) FlushSync() error { func (cli *grpcClient) EchoSync(msg string) (*types.ResponseEcho, error) { reqres := cli.EchoAsync(msg) // StopForError should already have been called if error is set - return reqres.Response.GetEcho(), cli.Error() + return cli.finishSyncCall(reqres).GetEcho(), cli.Error() } func (cli *grpcClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { reqres := cli.InfoAsync(req) - return reqres.Response.GetInfo(), cli.Error() -} - -func (cli *grpcClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) { - reqres := cli.SetOptionAsync(req) - return reqres.Response.GetSetOption(), cli.Error() + return cli.finishSyncCall(reqres).GetInfo(), cli.Error() } func (cli *grpcClient) DeliverTxSync(params types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { reqres := cli.DeliverTxAsync(params) - return reqres.Response.GetDeliverTx(), cli.Error() + return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error() } func (cli *grpcClient) CheckTxSync(params types.RequestCheckTx) (*types.ResponseCheckTx, error) { reqres := cli.CheckTxAsync(params) - return reqres.Response.GetCheckTx(), cli.Error() + return cli.finishSyncCall(reqres).GetCheckTx(), cli.Error() } func (cli *grpcClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) { reqres := cli.QueryAsync(req) - return reqres.Response.GetQuery(), cli.Error() + return cli.finishSyncCall(reqres).GetQuery(), cli.Error() } func (cli *grpcClient) CommitSync() (*types.ResponseCommit, error) { reqres := cli.CommitAsync() - return reqres.Response.GetCommit(), cli.Error() + return cli.finishSyncCall(reqres).GetCommit(), cli.Error() } func (cli *grpcClient) InitChainSync(params types.RequestInitChain) (*types.ResponseInitChain, error) { reqres := cli.InitChainAsync(params) - return reqres.Response.GetInitChain(), cli.Error() + return cli.finishSyncCall(reqres).GetInitChain(), cli.Error() } func (cli *grpcClient) BeginBlockSync(params types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { reqres := cli.BeginBlockAsync(params) - return reqres.Response.GetBeginBlock(), cli.Error() + return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error() } func (cli *grpcClient) EndBlockSync(params types.RequestEndBlock) (*types.ResponseEndBlock, error) { reqres := cli.EndBlockAsync(params) - return reqres.Response.GetEndBlock(), cli.Error() + return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error() +} + +func (cli *grpcClient) ListSnapshotsSync(params types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + reqres := 
cli.ListSnapshotsAsync(params) + return cli.finishSyncCall(reqres).GetListSnapshots(), cli.Error() +} + +func (cli *grpcClient) OfferSnapshotSync(params types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + reqres := cli.OfferSnapshotAsync(params) + return cli.finishSyncCall(reqres).GetOfferSnapshot(), cli.Error() +} + +func (cli *grpcClient) LoadSnapshotChunkSync( + params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + reqres := cli.LoadSnapshotChunkAsync(params) + return cli.finishSyncCall(reqres).GetLoadSnapshotChunk(), cli.Error() +} + +func (cli *grpcClient) ApplySnapshotChunkSync( + params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + reqres := cli.ApplySnapshotChunkAsync(params) + return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error() } diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 3946bfbc5..293d5f549 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -1,10 +1,9 @@ package abcicli import ( - "sync" - types "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/service" + tmsync "github.com/tendermint/tendermint/libs/sync" ) var _ Client = (*localClient)(nil) @@ -16,14 +15,14 @@ var _ Client = (*localClient)(nil) type localClient struct { service.BaseService - mtx *sync.Mutex + mtx *tmsync.Mutex types.Application Callback } -func NewLocalClient(mtx *sync.Mutex, app types.Application) Client { +func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client { if mtx == nil { - mtx = new(sync.Mutex) + mtx = new(tmsync.Mutex) } cli := &localClient{ mtx: mtx, @@ -70,17 +69,6 @@ func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes { ) } -func (app *localClient) SetOptionAsync(req types.RequestSetOption) *ReqRes { - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.SetOption(req) - return app.callback( - types.ToRequestSetOption(req), - types.ToResponseSetOption(res), - ) -} - func (app *localClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes { app.mtx.Lock() defer app.mtx.Unlock() @@ -158,6 +146,50 @@ func (app *localClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes { ) } +func (app *localClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.ListSnapshots(req) + return app.callback( + types.ToRequestListSnapshots(req), + types.ToResponseListSnapshots(res), + ) +} + +func (app *localClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.OfferSnapshot(req) + return app.callback( + types.ToRequestOfferSnapshot(req), + types.ToResponseOfferSnapshot(res), + ) +} + +func (app *localClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.LoadSnapshotChunk(req) + return app.callback( + types.ToRequestLoadSnapshotChunk(req), + types.ToResponseLoadSnapshotChunk(res), + ) +} + +func (app *localClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.ApplySnapshotChunk(req) + return app.callback( + types.ToRequestApplySnapshotChunk(req), + types.ToResponseApplySnapshotChunk(res), + ) +} + //------------------------------------------------------- func (app *localClient) FlushSync() error { @@ -176,14 +208,6 @@ func (app 
*localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, er return &res, nil } -func (app *localClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) { - app.mtx.Lock() - defer app.mtx.Unlock() - - res := app.Application.SetOption(req) - return &res, nil -} - func (app *localClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { app.mtx.Lock() defer app.mtx.Unlock() @@ -240,6 +264,40 @@ func (app *localClient) EndBlockSync(req types.RequestEndBlock) (*types.Response return &res, nil } +func (app *localClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.ListSnapshots(req) + return &res, nil +} + +func (app *localClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.OfferSnapshot(req) + return &res, nil +} + +func (app *localClient) LoadSnapshotChunkSync( + req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.LoadSnapshotChunk(req) + return &res, nil +} + +func (app *localClient) ApplySnapshotChunkSync( + req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.ApplySnapshotChunk(req) + return &res, nil +} + //------------------------------------------------------- func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes { diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go new file mode 100644 index 000000000..2ff84183c --- /dev/null +++ b/abci/client/mocks/client.go @@ -0,0 +1,697 @@ +// Code generated by mockery v2.3.0. DO NOT EDIT. 
+ +package mocks + +import ( + abcicli "github.com/tendermint/tendermint/abci/client" + log "github.com/tendermint/tendermint/libs/log" + + mock "github.com/stretchr/testify/mock" + + types "github.com/tendermint/tendermint/abci/types" +) + +// Client is an autogenerated mock type for the Client type +type Client struct { + mock.Mock +} + +// ApplySnapshotChunkAsync provides a mock function with given fields: _a0 +func (_m *Client) ApplySnapshotChunkAsync(_a0 types.RequestApplySnapshotChunk) *abcicli.ReqRes { + ret := _m.Called(_a0) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// ApplySnapshotChunkSync provides a mock function with given fields: _a0 +func (_m *Client) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseApplySnapshotChunk + if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestApplySnapshotChunk) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BeginBlockAsync provides a mock function with given fields: _a0 +func (_m *Client) BeginBlockAsync(_a0 types.RequestBeginBlock) *abcicli.ReqRes { + ret := _m.Called(_a0) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *abcicli.ReqRes); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// BeginBlockSync provides a mock function with given fields: _a0 +func (_m *Client) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseBeginBlock + if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *types.ResponseBeginBlock); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseBeginBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestBeginBlock) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CheckTxAsync provides a mock function with given fields: _a0 +func (_m *Client) CheckTxAsync(_a0 types.RequestCheckTx) *abcicli.ReqRes { + ret := _m.Called(_a0) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *abcicli.ReqRes); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// CheckTxSync provides a mock function with given fields: _a0 +func (_m *Client) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseCheckTx + if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *types.ResponseCheckTx); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseCheckTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestCheckTx) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitAsync provides a mock function with given fields: +func (_m *Client) CommitAsync() *abcicli.ReqRes { + ret := _m.Called() + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok 
{ + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// CommitSync provides a mock function with given fields: +func (_m *Client) CommitSync() (*types.ResponseCommit, error) { + ret := _m.Called() + + var r0 *types.ResponseCommit + if rf, ok := ret.Get(0).(func() *types.ResponseCommit); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseCommit) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeliverTxAsync provides a mock function with given fields: _a0 +func (_m *Client) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli.ReqRes { + ret := _m.Called(_a0) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *abcicli.ReqRes); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// DeliverTxSync provides a mock function with given fields: _a0 +func (_m *Client) DeliverTxSync(_a0 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseDeliverTx + if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *types.ResponseDeliverTx); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseDeliverTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestDeliverTx) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EchoAsync provides a mock function with given fields: msg +func (_m *Client) EchoAsync(msg string) *abcicli.ReqRes { + ret := _m.Called(msg) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(string) *abcicli.ReqRes); ok { + r0 = rf(msg) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// EchoSync provides a mock function with given fields: msg +func (_m *Client) EchoSync(msg string) (*types.ResponseEcho, error) { + ret := _m.Called(msg) + + var r0 *types.ResponseEcho + if rf, ok := ret.Get(0).(func(string) *types.ResponseEcho); ok { + r0 = rf(msg) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseEcho) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(msg) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EndBlockAsync provides a mock function with given fields: _a0 +func (_m *Client) EndBlockAsync(_a0 types.RequestEndBlock) *abcicli.ReqRes { + ret := _m.Called(_a0) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *abcicli.ReqRes); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// EndBlockSync provides a mock function with given fields: _a0 +func (_m *Client) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlock, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseEndBlock + if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *types.ResponseEndBlock); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseEndBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestEndBlock) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Error provides a mock function with given fields: +func (_m *Client) Error() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = 
ret.Error(0) + } + + return r0 +} + +// FlushAsync provides a mock function with given fields: +func (_m *Client) FlushAsync() *abcicli.ReqRes { + ret := _m.Called() + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// FlushSync provides a mock function with given fields: +func (_m *Client) FlushSync() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InfoAsync provides a mock function with given fields: _a0 +func (_m *Client) InfoAsync(_a0 types.RequestInfo) *abcicli.ReqRes { + ret := _m.Called(_a0) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestInfo) *abcicli.ReqRes); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// InfoSync provides a mock function with given fields: _a0 +func (_m *Client) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseInfo + if rf, ok := ret.Get(0).(func(types.RequestInfo) *types.ResponseInfo); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestInfo) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// InitChainAsync provides a mock function with given fields: _a0 +func (_m *Client) InitChainAsync(_a0 types.RequestInitChain) *abcicli.ReqRes { + ret := _m.Called(_a0) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestInitChain) *abcicli.ReqRes); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// InitChainSync provides a mock function with given fields: _a0 +func (_m *Client) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInitChain, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseInitChain + if rf, ok := ret.Get(0).(func(types.RequestInitChain) *types.ResponseInitChain); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseInitChain) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestInitChain) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsRunning provides a mock function with given fields: +func (_m *Client) IsRunning() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// ListSnapshotsAsync provides a mock function with given fields: _a0 +func (_m *Client) ListSnapshotsAsync(_a0 types.RequestListSnapshots) *abcicli.ReqRes { + ret := _m.Called(_a0) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *abcicli.ReqRes); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// ListSnapshotsSync provides a mock function with given fields: _a0 +func (_m *Client) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseListSnapshots + if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *types.ResponseListSnapshots); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*types.ResponseListSnapshots) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestListSnapshots) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LoadSnapshotChunkAsync provides a mock function with given fields: _a0 +func (_m *Client) LoadSnapshotChunkAsync(_a0 types.RequestLoadSnapshotChunk) *abcicli.ReqRes { + ret := _m.Called(_a0) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// LoadSnapshotChunkSync provides a mock function with given fields: _a0 +func (_m *Client) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseLoadSnapshotChunk + if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestLoadSnapshotChunk) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OfferSnapshotAsync provides a mock function with given fields: _a0 +func (_m *Client) OfferSnapshotAsync(_a0 types.RequestOfferSnapshot) *abcicli.ReqRes { + ret := _m.Called(_a0) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *abcicli.ReqRes); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// OfferSnapshotSync provides a mock function with given fields: _a0 +func (_m *Client) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseOfferSnapshot + if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseOfferSnapshot) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestOfferSnapshot) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OnReset provides a mock function with given fields: +func (_m *Client) OnReset() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnStart provides a mock function with given fields: +func (_m *Client) OnStart() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnStop provides a mock function with given fields: +func (_m *Client) OnStop() { + _m.Called() +} + +// QueryAsync provides a mock function with given fields: _a0 +func (_m *Client) QueryAsync(_a0 types.RequestQuery) *abcicli.ReqRes { + ret := _m.Called(_a0) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestQuery) *abcicli.ReqRes); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// QuerySync provides a mock function with given fields: _a0 +func (_m *Client) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseQuery + if rf, ok := ret.Get(0).(func(types.RequestQuery) *types.ResponseQuery); ok { + r0 
= rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseQuery) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestQuery) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Quit provides a mock function with given fields: +func (_m *Client) Quit() <-chan struct{} { + ret := _m.Called() + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Reset provides a mock function with given fields: +func (_m *Client) Reset() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetLogger provides a mock function with given fields: _a0 +func (_m *Client) SetLogger(_a0 log.Logger) { + _m.Called(_a0) +} + +// SetResponseCallback provides a mock function with given fields: _a0 +func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) { + _m.Called(_a0) +} + +// Start provides a mock function with given fields: +func (_m *Client) Start() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Stop provides a mock function with given fields: +func (_m *Client) Stop() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// String provides a mock function with given fields: +func (_m *Client) String() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 7898a8f26..040ac2d37 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -8,24 +8,22 @@ import ( "io" "net" "reflect" - "sync" "time" "github.com/tendermint/tendermint/abci/types" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" + tmsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/libs/timer" ) -const reqQueueSize = 256 // TODO make configurable -// const maxResponseSize = 1048576 // 1MB TODO make configurable -const flushThrottleMS = 20 // Don't wait longer than... - -var _ Client = (*socketClient)(nil) +const ( + reqQueueSize = 256 // TODO make configurable + flushThrottleMS = 20 // Don't wait longer than... +) -// This is goroutine-safe, but users should beware that -// the application in general is not meant to be interfaced -// with concurrent callers. +// This is goroutine-safe, but users should beware that the application in +// general is not meant to be interfaced with concurrent callers. type socketClient struct { service.BaseService @@ -36,13 +34,17 @@ type socketClient struct { reqQueue chan *ReqRes flushTimer *timer.ThrottleTimer - mtx sync.Mutex + mtx tmsync.Mutex err error reqSent *list.List // list of requests sent, waiting for response resCb func(*types.Request, *types.Response) // called on all requests, if set. - } +var _ Client = (*socketClient)(nil) + +// NewSocketClient creates a new socket client, which connects to a given +// address. If mustConnect is true, the client will return an error upon start +// if it fails to connect. 
func NewSocketClient(addr string, mustConnect bool) Client { cli := &socketClient{ reqQueue: make(chan *ReqRes, reqQueueSize), @@ -57,19 +59,24 @@ func NewSocketClient(addr string, mustConnect bool) Client { return cli } +// OnStart implements Service by connecting to the server and spawning reading +// and writing goroutines. func (cli *socketClient) OnStart() error { - var err error - var conn net.Conn -RETRY_LOOP: + var ( + err error + conn net.Conn + ) + for { conn, err = tmnet.Connect(cli.addr) if err != nil { if cli.mustConnect { return err } - cli.Logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying...", cli.addr), "err", err) + cli.Logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying after %vs...", + cli.addr, dialRetryIntervalSeconds), "err", err) time.Sleep(time.Second * dialRetryIntervalSeconds) - continue RETRY_LOOP + continue } cli.conn = conn @@ -80,39 +87,26 @@ RETRY_LOOP: } } +// OnStop implements Service by closing connection and flushing all queues. func (cli *socketClient) OnStop() { if cli.conn != nil { cli.conn.Close() } - cli.mtx.Lock() - defer cli.mtx.Unlock() cli.flushQueue() + cli.flushTimer.Stop() } -// Stop the client and set the error -func (cli *socketClient) StopForError(err error) { - if !cli.IsRunning() { - return - } - - cli.mtx.Lock() - if cli.err == nil { - cli.err = err - } - cli.mtx.Unlock() - - cli.Logger.Error(fmt.Sprintf("Stopping abci.socketClient for error: %v", err.Error())) - cli.Stop() -} - +// Error returns an error if the client was stopped abruptly. func (cli *socketClient) Error() error { cli.mtx.Lock() defer cli.mtx.Unlock() return cli.err } -// Set listener for all responses +// SetResponseCallback sets a callback, which will be executed for each +// non-error & non-empty response from the server. +// // NOTE: callback may get internally generated flush responses. func (cli *socketClient) SetResponseCallback(resCb Callback) { cli.mtx.Lock() @@ -123,57 +117,60 @@ func (cli *socketClient) SetResponseCallback(resCb Callback) { //---------------------------------------- func (cli *socketClient) sendRequestsRoutine(conn io.Writer) { - w := bufio.NewWriter(conn) for { select { - case <-cli.flushTimer.Ch: - select { - case cli.reqQueue <- NewReqRes(types.ToRequestFlush()): - default: - // Probably will fill the buffer, or retry later. - } - case <-cli.Quit(): - return case reqres := <-cli.reqQueue: + // cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request) + cli.willSendReq(reqres) err := types.WriteMessage(reqres.Request, w) if err != nil { - cli.StopForError(fmt.Errorf("error writing msg: %v", err)) + cli.stopForError(fmt.Errorf("write to buffer: %w", err)) return } - // cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request) + + // If it's a flush request, flush the current buffer. if _, ok := reqres.Request.Value.(*types.Request_Flush); ok { err = w.Flush() if err != nil { - cli.StopForError(fmt.Errorf("error flushing writer: %v", err)) + cli.stopForError(fmt.Errorf("flush buffer: %w", err)) return } } + case <-cli.flushTimer.Ch: // flush queue + select { + case cli.reqQueue <- NewReqRes(types.ToRequestFlush()): + default: + // Probably will fill the buffer, or retry later. 
+ } + case <-cli.Quit(): + return } } } func (cli *socketClient) recvResponseRoutine(conn io.Reader) { - - r := bufio.NewReader(conn) // Buffer reads + r := bufio.NewReader(conn) for { var res = &types.Response{} err := types.ReadMessage(r, res) if err != nil { - cli.StopForError(err) + cli.stopForError(fmt.Errorf("read message: %w", err)) return } + + // cli.Logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res) + switch r := res.Value.(type) { - case *types.Response_Exception: + case *types.Response_Exception: // app responded with error // XXX After setting cli.err, release waiters (e.g. reqres.Done()) - cli.StopForError(errors.New(r.Exception.Error)) + cli.stopForError(errors.New(r.Exception.Error)) return default: - // cli.Logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res) err := cli.didRecvResponse(res) if err != nil { - cli.StopForError(err) + cli.stopForError(err) return } } @@ -190,20 +187,21 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error { cli.mtx.Lock() defer cli.mtx.Unlock() - // Get the first ReqRes + // Get the first ReqRes. next := cli.reqSent.Front() if next == nil { - return fmt.Errorf("unexpected result type %v when nothing expected", reflect.TypeOf(res.Value)) + return fmt.Errorf("unexpected %v when nothing expected", reflect.TypeOf(res.Value)) } + reqres := next.Value.(*ReqRes) if !resMatchesReq(reqres.Request, res) { - return fmt.Errorf("unexpected result type %v when response to %v expected", + return fmt.Errorf("unexpected %v when response to %v expected", reflect.TypeOf(res.Value), reflect.TypeOf(reqres.Request.Value)) } - reqres.Response = res // Set response - reqres.Done() // Release waiters - cli.reqSent.Remove(next) // Pop first item from linked list + reqres.Response = res + reqres.Done() // release waiters + cli.reqSent.Remove(next) // pop first item from linked list // Notify client listener if set (global callback). if cli.resCb != nil { @@ -211,8 +209,9 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error { } // Notify reqRes listener if set (request specific callback). - // NOTE: it is possible this callback isn't set on the reqres object. - // at this point, in which case it will be called after, when it is set. + // + // NOTE: It is possible this callback isn't set on the reqres object at this + // point, in which case it will be called later, once it is set.
if cb := reqres.GetCallback(); cb != nil { cb(res) } @@ -234,10 +233,6 @@ func (cli *socketClient) InfoAsync(req types.RequestInfo) *ReqRes { return cli.queueRequest(types.ToRequestInfo(req)) } -func (cli *socketClient) SetOptionAsync(req types.RequestSetOption) *ReqRes { - return cli.queueRequest(types.ToRequestSetOption(req)) -} - func (cli *socketClient) DeliverTxAsync(req types.RequestDeliverTx) *ReqRes { return cli.queueRequest(types.ToRequestDeliverTx(req)) } @@ -266,6 +261,22 @@ func (cli *socketClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes { return cli.queueRequest(types.ToRequestEndBlock(req)) } +func (cli *socketClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes { + return cli.queueRequest(types.ToRequestListSnapshots(req)) +} + +func (cli *socketClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes { + return cli.queueRequest(types.ToRequestOfferSnapshot(req)) +} + +func (cli *socketClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes { + return cli.queueRequest(types.ToRequestLoadSnapshotChunk(req)) +} + +func (cli *socketClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes { + return cli.queueRequest(types.ToRequestApplySnapshotChunk(req)) +} + //---------------------------------------- func (cli *socketClient) FlushSync() error { @@ -279,64 +290,122 @@ func (cli *socketClient) FlushSync() error { func (cli *socketClient) EchoSync(msg string) (*types.ResponseEcho, error) { reqres := cli.queueRequest(types.ToRequestEcho(msg)) - cli.FlushSync() + if err := cli.FlushSync(); err != nil { + return nil, err + } + return reqres.Response.GetEcho(), cli.Error() } func (cli *socketClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { reqres := cli.queueRequest(types.ToRequestInfo(req)) - cli.FlushSync() - return reqres.Response.GetInfo(), cli.Error() -} + if err := cli.FlushSync(); err != nil { + return nil, err + } -func (cli *socketClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) { - reqres := cli.queueRequest(types.ToRequestSetOption(req)) - cli.FlushSync() - return reqres.Response.GetSetOption(), cli.Error() + return reqres.Response.GetInfo(), cli.Error() } func (cli *socketClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { reqres := cli.queueRequest(types.ToRequestDeliverTx(req)) - cli.FlushSync() + if err := cli.FlushSync(); err != nil { + return nil, err + } + return reqres.Response.GetDeliverTx(), cli.Error() } func (cli *socketClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) { reqres := cli.queueRequest(types.ToRequestCheckTx(req)) - cli.FlushSync() + if err := cli.FlushSync(); err != nil { + return nil, err + } + return reqres.Response.GetCheckTx(), cli.Error() } func (cli *socketClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) { reqres := cli.queueRequest(types.ToRequestQuery(req)) - cli.FlushSync() + if err := cli.FlushSync(); err != nil { + return nil, err + } + return reqres.Response.GetQuery(), cli.Error() } func (cli *socketClient) CommitSync() (*types.ResponseCommit, error) { reqres := cli.queueRequest(types.ToRequestCommit()) - cli.FlushSync() + if err := cli.FlushSync(); err != nil { + return nil, err + } + return reqres.Response.GetCommit(), cli.Error() } func (cli *socketClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) { reqres := cli.queueRequest(types.ToRequestInitChain(req)) - cli.FlushSync() + if err := 
cli.FlushSync(); err != nil { + return nil, err + } + return reqres.Response.GetInitChain(), cli.Error() } func (cli *socketClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { reqres := cli.queueRequest(types.ToRequestBeginBlock(req)) - cli.FlushSync() + if err := cli.FlushSync(); err != nil { + return nil, err + } + return reqres.Response.GetBeginBlock(), cli.Error() } func (cli *socketClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) { reqres := cli.queueRequest(types.ToRequestEndBlock(req)) - cli.FlushSync() + if err := cli.FlushSync(); err != nil { + return nil, err + } + return reqres.Response.GetEndBlock(), cli.Error() } +func (cli *socketClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + reqres := cli.queueRequest(types.ToRequestListSnapshots(req)) + if err := cli.FlushSync(); err != nil { + return nil, err + } + + return reqres.Response.GetListSnapshots(), cli.Error() +} + +func (cli *socketClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + reqres := cli.queueRequest(types.ToRequestOfferSnapshot(req)) + if err := cli.FlushSync(); err != nil { + return nil, err + } + + return reqres.Response.GetOfferSnapshot(), cli.Error() +} + +func (cli *socketClient) LoadSnapshotChunkSync( + req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + reqres := cli.queueRequest(types.ToRequestLoadSnapshotChunk(req)) + if err := cli.FlushSync(); err != nil { + return nil, err + } + + return reqres.Response.GetLoadSnapshotChunk(), cli.Error() +} + +func (cli *socketClient) ApplySnapshotChunkSync( + req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + reqres := cli.queueRequest(types.ToRequestApplySnapshotChunk(req)) + if err := cli.FlushSync(); err != nil { + return nil, err + } + return reqres.Response.GetApplySnapshotChunk(), cli.Error() +} + //---------------------------------------- func (cli *socketClient) queueRequest(req *types.Request) *ReqRes { @@ -357,6 +426,9 @@ func (cli *socketClient) queueRequest(req *types.Request) *ReqRes { } func (cli *socketClient) flushQueue() { + cli.mtx.Lock() + defer cli.mtx.Unlock() + // mark all in-flight messages as resolved (they will get cli.Error()) for req := cli.reqSent.Front(); req != nil; req = req.Next() { reqres := req.Value.(*ReqRes) @@ -385,8 +457,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) { _, ok = res.Value.(*types.Response_Flush) case *types.Request_Info: _, ok = res.Value.(*types.Response_Info) - case *types.Request_SetOption: - _, ok = res.Value.(*types.Response_SetOption) case *types.Request_DeliverTx: _, ok = res.Value.(*types.Response_DeliverTx) case *types.Request_CheckTx: @@ -401,6 +471,31 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) { _, ok = res.Value.(*types.Response_BeginBlock) case *types.Request_EndBlock: _, ok = res.Value.(*types.Response_EndBlock) + case *types.Request_ApplySnapshotChunk: + _, ok = res.Value.(*types.Response_ApplySnapshotChunk) + case *types.Request_LoadSnapshotChunk: + _, ok = res.Value.(*types.Response_LoadSnapshotChunk) + case *types.Request_ListSnapshots: + _, ok = res.Value.(*types.Response_ListSnapshots) + case *types.Request_OfferSnapshot: + _, ok = res.Value.(*types.Response_OfferSnapshot) } return ok } + +func (cli *socketClient) stopForError(err error) { + if !cli.IsRunning() { + return + } + + cli.mtx.Lock() + if cli.err == nil 
{ + cli.err = err + } + cli.mtx.Unlock() + + cli.Logger.Error(fmt.Sprintf("Stopping abci.socketClient for error: %v", err.Error())) + if err := cli.Stop(); err != nil { + cli.Logger.Error("Error stopping abci.socketClient", "err", err) + } +} diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go index 37bc2b57a..90b894b71 100644 --- a/abci/client/socket_client_test.go +++ b/abci/client/socket_client_test.go @@ -1,7 +1,6 @@ package abcicli_test import ( - "errors" "fmt" "testing" "time" @@ -16,41 +15,27 @@ import ( "github.com/tendermint/tendermint/libs/service" ) -type errorStopper interface { - StopForError(error) -} - -func TestSocketClientStopForErrorDeadlock(t *testing.T) { - c := abcicli.NewSocketClient(":80", false).(errorStopper) - err := errors.New("foo-tendermint") - - // See Issue https://github.com/tendermint/abci/issues/114 - doneChan := make(chan bool) - go func() { - defer close(doneChan) - c.StopForError(err) - c.StopForError(err) - }() - - select { - case <-doneChan: - case <-time.After(time.Second * 4): - t.Fatalf("Test took too long, potential deadlock still exists") - } -} - func TestProperSyncCalls(t *testing.T) { app := slowApp{} s, c := setupClientServer(t, app) - defer s.Stop() - defer c.Stop() + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) + t.Cleanup(func() { + if err := c.Stop(); err != nil { + t.Error(err) + } + }) resp := make(chan error, 1) go func() { // This is BeginBlockSync unrolled.... reqres := c.BeginBlockAsync(types.RequestBeginBlock{}) - c.FlushSync() + err := c.FlushSync() + require.NoError(t, err) res := reqres.Response.GetBeginBlock() require.NotNil(t, res) resp <- c.Error() @@ -69,8 +54,16 @@ func TestHangingSyncCalls(t *testing.T) { app := slowApp{} s, c := setupClientServer(t, app) - defer s.Stop() - defer c.Stop() + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Log(err) + } + }) + t.Cleanup(func() { + if err := c.Stop(); err != nil { + t.Log(err) + } + }) resp := make(chan error, 1) go func() { @@ -81,7 +74,8 @@ func TestHangingSyncCalls(t *testing.T) { // no response yet from server time.Sleep(20 * time.Millisecond) // kill the server, so the connections break - s.Stop() + err := s.Stop() + require.NoError(t, err) // wait for the response from BeginBlock reqres.Wait() diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index d5a9aca27..128403162 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -22,7 +22,7 @@ import ( servertest "github.com/tendermint/tendermint/abci/tests/server" "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/abci/version" - "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/proto/tendermint/crypto" ) // client is a global variable so it can be reused by the console @@ -98,10 +98,10 @@ type response struct { } type queryResponse struct { - Key []byte - Value []byte - Height int64 - Proof *merkle.Proof + Key []byte + Value []byte + Height int64 + ProofOps *crypto.ProofOps } func Execute() error { @@ -148,7 +148,6 @@ func addCommands() { RootCmd.AddCommand(consoleCmd) RootCmd.AddCommand(echoCmd) RootCmd.AddCommand(infoCmd) - RootCmd.AddCommand(setOptionCmd) RootCmd.AddCommand(deliverTxCmd) RootCmd.AddCommand(checkTxCmd) RootCmd.AddCommand(commitCmd) @@ -176,7 +175,6 @@ you'd like to run: where example.file looks something like: - set_option serial on check_tx 0x00 check_tx 0xff deliver_tx 0x00 @@ -198,7 +196,7 @@ This command opens an 
interactive console for running any of the other commands without opening a new connection each time `, Args: cobra.ExactArgs(0), - ValidArgs: []string{"echo", "info", "set_option", "deliver_tx", "check_tx", "commit", "query"}, + ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "commit", "query"}, RunE: cmdConsole, } @@ -216,13 +214,6 @@ var infoCmd = &cobra.Command{ Args: cobra.ExactArgs(0), RunE: cmdInfo, } -var setOptionCmd = &cobra.Command{ - Use: "set_option", - Short: "set an option on the application", - Long: "set an option on the application", - Args: cobra.ExactArgs(2), - RunE: cmdSetOption, -} var deliverTxCmd = &cobra.Command{ Use: "deliver_tx", @@ -324,7 +315,6 @@ func cmdTest(cmd *cobra.Command, args []string) error { return compose( []func() error{ func() error { return servertest.InitChain(client) }, - func() error { return servertest.SetOption(client, "serial", "on") }, func() error { return servertest.Commit(client, nil) }, func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) }, func() error { return servertest.Commit(client, nil) }, @@ -439,8 +429,6 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error { return cmdInfo(cmd, actualArgs) case "query": return cmdQuery(cmd, actualArgs) - case "set_option": - return cmdSetOption(cmd, actualArgs) default: return cmdUnimplemented(cmd, pArgs) } @@ -464,7 +452,6 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error { fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short) fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short) fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short) - fmt.Printf("%s: %s\n", setOptionCmd.Use, setOptionCmd.Short) fmt.Println("Use \"[command] --help\" for more information about a command.") return nil @@ -504,25 +491,6 @@ func cmdInfo(cmd *cobra.Command, args []string) error { const codeBad uint32 = 10 -// Set an option on the application -func cmdSetOption(cmd *cobra.Command, args []string) error { - if len(args) < 2 { - printResponse(cmd, args, response{ - Code: codeBad, - Log: "want at least arguments of the form: ", - }) - return nil - } - - key, val := args[0], args[1] - _, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: val}) - if err != nil { - return err - } - printResponse(cmd, args, response{Log: "OK (SetOption doesn't return anything.)"}) // NOTE: Nothing to show... - return nil -} - // Append a new tx to application func cmdDeliverTx(cmd *cobra.Command, args []string) error { if len(args) == 0 { @@ -616,10 +584,10 @@ func cmdQuery(cmd *cobra.Command, args []string) error { Info: resQuery.Info, Log: resQuery.Log, Query: &queryResponse{ - Key: resQuery.Key, - Value: resQuery.Value, - Height: resQuery.Height, - Proof: resQuery.Proof, + Key: resQuery.Key, + Value: resQuery.Value, + Height: resQuery.Height, + ProofOps: resQuery.ProofOps, }, }) return nil @@ -642,7 +610,9 @@ func cmdCounter(cmd *cobra.Command, args []string) error { // Stop upon receiving SIGTERM or CTRL-C. tmos.TrapSignal(logger, func() { // Cleanup - srv.Stop() + if err := srv.Stop(); err != nil { + logger.Error("Error while stopping server", "err", err) + } }) // Run forever. @@ -674,7 +644,9 @@ func cmdKVStore(cmd *cobra.Command, args []string) error { // Stop upon receiving SIGTERM or CTRL-C. tmos.TrapSignal(logger, func() { // Cleanup - srv.Stop() + if err := srv.Stop(); err != nil { + logger.Error("Error while stopping server", "err", err) + } }) // Run forever. 
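A pattern that recurs throughout this patch: `Stop()` on services and clients now returns an error, so bare `defer s.Stop()` calls are replaced with checked calls, via `t.Cleanup` in tests or a logged check inside `tmos.TrapSignal` in the CLI. A minimal sketch of the non-test variant, assuming an illustrative socket path and the kvstore example app:

```go
package main

import (
	"os"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	abciserver "github.com/tendermint/tendermint/abci/server"
	"github.com/tendermint/tendermint/libs/log"
	tmos "github.com/tendermint/tendermint/libs/os"
)

func main() {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	srv := abciserver.NewSocketServer("unix://example.sock", kvstore.NewApplication())
	srv.SetLogger(logger.With("module", "abci-server"))
	if err := srv.Start(); err != nil {
		tmos.Exit(err.Error())
	}

	// Stop() returns an error after this patch, so the cleanup closure
	// checks it instead of discarding it with a bare `defer srv.Stop()`.
	tmos.TrapSignal(logger, func() {
		if err := srv.Stop(); err != nil {
			logger.Error("Error while stopping server", "err", err)
		}
	})

	// Run forever.
	select {}
}
```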
@@ -719,8 +691,8 @@ func printResponse(cmd *cobra.Command, args []string, rsp response) { fmt.Printf("-> value: %s\n", rsp.Query.Value) fmt.Printf("-> value.hex: %X\n", rsp.Query.Value) } - if rsp.Query.Proof != nil { - fmt.Printf("-> proof: %#v\n", rsp.Query.Proof) + if rsp.Query.ProofOps != nil { + fmt.Printf("-> proof: %#v\n", rsp.Query.ProofOps) } } } diff --git a/abci/example/counter/counter.go b/abci/example/counter/counter.go index 58f8aabb9..221fb12bd 100644 --- a/abci/example/counter/counter.go +++ b/abci/example/counter/counter.go @@ -24,24 +24,6 @@ func (app *Application) Info(req types.RequestInfo) types.ResponseInfo { return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)} } -func (app *Application) SetOption(req types.RequestSetOption) types.ResponseSetOption { - key, value := req.Key, req.Value - if key == "serial" && value == "on" { - app.serial = true - } else { - /* - TODO Panic and have the ABCI server pass an exception. - The client can call SetOptionSync() and get an `error`. - return types.ResponseSetOption{ - Error: fmt.Sprintf("Unknown key (%s) or value (%s)", key, value), - } - */ - return types.ResponseSetOption{} - } - - return types.ResponseSetOption{} -} - func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx { if app.serial { if len(req.Tx) > 8 { @@ -69,6 +51,7 @@ func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx Code: code.CodeTypeEncodingError, Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))} } + tx8 := make([]byte, 8) copy(tx8[len(tx8)-len(req.Tx):], req.Tx) txValue := binary.BigEndian.Uint64(tx8) diff --git a/abci/example/example_test.go b/abci/example/example_test.go index d40976015..24641e11c 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -2,7 +2,9 @@ package example import ( "fmt" + "math/rand" "net" + "os" "reflect" "testing" "time" @@ -23,6 +25,10 @@ import ( "github.com/tendermint/tendermint/abci/types" ) +func init() { + rand.Seed(time.Now().UnixNano()) +} + func TestKVStore(t *testing.T) { fmt.Println("### Testing KVStore") testStream(t, kvstore.NewApplication()) @@ -40,22 +46,33 @@ func TestGRPC(t *testing.T) { func testStream(t *testing.T, app types.Application) { numDeliverTxs := 20000 + socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30)) + defer os.Remove(socketFile) + socket := fmt.Sprintf("unix://%v", socketFile) // Start the listener - server := abciserver.NewSocketServer("unix://test.sock", app) + server := abciserver.NewSocketServer(socket, app) server.SetLogger(log.TestingLogger().With("module", "abci-server")) if err := server.Start(); err != nil { require.NoError(t, err, "Error starting socket server") } - defer server.Stop() + t.Cleanup(func() { + if err := server.Stop(); err != nil { + t.Error(err) + } + }) // Connect to the socket - client := abcicli.NewSocketClient("unix://test.sock", false) + client := abcicli.NewSocketClient(socket, false) client.SetLogger(log.TestingLogger().With("module", "abci-client")) if err := client.Start(); err != nil { t.Fatalf("Error starting socket client: %v", err.Error()) } - defer client.Stop() + t.Cleanup(func() { + if err := client.Stop(); err != nil { + t.Error(err) + } + }) done := make(chan struct{}) counter := 0 @@ -113,21 +130,34 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) { numDeliverTxs := 2000 + socketFile := 
fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30)) + defer os.Remove(socketFile) + socket := fmt.Sprintf("unix://%v", socketFile) // Start the listener - server := abciserver.NewGRPCServer("unix://test.sock", app) + server := abciserver.NewGRPCServer(socket, app) server.SetLogger(log.TestingLogger().With("module", "abci-server")) if err := server.Start(); err != nil { t.Fatalf("Error starting GRPC server: %v", err.Error()) } - defer server.Stop() + + t.Cleanup(func() { + if err := server.Stop(); err != nil { + t.Error(err) + } + }) // Connect to the socket - conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) + conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) if err != nil { t.Fatalf("Error dialing GRPC server: %v", err.Error()) } - defer conn.Close() + + t.Cleanup(func() { + if err := conn.Close(); err != nil { + t.Error(err) + } + }) client := types.NewABCIApplicationClient(conn) diff --git a/abci/example/js/.gitignore b/abci/example/js/.gitignore deleted file mode 100644 index 3c3629e64..000000000 --- a/abci/example/js/.gitignore +++ /dev/null @@ -1 +0,0 @@ -node_modules diff --git a/abci/example/js/README.md b/abci/example/js/README.md deleted file mode 100644 index 1bef9cbf5..000000000 --- a/abci/example/js/README.md +++ /dev/null @@ -1 +0,0 @@ -This example has been moved here: https://github.com/tendermint/js-abci/tree/master/example diff --git a/abci/example/kvstore/README.md b/abci/example/kvstore/README.md index bed81a598..edc2c47a5 100644 --- a/abci/example/kvstore/README.md +++ b/abci/example/kvstore/README.md @@ -21,11 +21,10 @@ The state is persisted in leveldb along with the last block committed, and the Handshake allows any necessary blocks to be replayed. Validator set changes are effected using the following transaction format: -``` +```md "val:pubkey1!power1,pubkey2!power2,pubkey3!power3" ``` where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one). To remove a validator from the validator set, set power to `0`. There is no sybil protection against new validators joining. 
- diff --git a/abci/example/kvstore/helpers.go b/abci/example/kvstore/helpers.go index d1334b312..e59fee279 100644 --- a/abci/example/kvstore/helpers.go +++ b/abci/example/kvstore/helpers.go @@ -10,7 +10,7 @@ import ( func RandVal(i int) types.ValidatorUpdate { pubkey := tmrand.Bytes(32) power := tmrand.Uint16() + 1 - v := types.Ed25519ValidatorUpdate(pubkey, int64(power)) + v := types.UpdateValidator(pubkey, int64(power), "") return v } diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 42f00231f..8b851ca9a 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -10,7 +10,6 @@ import ( "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/kv" "github.com/tendermint/tendermint/version" ) @@ -18,7 +17,7 @@ var ( stateKey = []byte("stateKey") kvPairPrefixKey = []byte("kvPairKey:") - ProtocolVersion version.Protocol = 0x1 + ProtocolVersion uint64 = 0x1 ) type State struct { @@ -50,7 +49,10 @@ func saveState(state State) { if err != nil { panic(err) } - state.db.Set(stateKey, stateBytes) + err = state.db.Set(stateKey, stateBytes) + if err != nil { + panic(err) + } } func prefixKey(key []byte) []byte { @@ -77,7 +79,7 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) return types.ResponseInfo{ Data: fmt.Sprintf("{\"size\":%v}", app.state.Size), Version: version.ABCIVersion, - AppVersion: ProtocolVersion.Uint64(), + AppVersion: ProtocolVersion, LastBlockHeight: app.state.Height, LastBlockAppHash: app.state.AppHash, } @@ -93,15 +95,20 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli key, value = req.Tx, req.Tx } - app.state.db.Set(prefixKey(key), value) + err := app.state.db.Set(prefixKey(key), value) + if err != nil { + panic(err) + } app.state.Size++ events := []types.Event{ { Type: "app", - Attributes: []kv.Pair{ - {Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko")}, - {Key: []byte("key"), Value: key}, + Attributes: []types.EventAttribute{ + {Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko"), Index: true}, + {Key: []byte("key"), Value: key, Index: true}, + {Key: []byte("index_key"), Value: []byte("index is working"), Index: true}, + {Key: []byte("noindex_key"), Value: []byte("index is working"), Index: false}, }, }, } diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 4d8c829ad..2269ec228 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -1,7 +1,6 @@ package kvstore import ( - "bytes" "fmt" "io/ioutil" "sort" @@ -16,6 +15,7 @@ import ( "github.com/tendermint/tendermint/abci/example/code" abciserver "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/abci/types" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) const ( @@ -103,7 +103,7 @@ func TestPersistentKVStoreInfo(t *testing.T) { // make and apply block height = int64(1) hash := []byte("foo") - header := types.Header{ + header := tmproto.Header{ Height: height, } kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header}) @@ -129,7 +129,7 @@ func TestValUpdates(t *testing.T) { total := 10 nInit := 5 vals := RandVals(total) - // iniitalize with the first nInit + // initialize with the first nInit kvstore.InitChain(types.RequestInitChain{ Validators: vals[:nInit], }) @@ -193,7 +193,7 @@ func makeApplyBlock( // make and apply block height := int64(heightInt) hash 
:= []byte("foo") - header := types.Header{ + header := tmproto.Header{ Height: height, } @@ -219,7 +219,7 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) { sort.Sort(types.ValidatorUpdates(vals2)) for i, v1 := range vals1 { v2 := vals2[i] - if !bytes.Equal(v1.PubKey.Data, v2.PubKey.Data) || + if !v1.PubKey.Equal(v2.PubKey) || v1.Power != v2.Power { t.Fatalf("vals dont match at index %d. got %X/%d , expected %X/%d", i, v2.PubKey, v2.Power, v1.PubKey, v1.Power) } @@ -241,7 +241,9 @@ func makeSocketClientServer(app types.Application, name string) (abcicli.Client, client := abcicli.NewSocketClient(socket, false) client.SetLogger(logger.With("module", "abci-client")) if err := client.Start(); err != nil { - server.Stop() + if err = server.Stop(); err != nil { + return nil, nil, err + } return nil, nil, err } @@ -263,7 +265,9 @@ func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, s client := abcicli.NewGRPCClient(socket, true) client.SetLogger(logger.With("module", "abci-client")) if err := client.Start(); err != nil { - server.Stop() + if err := server.Stop(); err != nil { + return nil, nil, err + } return nil, nil, err } return client, server, nil @@ -273,18 +277,35 @@ func TestClientServer(t *testing.T) { // set up socket app kvstore := NewApplication() client, server, err := makeSocketClientServer(kvstore, "kvstore-socket") - require.Nil(t, err) - defer server.Stop() - defer client.Stop() + require.NoError(t, err) + t.Cleanup(func() { + if err := server.Stop(); err != nil { + t.Error(err) + } + }) + t.Cleanup(func() { + if err := client.Stop(); err != nil { + t.Error(err) + } + }) runClientTests(t, client) // set up grpc app kvstore = NewApplication() gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc") - require.Nil(t, err) - defer gserver.Stop() - defer gclient.Stop() + require.NoError(t, err) + + t.Cleanup(func() { + if err := gserver.Stop(); err != nil { + t.Error(err) + } + }) + t.Cleanup(func() { + if err := gclient.Stop(); err != nil { + t.Error(err) + } + }) runClientTests(t, gclient) } diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index fffc617be..d40983f85 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -11,9 +11,9 @@ import ( "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/crypto/ed25519" + cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/log" - tmtypes "github.com/tendermint/tendermint/types" + pc "github.com/tendermint/tendermint/proto/tendermint/crypto" ) const ( @@ -30,7 +30,7 @@ type PersistentKVStoreApplication struct { // validator set ValUpdates []types.ValidatorUpdate - valAddrToPubKeyMap map[string]types.PubKey + valAddrToPubKeyMap map[string]pc.PublicKey logger log.Logger } @@ -46,7 +46,7 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication return &PersistentKVStoreApplication{ app: &Application{state: state}, - valAddrToPubKeyMap: make(map[string]types.PubKey), + valAddrToPubKeyMap: make(map[string]pc.PublicKey), logger: log.NewNopLogger(), } } @@ -62,10 +62,6 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo return res } -func (app *PersistentKVStoreApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption { - return app.app.SetOption(req) -} - // tx is either 
"val:pubkey!power" or "key=value" or just arbitrary bytes func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx { // if it starts with "val:", update the validator set @@ -124,18 +120,24 @@ func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) // reset valset changes app.ValUpdates = make([]types.ValidatorUpdate, 0) + // Punish validators who committed equivocation. for _, ev := range req.ByzantineValidators { - if ev.Type == tmtypes.ABCIEvidenceTypeDuplicateVote { - // decrease voting power by 1 - if ev.TotalVotingPower == 0 { - continue + if ev.Type == types.EvidenceType_DUPLICATE_VOTE { + addr := string(ev.Validator.Address) + if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok { + app.updateValidator(types.ValidatorUpdate{ + PubKey: pubKey, + Power: ev.Validator.Power - 1, + }) + app.logger.Info("Decreased val power by 1 because of the equivocation", + "val", addr) + } else { + app.logger.Error("Wanted to punish val, but can't find it", + "val", addr) } - app.updateValidator(types.ValidatorUpdate{ - PubKey: app.valAddrToPubKeyMap[string(ev.Validator.Address)], - Power: ev.TotalVotingPower - 1, - }) } } + return types.ResponseBeginBlock{} } @@ -144,6 +146,26 @@ func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) typ return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates} } +func (app *PersistentKVStoreApplication) ListSnapshots( + req types.RequestListSnapshots) types.ResponseListSnapshots { + return types.ResponseListSnapshots{} +} + +func (app *PersistentKVStoreApplication) LoadSnapshotChunk( + req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk { + return types.ResponseLoadSnapshotChunk{} +} + +func (app *PersistentKVStoreApplication) OfferSnapshot( + req types.RequestOfferSnapshot) types.ResponseOfferSnapshot { + return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT} +} + +func (app *PersistentKVStoreApplication) ApplySnapshotChunk( + req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk { + return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT} +} + //--------------------------------------------- // update validators @@ -162,11 +184,18 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida validators = append(validators, *validator) } } + if err = itr.Error(); err != nil { + panic(err) + } return } -func MakeValSetChangeTx(pubkey types.PubKey, power int64) []byte { - pubStr := base64.StdEncoding.EncodeToString(pubkey.Data) +func MakeValSetChangeTx(pubkey pc.PublicKey, power int64) []byte { + pk, err := cryptoenc.PubKeyFromProto(pubkey) + if err != nil { + panic(err) + } + pubStr := base64.StdEncoding.EncodeToString(pk.Bytes()) return []byte(fmt.Sprintf("val:%s!%d", pubStr, power)) } @@ -179,7 +208,7 @@ func isValidatorTx(tx []byte) bool { func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx { tx = tx[len(ValidatorSetChangePrefix):] - //get the pubkey and power + // get the pubkey and power pubKeyAndPower := strings.Split(string(tx), "!") if len(pubKeyAndPower) != 2 { return types.ResponseDeliverTx{ @@ -205,15 +234,16 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon } // update - return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, power)) + return app.updateValidator(types.UpdateValidator(pubkey, power, "")) } // add, update, or remove a validator func (app 
*PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx { - key := []byte("val:" + string(v.PubKey.Data)) - - pubkey := ed25519.PubKeyEd25519{} - copy(pubkey[:], v.PubKey.Data) + pubkey, err := cryptoenc.PubKeyFromProto(v.PubKey) + if err != nil { + panic(fmt.Errorf("can't decode public key: %w", err)) + } + key := []byte("val:" + string(pubkey.Bytes())) if v.Power == 0 { // remove validator @@ -222,12 +252,14 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate panic(err) } if !hasKey { - pubStr := base64.StdEncoding.EncodeToString(v.PubKey.Data) + pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes()) return types.ResponseDeliverTx{ Code: code.CodeTypeUnauthorized, Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)} } - app.app.state.db.Delete(key) + if err = app.app.state.db.Delete(key); err != nil { + panic(err) + } delete(app.valAddrToPubKeyMap, string(pubkey.Address())) } else { // add or update validator @@ -237,7 +269,9 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate Code: code.CodeTypeEncodingError, Log: fmt.Sprintf("Error encoding validator: %v", err)} } - app.app.state.db.Set(key, value.Bytes()) + if err = app.app.state.db.Set(key, value.Bytes()); err != nil { + panic(err) + } app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey } diff --git a/abci/example/python/abci/__init__.py b/abci/example/python/abci/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/abci/example/python/abci/msg.py b/abci/example/python/abci/msg.py deleted file mode 100644 index 7329f5852..000000000 --- a/abci/example/python/abci/msg.py +++ /dev/null @@ -1,50 +0,0 @@ -from wire import decode_string - -# map type_byte to message name -message_types = { - 0x01: "echo", - 0x02: "flush", - 0x03: "info", - 0x04: "set_option", - 0x21: "deliver_tx", - 0x22: "check_tx", - 0x23: "commit", - 0x24: "add_listener", - 0x25: "rm_listener", -} - -# return the decoded arguments of abci messages - -class RequestDecoder(): - - def __init__(self, reader): - self.reader = reader - - def echo(self): - return decode_string(self.reader) - - def flush(self): - return - - def info(self): - return - - def set_option(self): - return decode_string(self.reader), decode_string(self.reader) - - def deliver_tx(self): - return decode_string(self.reader) - - def check_tx(self): - return decode_string(self.reader) - - def commit(self): - return - - def add_listener(self): - # TODO - return - - def rm_listener(self): - # TODO - return diff --git a/abci/example/python/abci/reader.py b/abci/example/python/abci/reader.py deleted file mode 100644 index 6c0dad94e..000000000 --- a/abci/example/python/abci/reader.py +++ /dev/null @@ -1,56 +0,0 @@ - -# Simple read() method around a bytearray - - -class BytesBuffer(): - - def __init__(self, b): - self.buf = b - self.readCount = 0 - - def count(self): - return self.readCount - - def reset_count(self): - self.readCount = 0 - - def size(self): - return len(self.buf) - - def peek(self): - return self.buf[0] - - def write(self, b): - # b should be castable to byte array - self.buf += bytearray(b) - - def read(self, n): - if len(self.buf) < n: - print "reader err: buf less than n" - # TODO: exception - return - self.readCount += n - r = self.buf[:n] - self.buf = self.buf[n:] - return r - -# Buffer bytes off a tcp connection and read them off in chunks - - -class ConnReader(): - - def __init__(self, conn): - self.conn = conn - self.buf = 
bytearray() - - # blocking - def read(self, n): - while n > len(self.buf): - moreBuf = self.conn.recv(1024) - if not moreBuf: - raise IOError("dead connection") - self.buf = self.buf + bytearray(moreBuf) - - r = self.buf[:n] - self.buf = self.buf[n:] - return r diff --git a/abci/example/python/abci/server.py b/abci/example/python/abci/server.py deleted file mode 100644 index 40d50896c..000000000 --- a/abci/example/python/abci/server.py +++ /dev/null @@ -1,202 +0,0 @@ -import socket -import select -import sys - -from wire import decode_varint, encode -from reader import BytesBuffer -from msg import RequestDecoder, message_types - -# hold the asyncronous state of a connection -# ie. we may not get enough bytes on one read to decode the message - -class Connection(): - - def __init__(self, fd, app): - self.fd = fd - self.app = app - self.recBuf = BytesBuffer(bytearray()) - self.resBuf = BytesBuffer(bytearray()) - self.msgLength = 0 - self.decoder = RequestDecoder(self.recBuf) - self.inProgress = False # are we in the middle of a message - - def recv(this): - data = this.fd.recv(1024) - if not data: # what about len(data) == 0 - raise IOError("dead connection") - this.recBuf.write(data) - -# ABCI server responds to messges by calling methods on the app - -class ABCIServer(): - - def __init__(self, app, port=5410): - self.app = app - # map conn file descriptors to (app, reqBuf, resBuf, msgDecoder) - self.appMap = {} - - self.port = port - self.listen_backlog = 10 - - self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - self.listener.setblocking(0) - self.listener.bind(('', port)) - - self.listener.listen(self.listen_backlog) - - self.shutdown = False - - self.read_list = [self.listener] - self.write_list = [] - - def handle_new_connection(self, r): - new_fd, new_addr = r.accept() - new_fd.setblocking(0) # non-blocking - self.read_list.append(new_fd) - self.write_list.append(new_fd) - print 'new connection to', new_addr - - self.appMap[new_fd] = Connection(new_fd, self.app) - - def handle_conn_closed(self, r): - self.read_list.remove(r) - self.write_list.remove(r) - r.close() - print "connection closed" - - def handle_recv(self, r): - # app, recBuf, resBuf, conn - conn = self.appMap[r] - while True: - try: - print "recv loop" - # check if we need more data first - if conn.inProgress: - if (conn.msgLength == 0 or conn.recBuf.size() < conn.msgLength): - conn.recv() - else: - if conn.recBuf.size() == 0: - conn.recv() - - conn.inProgress = True - - # see if we have enough to get the message length - if conn.msgLength == 0: - ll = conn.recBuf.peek() - if conn.recBuf.size() < 1 + ll: - # we don't have enough bytes to read the length yet - return - print "decoding msg length" - conn.msgLength = decode_varint(conn.recBuf) - - # see if we have enough to decode the message - if conn.recBuf.size() < conn.msgLength: - return - - # now we can decode the message - - # first read the request type and get the particular msg - # decoder - typeByte = conn.recBuf.read(1) - typeByte = int(typeByte[0]) - resTypeByte = typeByte + 0x10 - req_type = message_types[typeByte] - - if req_type == "flush": - # messages are length prefixed - conn.resBuf.write(encode(1)) - conn.resBuf.write([resTypeByte]) - conn.fd.send(str(conn.resBuf.buf)) - conn.msgLength = 0 - conn.inProgress = False - conn.resBuf = BytesBuffer(bytearray()) - return - - decoder = getattr(conn.decoder, req_type) - - print "decoding args" - req_args = decoder() - print "got 
args", req_args - - # done decoding message - conn.msgLength = 0 - conn.inProgress = False - - req_f = getattr(conn.app, req_type) - if req_args is None: - res = req_f() - elif isinstance(req_args, tuple): - res = req_f(*req_args) - else: - res = req_f(req_args) - - if isinstance(res, tuple): - res, ret_code = res - else: - ret_code = res - res = None - - print "called", req_type, "ret code:", ret_code - if ret_code != 0: - print "non-zero retcode:", ret_code - - if req_type in ("echo", "info"): # these dont return a ret code - enc = encode(res) - # messages are length prefixed - conn.resBuf.write(encode(len(enc) + 1)) - conn.resBuf.write([resTypeByte]) - conn.resBuf.write(enc) - else: - enc, encRet = encode(res), encode(ret_code) - # messages are length prefixed - conn.resBuf.write(encode(len(enc) + len(encRet) + 1)) - conn.resBuf.write([resTypeByte]) - conn.resBuf.write(encRet) - conn.resBuf.write(enc) - except TypeError as e: - print "TypeError on reading from connection:", e - self.handle_conn_closed(r) - return - except ValueError as e: - print "ValueError on reading from connection:", e - self.handle_conn_closed(r) - return - except IOError as e: - print "IOError on reading from connection:", e - self.handle_conn_closed(r) - return - except Exception as e: - # sys.exc_info()[0] # TODO better - print "error reading from connection", str(e) - self.handle_conn_closed(r) - return - - def main_loop(self): - while not self.shutdown: - r_list, w_list, _ = select.select( - self.read_list, self.write_list, [], 2.5) - - for r in r_list: - if (r == self.listener): - try: - self.handle_new_connection(r) - # undo adding to read list ... - except NameError as e: - print "Could not connect due to NameError:", e - except TypeError as e: - print "Could not connect due to TypeError:", e - except: - print "Could not connect due to unexpected error:", sys.exc_info()[0] - else: - self.handle_recv(r) - - def handle_shutdown(self): - for r in self.read_list: - r.close() - for w in self.write_list: - try: - w.close() - except Exception as e: - print(e) # TODO: add logging - self.shutdown = True diff --git a/abci/example/python/abci/wire.py b/abci/example/python/abci/wire.py deleted file mode 100644 index 1a07e89f1..000000000 --- a/abci/example/python/abci/wire.py +++ /dev/null @@ -1,115 +0,0 @@ - -# the decoder works off a reader -# the encoder returns bytearray - - -def hex2bytes(h): - return bytearray(h.decode('hex')) - - -def bytes2hex(b): - if type(b) in (str, unicode): - return "".join([hex(ord(c))[2:].zfill(2) for c in b]) - else: - return bytes2hex(b.decode()) - - -# expects uvarint64 (no crazy big nums!) 
-def uvarint_size(i): - if i == 0: - return 0 - for j in xrange(1, 8): - if i < 1 << j * 8: - return j - return 8 - -# expects i < 2**size - - -def encode_big_endian(i, size): - if size == 0: - return bytearray() - return encode_big_endian(i / 256, size - 1) + bytearray([i % 256]) - - -def decode_big_endian(reader, size): - if size == 0: - return 0 - firstByte = reader.read(1)[0] - return firstByte * (256 ** (size - 1)) + decode_big_endian(reader, size - 1) - -# ints are max 16 bytes long - - -def encode_varint(i): - negate = False - if i < 0: - negate = True - i = -i - size = uvarint_size(i) - if size == 0: - return bytearray([0]) - big_end = encode_big_endian(i, size) - if negate: - size += 0xF0 - return bytearray([size]) + big_end - -# returns the int and whats left of the byte array - - -def decode_varint(reader): - size = reader.read(1)[0] - if size == 0: - return 0 - - negate = True if size > int(0xF0) else False - if negate: - size = size - 0xF0 - i = decode_big_endian(reader, size) - if negate: - i = i * (-1) - return i - - -def encode_string(s): - size = encode_varint(len(s)) - return size + bytearray(s) - - -def decode_string(reader): - length = decode_varint(reader) - return str(reader.read(length)) - - -def encode_list(s): - b = bytearray() - map(b.extend, map(encode, s)) - return encode_varint(len(s)) + b - - -def encode(s): - if s is None: - return bytearray() - if isinstance(s, int): - return encode_varint(s) - elif isinstance(s, str): - return encode_string(s) - elif isinstance(s, list): - return encode_list(s) - else: - print "UNSUPPORTED TYPE!", type(s), s - - -if __name__ == '__main__': - ns = [100, 100, 1000, 256] - ss = [2, 5, 5, 2] - bs = map(encode_big_endian, ns, ss) - ds = map(decode_big_endian, bs, ss) - print ns - print [i[0] for i in ds] - - ss = ["abc", "hi there jim", "ok now what"] - e = map(encode_string, ss) - d = map(decode_string, e) - print ss - print [i[0] for i in d] diff --git a/abci/example/python/app.py b/abci/example/python/app.py deleted file mode 100644 index 1c041be6c..000000000 --- a/abci/example/python/app.py +++ /dev/null @@ -1,82 +0,0 @@ -import sys - -from abci.wire import hex2bytes, decode_big_endian, encode_big_endian -from abci.server import ABCIServer -from abci.reader import BytesBuffer - - -class CounterApplication(): - - def __init__(self): - sys.exit("The python example is out of date. 
Upgrading the Python examples is currently left as an exercise to you.") - self.hashCount = 0 - self.txCount = 0 - self.serial = False - - def echo(self, msg): - return msg, 0 - - def info(self): - return ["hashes:%d, txs:%d" % (self.hashCount, self.txCount)], 0 - - def set_option(self, key, value): - if key == "serial" and value == "on": - self.serial = True - return 0 - - def deliver_tx(self, txBytes): - if self.serial: - txByteArray = bytearray(txBytes) - if len(txBytes) >= 2 and txBytes[:2] == "0x": - txByteArray = hex2bytes(txBytes[2:]) - txValue = decode_big_endian( - BytesBuffer(txByteArray), len(txBytes)) - if txValue != self.txCount: - return None, 6 - self.txCount += 1 - return None, 0 - - def check_tx(self, txBytes): - if self.serial: - txByteArray = bytearray(txBytes) - if len(txBytes) >= 2 and txBytes[:2] == "0x": - txByteArray = hex2bytes(txBytes[2:]) - txValue = decode_big_endian( - BytesBuffer(txByteArray), len(txBytes)) - if txValue < self.txCount: - return 6 - return 0 - - def commit(self): - self.hashCount += 1 - if self.txCount == 0: - return "", 0 - h = encode_big_endian(self.txCount, 8) - h.reverse() - return str(h), 0 - - def add_listener(self): - return 0 - - def rm_listener(self): - return 0 - - def event(self): - return - - -if __name__ == '__main__': - l = len(sys.argv) - if l == 1: - port = 26658 - elif l == 2: - port = int(sys.argv[1]) - else: - print "too many arguments" - quit() - - print 'ABCI Demo APP (Python)' - - app = CounterApplication() - server = ABCIServer(app, port) - server.main_loop() diff --git a/abci/example/python3/abci/__init__.py b/abci/example/python3/abci/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/abci/example/python3/abci/msg.py b/abci/example/python3/abci/msg.py deleted file mode 100644 index 807c4b6b0..000000000 --- a/abci/example/python3/abci/msg.py +++ /dev/null @@ -1,50 +0,0 @@ -from .wire import decode_string - -# map type_byte to message name -message_types = { - 0x01: "echo", - 0x02: "flush", - 0x03: "info", - 0x04: "set_option", - 0x21: "deliver_tx", - 0x22: "check_tx", - 0x23: "commit", - 0x24: "add_listener", - 0x25: "rm_listener", -} - -# return the decoded arguments of abci messages - -class RequestDecoder(): - - def __init__(self, reader): - self.reader = reader - - def echo(self): - return decode_string(self.reader) - - def flush(self): - return - - def info(self): - return - - def set_option(self): - return decode_string(self.reader), decode_string(self.reader) - - def deliver_tx(self): - return decode_string(self.reader) - - def check_tx(self): - return decode_string(self.reader) - - def commit(self): - return - - def add_listener(self): - # TODO - return - - def rm_listener(self): - # TODO - return diff --git a/abci/example/python3/abci/reader.py b/abci/example/python3/abci/reader.py deleted file mode 100644 index c016ac604..000000000 --- a/abci/example/python3/abci/reader.py +++ /dev/null @@ -1,56 +0,0 @@ - -# Simple read() method around a bytearray - - -class BytesBuffer(): - - def __init__(self, b): - self.buf = b - self.readCount = 0 - - def count(self): - return self.readCount - - def reset_count(self): - self.readCount = 0 - - def size(self): - return len(self.buf) - - def peek(self): - return self.buf[0] - - def write(self, b): - # b should be castable to byte array - self.buf += bytearray(b) - - def read(self, n): - if len(self.buf) < n: - print("reader err: buf less than n") - # TODO: exception - return - self.readCount += n - r = self.buf[:n] - self.buf = self.buf[n:] - return r 
- -# Buffer bytes off a tcp connection and read them off in chunks - - -class ConnReader(): - - def __init__(self, conn): - self.conn = conn - self.buf = bytearray() - - # blocking - def read(self, n): - while n > len(self.buf): - moreBuf = self.conn.recv(1024) - if not moreBuf: - raise IOError("dead connection") - self.buf = self.buf + bytearray(moreBuf) - - r = self.buf[:n] - self.buf = self.buf[n:] - return r diff --git a/abci/example/python3/abci/server.py b/abci/example/python3/abci/server.py deleted file mode 100644 index 04063262d..000000000 --- a/abci/example/python3/abci/server.py +++ /dev/null @@ -1,196 +0,0 @@ -import socket -import select -import sys -import logging - -from .wire import decode_varint, encode -from .reader import BytesBuffer -from .msg import RequestDecoder, message_types - -# hold the asyncronous state of a connection -# ie. we may not get enough bytes on one read to decode the message - -logger = logging.getLogger(__name__) - -class Connection(): - - def __init__(self, fd, app): - self.fd = fd - self.app = app - self.recBuf = BytesBuffer(bytearray()) - self.resBuf = BytesBuffer(bytearray()) - self.msgLength = 0 - self.decoder = RequestDecoder(self.recBuf) - self.inProgress = False # are we in the middle of a message - - def recv(this): - data = this.fd.recv(1024) - if not data: # what about len(data) == 0 - raise IOError("dead connection") - this.recBuf.write(data) - -# ABCI server responds to messges by calling methods on the app - -class ABCIServer(): - - def __init__(self, app, port=5410): - self.app = app - # map conn file descriptors to (app, reqBuf, resBuf, msgDecoder) - self.appMap = {} - - self.port = port - self.listen_backlog = 10 - - self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - self.listener.setblocking(0) - self.listener.bind(('', port)) - - self.listener.listen(self.listen_backlog) - - self.shutdown = False - - self.read_list = [self.listener] - self.write_list = [] - - def handle_new_connection(self, r): - new_fd, new_addr = r.accept() - new_fd.setblocking(0) # non-blocking - self.read_list.append(new_fd) - self.write_list.append(new_fd) - print('new connection to', new_addr) - - self.appMap[new_fd] = Connection(new_fd, self.app) - - def handle_conn_closed(self, r): - self.read_list.remove(r) - self.write_list.remove(r) - r.close() - print("connection closed") - - def handle_recv(self, r): - # app, recBuf, resBuf, conn - conn = self.appMap[r] - while True: - try: - print("recv loop") - # check if we need more data first - if conn.inProgress: - if (conn.msgLength == 0 or conn.recBuf.size() < conn.msgLength): - conn.recv() - else: - if conn.recBuf.size() == 0: - conn.recv() - - conn.inProgress = True - - # see if we have enough to get the message length - if conn.msgLength == 0: - ll = conn.recBuf.peek() - if conn.recBuf.size() < 1 + ll: - # we don't have enough bytes to read the length yet - return - print("decoding msg length") - conn.msgLength = decode_varint(conn.recBuf) - - # see if we have enough to decode the message - if conn.recBuf.size() < conn.msgLength: - return - - # now we can decode the message - - # first read the request type and get the particular msg - # decoder - typeByte = conn.recBuf.read(1) - typeByte = int(typeByte[0]) - resTypeByte = typeByte + 0x10 - req_type = message_types[typeByte] - - if req_type == "flush": - # messages are length prefixed - conn.resBuf.write(encode(1)) - conn.resBuf.write([resTypeByte]) - 
conn.fd.send(conn.resBuf.buf) - conn.msgLength = 0 - conn.inProgress = False - conn.resBuf = BytesBuffer(bytearray()) - return - - decoder = getattr(conn.decoder, req_type) - - print("decoding args") - req_args = decoder() - print("got args", req_args) - - # done decoding message - conn.msgLength = 0 - conn.inProgress = False - - req_f = getattr(conn.app, req_type) - if req_args is None: - res = req_f() - elif isinstance(req_args, tuple): - res = req_f(*req_args) - else: - res = req_f(req_args) - - if isinstance(res, tuple): - res, ret_code = res - else: - ret_code = res - res = None - - print("called", req_type, "ret code:", ret_code, 'res:', res) - if ret_code != 0: - print("non-zero retcode:", ret_code) - - if req_type in ("echo", "info"): # these dont return a ret code - enc = encode(res) - # messages are length prefixed - conn.resBuf.write(encode(len(enc) + 1)) - conn.resBuf.write([resTypeByte]) - conn.resBuf.write(enc) - else: - enc, encRet = encode(res), encode(ret_code) - # messages are length prefixed - conn.resBuf.write(encode(len(enc) + len(encRet) + 1)) - conn.resBuf.write([resTypeByte]) - conn.resBuf.write(encRet) - conn.resBuf.write(enc) - except IOError as e: - print("IOError on reading from connection:", e) - self.handle_conn_closed(r) - return - except Exception as e: - logger.exception("error reading from connection") - self.handle_conn_closed(r) - return - - def main_loop(self): - while not self.shutdown: - r_list, w_list, _ = select.select( - self.read_list, self.write_list, [], 2.5) - - for r in r_list: - if (r == self.listener): - try: - self.handle_new_connection(r) - # undo adding to read list ... - except NameError as e: - print("Could not connect due to NameError:", e) - except TypeError as e: - print("Could not connect due to TypeError:", e) - except: - print("Could not connect due to unexpected error:", sys.exc_info()[0]) - else: - self.handle_recv(r) - - def handle_shutdown(self): - for r in self.read_list: - r.close() - for w in self.write_list: - try: - w.close() - except Exception as e: - print(e) # TODO: add logging - self.shutdown = True diff --git a/abci/example/python3/abci/wire.py b/abci/example/python3/abci/wire.py deleted file mode 100644 index 72f5fab8b..000000000 --- a/abci/example/python3/abci/wire.py +++ /dev/null @@ -1,119 +0,0 @@ - -# the decoder works off a reader -# the encoder returns bytearray - - -def hex2bytes(h): - return bytearray(h.decode('hex')) - - -def bytes2hex(b): - if type(b) in (str, str): - return "".join([hex(ord(c))[2:].zfill(2) for c in b]) - else: - return bytes2hex(b.decode()) - - -# expects uvarint64 (no crazy big nums!) 
-def uvarint_size(i): - if i == 0: - return 0 - for j in range(1, 8): - if i < 1 << j * 8: - return j - return 8 - -# expects i < 2**size - - -def encode_big_endian(i, size): - if size == 0: - return bytearray() - return encode_big_endian(i // 256, size - 1) + bytearray([i % 256]) - - -def decode_big_endian(reader, size): - if size == 0: - return 0 - firstByte = reader.read(1)[0] - return firstByte * (256 ** (size - 1)) + decode_big_endian(reader, size - 1) - -# ints are max 16 bytes long - - -def encode_varint(i): - negate = False - if i < 0: - negate = True - i = -i - size = uvarint_size(i) - if size == 0: - return bytearray([0]) - big_end = encode_big_endian(i, size) - if negate: - size += 0xF0 - return bytearray([size]) + big_end - -# returns the int and whats left of the byte array - - -def decode_varint(reader): - size = reader.read(1)[0] - if size == 0: - return 0 - - negate = True if size > int(0xF0) else False - if negate: - size = size - 0xF0 - i = decode_big_endian(reader, size) - if negate: - i = i * (-1) - return i - - -def encode_string(s): - size = encode_varint(len(s)) - return size + bytearray(s, 'utf8') - - -def decode_string(reader): - length = decode_varint(reader) - raw_data = reader.read(length) - return raw_data.decode() - - -def encode_list(s): - b = bytearray() - list(map(b.extend, list(map(encode, s)))) - return encode_varint(len(s)) + b - - -def encode(s): - print('encoding', repr(s)) - if s is None: - return bytearray() - if isinstance(s, int): - return encode_varint(s) - elif isinstance(s, str): - return encode_string(s) - elif isinstance(s, list): - return encode_list(s) - elif isinstance(s, bytearray): - return encode_string(s) - else: - print("UNSUPPORTED TYPE!", type(s), s) - - -if __name__ == '__main__': - ns = [100, 100, 1000, 256] - ss = [2, 5, 5, 2] - bs = list(map(encode_big_endian, ns, ss)) - ds = list(map(decode_big_endian, bs, ss)) - print(ns) - print([i[0] for i in ds]) - - ss = ["abc", "hi there jim", "ok now what"] - e = list(map(encode_string, ss)) - d = list(map(decode_string, e)) - print(ss) - print([i[0] for i in d]) diff --git a/abci/example/python3/app.py b/abci/example/python3/app.py deleted file mode 100644 index 9f051b1e2..000000000 --- a/abci/example/python3/app.py +++ /dev/null @@ -1,82 +0,0 @@ -import sys - -from abci.wire import hex2bytes, decode_big_endian, encode_big_endian -from abci.server import ABCIServer -from abci.reader import BytesBuffer - - -class CounterApplication(): - - def __init__(self): - sys.exit("The python example is out of date. 
Upgrading the Python examples is currently left as an exercise to you.") - self.hashCount = 0 - self.txCount = 0 - self.serial = False - - def echo(self, msg): - return msg, 0 - - def info(self): - return ["hashes:%d, txs:%d" % (self.hashCount, self.txCount)], 0 - - def set_option(self, key, value): - if key == "serial" and value == "on": - self.serial = True - return 0 - - def deliver_tx(self, txBytes): - if self.serial: - txByteArray = bytearray(txBytes) - if len(txBytes) >= 2 and txBytes[:2] == "0x": - txByteArray = hex2bytes(txBytes[2:]) - txValue = decode_big_endian( - BytesBuffer(txByteArray), len(txBytes)) - if txValue != self.txCount: - return None, 6 - self.txCount += 1 - return None, 0 - - def check_tx(self, txBytes): - if self.serial: - txByteArray = bytearray(txBytes) - if len(txBytes) >= 2 and txBytes[:2] == "0x": - txByteArray = hex2bytes(txBytes[2:]) - txValue = decode_big_endian( - BytesBuffer(txByteArray), len(txBytes)) - if txValue < self.txCount: - return 6 - return 0 - - def commit(self): - self.hashCount += 1 - if self.txCount == 0: - return "", 0 - h = encode_big_endian(self.txCount, 8) - h.reverse() - return h.decode(), 0 - - def add_listener(self): - return 0 - - def rm_listener(self): - return 0 - - def event(self): - return - - -if __name__ == '__main__': - l = len(sys.argv) - if l == 1: - port = 26658 - elif l == 2: - port = int(sys.argv[1]) - else: - print("too many arguments") - quit() - - print('ABCI Demo APP (Python)') - - app = CounterApplication() - server = ABCIServer(app, port) - server.main_loop() diff --git a/abci/server/grpc_server.go b/abci/server/grpc_server.go index b953c404d..503f0b64f 100644 --- a/abci/server/grpc_server.go +++ b/abci/server/grpc_server.go @@ -34,25 +34,28 @@ func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) service.Se return s } -// OnStart starts the gRPC service +// OnStart starts the gRPC service. func (s *GRPCServer) OnStart() error { - if err := s.BaseService.OnStart(); err != nil { - return err - } + ln, err := net.Listen(s.proto, s.addr) if err != nil { return err } - s.Logger.Info("Listening", "proto", s.proto, "addr", s.addr) + s.listener = ln s.server = grpc.NewServer() types.RegisterABCIApplicationServer(s.server, s.app) - go s.server.Serve(s.listener) + + s.Logger.Info("Listening", "proto", s.proto, "addr", s.addr) + go func() { + if err := s.server.Serve(s.listener); err != nil { + s.Logger.Error("Error serving gRPC server", "err", err) + } + }() return nil } -// OnStop stops the gRPC server +// OnStop stops the gRPC server. 
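// editor's sketch, not part of this file: the hunk above is exercised through
// the service.Service value returned by NewGRPCServer. The address scheme is
// an assumption based on tmnet's proto://addr convention; `app` and `logger`
// are placeholders.
//
//	srv := NewGRPCServer("tcp://127.0.0.1:26658", app)
//	srv.SetLogger(logger)
//	if err := srv.Start(); err != nil { // OnStart: net.Listen, then Serve
//		return err                  // in its own goroutine
//	}
//	defer srv.Stop() // OnStop: grpc's (*Server).Stop
//
// Serving in a goroutine lets OnStart return immediately, and the new error
// branch surfaces a failing Serve in the logs instead of dropping it.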
func (s *GRPCServer) OnStop() { - s.BaseService.OnStop() s.server.Stop() } diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index efb4d94e0..cf3663d2d 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -5,27 +5,31 @@ import ( "fmt" "io" "net" - "sync" + "os" + "runtime" "github.com/tendermint/tendermint/abci/types" + tmlog "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" + tmsync "github.com/tendermint/tendermint/libs/sync" ) // var maxNumberConnections = 2 type SocketServer struct { service.BaseService + isLoggerSet bool proto string addr string listener net.Listener - connsMtx sync.Mutex + connsMtx tmsync.Mutex conns map[int]net.Conn nextConnID int - appMtx sync.Mutex + appMtx tmsync.Mutex app types.Application } @@ -42,21 +46,24 @@ func NewSocketServer(protoAddr string, app types.Application) service.Service { return s } +func (s *SocketServer) SetLogger(l tmlog.Logger) { + s.BaseService.SetLogger(l) + s.isLoggerSet = true +} + func (s *SocketServer) OnStart() error { - if err := s.BaseService.OnStart(); err != nil { - return err - } ln, err := net.Listen(s.proto, s.addr) if err != nil { return err } + s.listener = ln go s.acceptConnectionsRoutine() + return nil } func (s *SocketServer) OnStop() { - s.BaseService.OnStop() if err := s.listener.Close(); err != nil { s.Logger.Error("Error closing listener", "err", err) } @@ -105,7 +112,7 @@ func (s *SocketServer) acceptConnectionsRoutine() { if !s.IsRunning() { return // Ignore error from listener closing. } - s.Logger.Error("Failed to accept connection: " + err.Error()) + s.Logger.Error("Failed to accept connection", "err", err) continue } @@ -132,15 +139,15 @@ func (s *SocketServer) waitForClose(closeConn chan error, connID int) { case err == io.EOF: s.Logger.Error("Connection was closed by client") case err != nil: - s.Logger.Error("Connection error", "error", err) + s.Logger.Error("Connection error", "err", err) default: // never happens - s.Logger.Error("Connection was closed.") + s.Logger.Error("Connection was closed") } // Close the connection if err := s.rmConn(connID); err != nil { - s.Logger.Error("Error in closing connection", "error", err) + s.Logger.Error("Error closing connection", "err", err) } } @@ -153,7 +160,14 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp // make sure to recover from any app-related panics to allow proper socket cleanup r := recover() if r != nil { - closeConn <- fmt.Errorf("recovered from panic: %v", r) + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + err := fmt.Errorf("recovered from panic: %v\n%s", r, buf) + if !s.isLoggerSet { + fmt.Fprintln(os.Stderr, err) + } + closeConn <- err s.appMtx.Unlock() } }() @@ -166,7 +180,7 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp if err == io.EOF { closeConn <- err } else { - closeConn <- fmt.Errorf("error reading message: %v", err) + closeConn <- fmt.Errorf("error reading message: %w", err) } return } @@ -186,9 +200,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types case *types.Request_Info: res := s.app.Info(*r.Info) responses <- types.ToResponseInfo(res) - case *types.Request_SetOption: - res := s.app.SetOption(*r.SetOption) - responses <- types.ToResponseSetOption(res) case *types.Request_DeliverTx: res := s.app.DeliverTx(*r.DeliverTx) responses <- 
types.ToResponseDeliverTx(res) @@ -210,6 +221,18 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types case *types.Request_EndBlock: res := s.app.EndBlock(*r.EndBlock) responses <- types.ToResponseEndBlock(res) + case *types.Request_ListSnapshots: + res := s.app.ListSnapshots(*r.ListSnapshots) + responses <- types.ToResponseListSnapshots(res) + case *types.Request_OfferSnapshot: + res := s.app.OfferSnapshot(*r.OfferSnapshot) + responses <- types.ToResponseOfferSnapshot(res) + case *types.Request_LoadSnapshotChunk: + res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk) + responses <- types.ToResponseLoadSnapshotChunk(res) + case *types.Request_ApplySnapshotChunk: + res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk) + responses <- types.ToResponseApplySnapshotChunk(res) default: responses <- types.ToResponseException("Unknown request") } @@ -223,13 +246,13 @@ func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, res var res = <-responses err := types.WriteMessage(res, bufWriter) if err != nil { - closeConn <- fmt.Errorf("error writing message: %v", err.Error()) + closeConn <- fmt.Errorf("error writing message: %w", err) return } if _, ok := res.Value.(*types.Response_Flush); ok { err = bufWriter.Flush() if err != nil { - closeConn <- fmt.Errorf("error flushing write buffer: %v", err.Error()) + closeConn <- fmt.Errorf("error flushing write buffer: %w", err) return } } diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 36989f6ac..1a11a9380 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -16,7 +16,7 @@ func InitChain(client abcicli.Client) error { for i := 0; i < total; i++ { pubkey := tmrand.Bytes(33) power := tmrand.Int() - vals[i] = types.Ed25519ValidatorUpdate(pubkey, int64(power)) + vals[i] = types.UpdateValidator(pubkey, int64(power), "") } _, err := client.InitChainSync(types.RequestInitChain{ Validators: vals, @@ -29,17 +29,6 @@ func InitChain(client abcicli.Client) error { return nil } -func SetOption(client abcicli.Client, key, value string) error { - _, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value}) - if err != nil { - fmt.Println("Failed test: SetOption") - fmt.Printf("error while setting %v=%v: \nerror: %v\n", key, value, err) - return err - } - fmt.Println("Passed test: SetOption") - return nil -} - func Commit(client abcicli.Client, hashExp []byte) error { res, err := client.CommitSync() data := res.Data diff --git a/abci/tests/test_app/app.go b/abci/tests/test_app/app.go index 9c32fcc7d..8876ada48 100644 --- a/abci/tests/test_app/app.go +++ b/abci/tests/test_app/app.go @@ -25,13 +25,6 @@ func startClient(abciType string) abcicli.Client { return client } -func setOption(client abcicli.Client, key, value string) { - _, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value}) - if err != nil { - panicf("setting %v=%v: \nerr: %v", key, value, err) - } -} - func commit(client abcicli.Client, hashExp []byte) { res, err := client.CommitSync() if err != nil { @@ -55,24 +48,6 @@ func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp [] } } -/*func checkTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) { - res, err := client.CheckTxSync(txBytes) - if err != nil { - panicf("client error: %v", err) - } - if res.IsErr() { - panicf("checking tx %X: %v\nlog: %v", txBytes, res.Log) - } - if res.Code != codeExp { - panicf("CheckTx response code was unexpected. Got %v expected %v. 
Log: %v", - res.Code, codeExp, res.Log) - } - if !bytes.Equal(res.Data, dataExp) { - panicf("CheckTx response data was unexpected. Got %X expected %X", - res.Data, dataExp) - } -}*/ - func panicf(format string, a ...interface{}) { panic(fmt.Sprintf(format, a...)) } diff --git a/abci/tests/test_app/main.go b/abci/tests/test_app/main.go index ca298d7e2..011793888 100644 --- a/abci/tests/test_app/main.go +++ b/abci/tests/test_app/main.go @@ -7,7 +7,6 @@ import ( "os/exec" "time" - "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" ) @@ -41,7 +40,7 @@ func ensureABCIIsUp(typ string, n int) error { if err == nil { break } - <-time.After(500 * time.Millisecond) + time.Sleep(500 * time.Millisecond) } return err } @@ -59,27 +58,36 @@ func testCounter() { if err := cmd.Start(); err != nil { log.Fatalf("starting %q err: %v", abciApp, err) } - defer cmd.Wait() - defer cmd.Process.Kill() + defer func() { + if err := cmd.Process.Kill(); err != nil { + log.Printf("error on process kill: %v", err) + } + if err := cmd.Wait(); err != nil { + log.Printf("error while waiting for cmd to exit: %v", err) + } + }() if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil { - log.Fatalf("echo failed: %v", err) + log.Fatalf("echo failed: %v", err) //nolint:gocritic } client := startClient(abciType) - defer client.Stop() + defer func() { + if err := client.Stop(); err != nil { + log.Printf("error trying client stop: %v", err) + } + }() - setOption(client, "serial", "on") - commit(client, nil) - deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) + // commit(client, nil) + // deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) commit(client, nil) deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil) commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) - deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil) + // deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil) deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil) deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil) deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil) deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil) - deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil) + // deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil) commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) } diff --git a/abci/tests/test_cli/ex2.abci b/abci/tests/test_cli/ex2.abci index 3b435f22a..965ca842c 100644 --- a/abci/tests/test_cli/ex2.abci +++ b/abci/tests/test_cli/ex2.abci @@ -1,4 +1,3 @@ -set_option serial on check_tx 0x00 check_tx 0xff deliver_tx 0x00 diff --git a/abci/tests/test_cli/ex2.abci.out b/abci/tests/test_cli/ex2.abci.out index 5bceb85d8..7ef8abbc4 100644 --- a/abci/tests/test_cli/ex2.abci.out +++ b/abci/tests/test_cli/ex2.abci.out @@ -1,7 +1,3 @@ -> set_option serial on --> code: OK --> log: OK (SetOption doesn't return anything.) - > check_tx 0x00 -> code: OK @@ -12,18 +8,16 @@ -> code: OK > check_tx 0x00 --> code: 2 --> log: Invalid nonce. Expected >= 1, got 0 +-> code: OK > deliver_tx 0x01 -> code: OK > deliver_tx 0x04 --> code: 2 --> log: Invalid nonce. 
Expected 2, got 4 +-> code: OK > info -> code: OK --> data: {"hashes":0,"txs":2} --> data.hex: 0x7B22686173686573223A302C22747873223A327D +-> data: {"hashes":0,"txs":3} +-> data.hex: 0x7B22686173686573223A302C22747873223A337D diff --git a/abci/types/application.go b/abci/types/application.go index 9dd77c4ef..5b8270ba6 100644 --- a/abci/types/application.go +++ b/abci/types/application.go @@ -1,4 +1,4 @@ -package types // nolint: goimports +package types import ( context "golang.org/x/net/context" @@ -10,9 +10,8 @@ import ( // except CheckTx/DeliverTx, which take `tx []byte`, and `Commit`, which takes nothing. type Application interface { // Info/Query Connection - Info(RequestInfo) ResponseInfo // Return application info - SetOption(RequestSetOption) ResponseSetOption // Set application option - Query(RequestQuery) ResponseQuery // Query for state + Info(RequestInfo) ResponseInfo // Return application info + Query(RequestQuery) ResponseQuery // Query for state // Mempool Connection CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool @@ -23,6 +22,12 @@ type Application interface { DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set Commit() ResponseCommit // Commit the state and return the application Merkle root hash + + // State Sync Connection + ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots + OfferSnapshot(RequestOfferSnapshot) ResponseOfferSnapshot // Offer a snapshot to the application + LoadSnapshotChunk(RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk // Load a snapshot chunk + ApplySnapshotChunk(RequestApplySnapshotChunk) ResponseApplySnapshotChunk // Apply a shapshot chunk } //------------------------------------------------------- @@ -41,10 +46,6 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo { return ResponseInfo{} } -func (BaseApplication) SetOption(req RequestSetOption) ResponseSetOption { - return ResponseSetOption{} -} - func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx { return ResponseDeliverTx{Code: CodeTypeOK} } @@ -73,6 +74,22 @@ func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock { return ResponseEndBlock{} } +func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots { + return ResponseListSnapshots{} +} + +func (BaseApplication) OfferSnapshot(req RequestOfferSnapshot) ResponseOfferSnapshot { + return ResponseOfferSnapshot{} +} + +func (BaseApplication) LoadSnapshotChunk(req RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk { + return ResponseLoadSnapshotChunk{} +} + +func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) ResponseApplySnapshotChunk { + return ResponseApplySnapshotChunk{} +} + //------------------------------------------------------- // GRPCApplication is a GRPC wrapper for Application @@ -97,11 +114,6 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon return &res, nil } -func (app *GRPCApplication) SetOption(ctx context.Context, req *RequestSetOption) (*ResponseSetOption, error) { - res := app.app.SetOption(*req) - return &res, nil -} - func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) { res := app.app.DeliverTx(*req) return &res, nil @@ -136,3 +148,27 @@ func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) res := 
app.app.EndBlock(*req) return &res, nil } + +func (app *GRPCApplication) ListSnapshots( + ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) { + res := app.app.ListSnapshots(*req) + return &res, nil +} + +func (app *GRPCApplication) OfferSnapshot( + ctx context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { + res := app.app.OfferSnapshot(*req) + return &res, nil +} + +func (app *GRPCApplication) LoadSnapshotChunk( + ctx context.Context, req *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) { + res := app.app.LoadSnapshotChunk(*req) + return &res, nil +} + +func (app *GRPCApplication) ApplySnapshotChunk( + ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { + res := app.app.ApplySnapshotChunk(*req) + return &res, nil +} diff --git a/abci/types/messages.go b/abci/types/messages.go index ad18727a8..eaf1721dd 100644 --- a/abci/types/messages.go +++ b/abci/types/messages.go @@ -87,12 +87,6 @@ func ToRequestInfo(req RequestInfo) *Request { } } -func ToRequestSetOption(req RequestSetOption) *Request { - return &Request{ - Value: &Request_SetOption{&req}, - } -} - func ToRequestDeliverTx(req RequestDeliverTx) *Request { return &Request{ Value: &Request_DeliverTx{&req}, @@ -135,6 +129,30 @@ func ToRequestEndBlock(req RequestEndBlock) *Request { } } +func ToRequestListSnapshots(req RequestListSnapshots) *Request { + return &Request{ + Value: &Request_ListSnapshots{&req}, + } +} + +func ToRequestOfferSnapshot(req RequestOfferSnapshot) *Request { + return &Request{ + Value: &Request_OfferSnapshot{&req}, + } +} + +func ToRequestLoadSnapshotChunk(req RequestLoadSnapshotChunk) *Request { + return &Request{ + Value: &Request_LoadSnapshotChunk{&req}, + } +} + +func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request { + return &Request{ + Value: &Request_ApplySnapshotChunk{&req}, + } +} + //---------------------------------------- func ToResponseException(errStr string) *Response { @@ -160,13 +178,6 @@ func ToResponseInfo(res ResponseInfo) *Response { Value: &Response_Info{&res}, } } - -func ToResponseSetOption(res ResponseSetOption) *Response { - return &Response{ - Value: &Response_SetOption{&res}, - } -} - func ToResponseDeliverTx(res ResponseDeliverTx) *Response { return &Response{ Value: &Response_DeliverTx{&res}, @@ -208,3 +219,27 @@ func ToResponseEndBlock(res ResponseEndBlock) *Response { Value: &Response_EndBlock{&res}, } } + +func ToResponseListSnapshots(res ResponseListSnapshots) *Response { + return &Response{ + Value: &Response_ListSnapshots{&res}, + } +} + +func ToResponseOfferSnapshot(res ResponseOfferSnapshot) *Response { + return &Response{ + Value: &Response_OfferSnapshot{&res}, + } +} + +func ToResponseLoadSnapshotChunk(res ResponseLoadSnapshotChunk) *Response { + return &Response{ + Value: &Response_LoadSnapshotChunk{&res}, + } +} + +func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response { + return &Response{ + Value: &Response_ApplySnapshotChunk{&res}, + } +} diff --git a/abci/types/messages_test.go b/abci/types/messages_test.go index 3aead256f..8da12cfff 100644 --- a/abci/types/messages_test.go +++ b/abci/types/messages_test.go @@ -9,7 +9,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/libs/kv" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) func TestMarshalJSON(t *testing.T) { @@ -24,7 +24,7 @@ func TestMarshalJSON(t *testing.T) { Events: []Event{ { 
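                                // editor's note: libs/kv's kv.Pair is replaced by abci's own
                                // EventAttribute; both carry Key/Value byte slices, so these
                                // tests only swap the element type of Attributes.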
Type: "testEvent", - Attributes: []kv.Pair{ + Attributes: []EventAttribute{ {Key: []byte("pho"), Value: []byte("bo")}, }, }, @@ -55,13 +55,13 @@ func TestWriteReadMessageSimple(t *testing.T) { err = ReadMessage(buf, msg) assert.Nil(t, err) - assert.Equal(t, c, msg) + assert.True(t, proto.Equal(c, msg)) } } func TestWriteReadMessage(t *testing.T) { cases := []proto.Message{ - &Header{ + &tmproto.Header{ Height: 4, ChainID: "test", }, @@ -73,11 +73,11 @@ func TestWriteReadMessage(t *testing.T) { err := WriteMessage(c, buf) assert.Nil(t, err) - msg := new(Header) + msg := new(tmproto.Header) err = ReadMessage(buf, msg) assert.Nil(t, err) - assert.Equal(t, c, msg) + assert.True(t, proto.Equal(c, msg)) } } @@ -91,7 +91,7 @@ func TestWriteReadMessage2(t *testing.T) { Events: []Event{ { Type: "testEvent", - Attributes: []kv.Pair{ + Attributes: []EventAttribute{ {Key: []byte("abc"), Value: []byte("def")}, }, }, @@ -109,6 +109,6 @@ func TestWriteReadMessage2(t *testing.T) { err = ReadMessage(buf, msg) assert.Nil(t, err) - assert.Equal(t, c, msg) + assert.True(t, proto.Equal(c, msg)) } } diff --git a/abci/types/protoreplace/protoreplace.go b/abci/types/protoreplace/protoreplace.go deleted file mode 100644 index 7058a70fb..000000000 --- a/abci/types/protoreplace/protoreplace.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build ignore - -package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "os/exec" - "regexp" - "strings" -) - -// This script replaces most `[]byte` with `data.Bytes` in a `.pb.go` file. -// It was written before we realized we could use `gogo/protobuf` to achieve -// this more natively. So it's here for safe keeping in case we ever need to -// abandon `gogo/protobuf`. - -func main() { - bytePattern := regexp.MustCompile("[[][]]byte") - const oldPath = "types/types.pb.go" - const tmpPath = "types/types.pb.new" - content, err := ioutil.ReadFile(oldPath) - if err != nil { - panic("cannot read " + oldPath) - os.Exit(1) - } - lines := bytes.Split(content, []byte("\n")) - outFile, _ := os.Create(tmpPath) - wroteImport := false - for _, line_bytes := range lines { - line := string(line_bytes) - gotPackageLine := strings.HasPrefix(line, "package ") - writeImportTime := strings.HasPrefix(line, "import ") - containsDescriptor := strings.Contains(line, "Descriptor") - containsByteArray := strings.Contains(line, "[]byte") - if containsByteArray && !containsDescriptor { - line = string(bytePattern.ReplaceAll([]byte(line), []byte("data.Bytes"))) - } - if writeImportTime && !wroteImport { - wroteImport = true - fmt.Fprintf(outFile, "import \"github.com/tendermint/go-amino/data\"\n") - - } - if gotPackageLine { - fmt.Fprintf(outFile, "%s\n", "//nolint: gas") - } - fmt.Fprintf(outFile, "%s\n", line) - } - outFile.Close() - os.Remove(oldPath) - os.Rename(tmpPath, oldPath) - exec.Command("goimports", "-w", oldPath) -} diff --git a/abci/types/pubkey.go b/abci/types/pubkey.go index 46cd8c5e8..8530d9538 100644 --- a/abci/types/pubkey.go +++ b/abci/types/pubkey.go @@ -1,16 +1,44 @@ package types -const ( - PubKeyEd25519 = "ed25519" +import ( + fmt "fmt" + + "github.com/tendermint/tendermint/crypto/ed25519" + cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/secp256k1" ) -func Ed25519ValidatorUpdate(pubkey []byte, power int64) ValidatorUpdate { +func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate { + pke := ed25519.PubKey(pk) + + pkp, err := cryptoenc.PubKeyToProto(pke) + if err != nil { + panic(err) + } + return ValidatorUpdate{ // 
Address: - PubKey: PubKey{ - Type: PubKeyEd25519, - Data: pubkey, - }, - Power: power, + PubKey: pkp, + Power: power, + } +} + +func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate { + switch keyType { + case "", ed25519.KeyType: + return Ed25519ValidatorUpdate(pk, power) + case secp256k1.KeyType: + pke := secp256k1.PubKey(pk) + pkp, err := cryptoenc.PubKeyToProto(pke) + if err != nil { + panic(err) + } + return ValidatorUpdate{ + // Address: + PubKey: pkp, + Power: power, + } + default: + panic(fmt.Sprintf("key type %s not supported", keyType)) } } diff --git a/abci/types/result.go b/abci/types/result.go index 321e71f08..1442ea39c 100644 --- a/abci/types/result.go +++ b/abci/types/result.go @@ -52,16 +52,6 @@ var ( jsonpbUnmarshaller = jsonpb.Unmarshaler{} ) -func (r *ResponseSetOption) MarshalJSON() ([]byte, error) { - s, err := jsonpbMarshaller.MarshalToString(r) - return []byte(s), err -} - -func (r *ResponseSetOption) UnmarshalJSON(b []byte) error { - reader := bytes.NewBuffer(b) - return jsonpbUnmarshaller.Unmarshal(reader, r) -} - func (r *ResponseCheckTx) MarshalJSON() ([]byte, error) { s, err := jsonpbMarshaller.MarshalToString(r) return []byte(s), err @@ -102,6 +92,16 @@ func (r *ResponseCommit) UnmarshalJSON(b []byte) error { return jsonpbUnmarshaller.Unmarshal(reader, r) } +func (r *EventAttribute) MarshalJSON() ([]byte, error) { + s, err := jsonpbMarshaller.MarshalToString(r) + return []byte(s), err +} + +func (r *EventAttribute) UnmarshalJSON(b []byte) error { + reader := bytes.NewBuffer(b) + return jsonpbUnmarshaller.Unmarshal(reader, r) +} + // Some compile time assertions to ensure we don't // have accidental runtime surprises later on. @@ -116,4 +116,5 @@ var _ jsonRoundTripper = (*ResponseCommit)(nil) var _ jsonRoundTripper = (*ResponseQuery)(nil) var _ jsonRoundTripper = (*ResponseDeliverTx)(nil) var _ jsonRoundTripper = (*ResponseCheckTx)(nil) -var _ jsonRoundTripper = (*ResponseSetOption)(nil) + +var _ jsonRoundTripper = (*EventAttribute)(nil) diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 51ff4aedd..67599af15 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1,20 +1,17 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: abci/types/types.proto +// source: tendermint/abci/types.proto package types import ( - bytes "bytes" context "context" fmt "fmt" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - golang_proto "github.com/golang/protobuf/proto" - _ "github.com/golang/protobuf/ptypes/duration" - _ "github.com/golang/protobuf/ptypes/timestamp" - merkle "github.com/tendermint/tendermint/crypto/merkle" - kv "github.com/tendermint/tendermint/libs/kv" + crypto "github.com/tendermint/tendermint/proto/tendermint/crypto" + types1 "github.com/tendermint/tendermint/proto/tendermint/types" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" @@ -26,7 +23,6 @@ import ( // Reference imports to suppress errors if they are not otherwise used. 
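// editor's note: the generated source moved from abci/types/types.proto to
// proto/tendermint/abci/types.proto, so the file-descriptor variable and every
// Descriptor() index change with it; CheckTxType's enum names now follow
// protobuf style (NEW/RECHECK rather than New/Recheck), and the duplicate
// golang/protobuf registration path is dropped in favor of gogo alone.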
var _ = proto.Marshal -var _ = golang_proto.Marshal var _ = fmt.Errorf var _ = math.Inf var _ = time.Kitchen @@ -45,13 +41,13 @@ const ( ) var CheckTxType_name = map[int32]string{ - 0: "New", - 1: "Recheck", + 0: "NEW", + 1: "RECHECK", } var CheckTxType_value = map[string]int32{ - "New": 0, - "Recheck": 1, + "NEW": 0, + "RECHECK": 1, } func (x CheckTxType) String() string { @@ -59,7 +55,109 @@ func (x CheckTxType) String() string { } func (CheckTxType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{0} + return fileDescriptor_252557cfdd89a31a, []int{0} +} + +type EvidenceType int32 + +const ( + EvidenceType_UNKNOWN EvidenceType = 0 + EvidenceType_DUPLICATE_VOTE EvidenceType = 1 + EvidenceType_LIGHT_CLIENT_ATTACK EvidenceType = 2 +) + +var EvidenceType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "DUPLICATE_VOTE", + 2: "LIGHT_CLIENT_ATTACK", +} + +var EvidenceType_value = map[string]int32{ + "UNKNOWN": 0, + "DUPLICATE_VOTE": 1, + "LIGHT_CLIENT_ATTACK": 2, +} + +func (x EvidenceType) String() string { + return proto.EnumName(EvidenceType_name, int32(x)) +} + +func (EvidenceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{1} +} + +type ResponseOfferSnapshot_Result int32 + +const ( + ResponseOfferSnapshot_UNKNOWN ResponseOfferSnapshot_Result = 0 + ResponseOfferSnapshot_ACCEPT ResponseOfferSnapshot_Result = 1 + ResponseOfferSnapshot_ABORT ResponseOfferSnapshot_Result = 2 + ResponseOfferSnapshot_REJECT ResponseOfferSnapshot_Result = 3 + ResponseOfferSnapshot_REJECT_FORMAT ResponseOfferSnapshot_Result = 4 + ResponseOfferSnapshot_REJECT_SENDER ResponseOfferSnapshot_Result = 5 +) + +var ResponseOfferSnapshot_Result_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ACCEPT", + 2: "ABORT", + 3: "REJECT", + 4: "REJECT_FORMAT", + 5: "REJECT_SENDER", +} + +var ResponseOfferSnapshot_Result_value = map[string]int32{ + "UNKNOWN": 0, + "ACCEPT": 1, + "ABORT": 2, + "REJECT": 3, + "REJECT_FORMAT": 4, + "REJECT_SENDER": 5, +} + +func (x ResponseOfferSnapshot_Result) String() string { + return proto.EnumName(ResponseOfferSnapshot_Result_name, int32(x)) +} + +func (ResponseOfferSnapshot_Result) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{28, 0} +} + +type ResponseApplySnapshotChunk_Result int32 + +const ( + ResponseApplySnapshotChunk_UNKNOWN ResponseApplySnapshotChunk_Result = 0 + ResponseApplySnapshotChunk_ACCEPT ResponseApplySnapshotChunk_Result = 1 + ResponseApplySnapshotChunk_ABORT ResponseApplySnapshotChunk_Result = 2 + ResponseApplySnapshotChunk_RETRY ResponseApplySnapshotChunk_Result = 3 + ResponseApplySnapshotChunk_RETRY_SNAPSHOT ResponseApplySnapshotChunk_Result = 4 + ResponseApplySnapshotChunk_REJECT_SNAPSHOT ResponseApplySnapshotChunk_Result = 5 +) + +var ResponseApplySnapshotChunk_Result_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ACCEPT", + 2: "ABORT", + 3: "RETRY", + 4: "RETRY_SNAPSHOT", + 5: "REJECT_SNAPSHOT", +} + +var ResponseApplySnapshotChunk_Result_value = map[string]int32{ + "UNKNOWN": 0, + "ACCEPT": 1, + "ABORT": 2, + "RETRY": 3, + "RETRY_SNAPSHOT": 4, + "REJECT_SNAPSHOT": 5, +} + +func (x ResponseApplySnapshotChunk_Result) String() string { + return proto.EnumName(ResponseApplySnapshotChunk_Result_name, int32(x)) +} + +func (ResponseApplySnapshotChunk_Result) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{30, 0} } type Request struct { @@ -67,7 +165,6 @@ type Request struct { // *Request_Echo // *Request_Flush // *Request_Info - // 
*Request_SetOption // *Request_InitChain // *Request_Query // *Request_BeginBlock @@ -75,17 +172,18 @@ type Request struct { // *Request_DeliverTx // *Request_EndBlock // *Request_Commit - Value isRequest_Value `protobuf_oneof:"value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // *Request_ListSnapshots + // *Request_OfferSnapshot + // *Request_LoadSnapshotChunk + // *Request_ApplySnapshotChunk + Value isRequest_Value `protobuf_oneof:"value"` } func (m *Request) Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{0} + return fileDescriptor_252557cfdd89a31a, []int{0} } func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -116,56 +214,67 @@ var xxx_messageInfo_Request proto.InternalMessageInfo type isRequest_Value interface { isRequest_Value() - Equal(interface{}) bool MarshalTo([]byte) (int, error) Size() int } type Request_Echo struct { - Echo *RequestEcho `protobuf:"bytes,2,opt,name=echo,proto3,oneof" json:"echo,omitempty"` + Echo *RequestEcho `protobuf:"bytes,1,opt,name=echo,proto3,oneof" json:"echo,omitempty"` } type Request_Flush struct { - Flush *RequestFlush `protobuf:"bytes,3,opt,name=flush,proto3,oneof" json:"flush,omitempty"` + Flush *RequestFlush `protobuf:"bytes,2,opt,name=flush,proto3,oneof" json:"flush,omitempty"` } type Request_Info struct { - Info *RequestInfo `protobuf:"bytes,4,opt,name=info,proto3,oneof" json:"info,omitempty"` -} -type Request_SetOption struct { - SetOption *RequestSetOption `protobuf:"bytes,5,opt,name=set_option,json=setOption,proto3,oneof" json:"set_option,omitempty"` + Info *RequestInfo `protobuf:"bytes,3,opt,name=info,proto3,oneof" json:"info,omitempty"` } type Request_InitChain struct { - InitChain *RequestInitChain `protobuf:"bytes,6,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` + InitChain *RequestInitChain `protobuf:"bytes,4,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` } type Request_Query struct { - Query *RequestQuery `protobuf:"bytes,7,opt,name=query,proto3,oneof" json:"query,omitempty"` + Query *RequestQuery `protobuf:"bytes,5,opt,name=query,proto3,oneof" json:"query,omitempty"` } type Request_BeginBlock struct { - BeginBlock *RequestBeginBlock `protobuf:"bytes,8,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` + BeginBlock *RequestBeginBlock `protobuf:"bytes,6,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` } type Request_CheckTx struct { - CheckTx *RequestCheckTx `protobuf:"bytes,9,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` + CheckTx *RequestCheckTx `protobuf:"bytes,7,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` } type Request_DeliverTx struct { - DeliverTx *RequestDeliverTx `protobuf:"bytes,19,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` + DeliverTx *RequestDeliverTx `protobuf:"bytes,8,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` } type Request_EndBlock struct { - EndBlock *RequestEndBlock `protobuf:"bytes,11,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` + EndBlock *RequestEndBlock `protobuf:"bytes,9,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` } type Request_Commit 
struct { - Commit *RequestCommit `protobuf:"bytes,12,opt,name=commit,proto3,oneof" json:"commit,omitempty"` + Commit *RequestCommit `protobuf:"bytes,10,opt,name=commit,proto3,oneof" json:"commit,omitempty"` +} +type Request_ListSnapshots struct { + ListSnapshots *RequestListSnapshots `protobuf:"bytes,11,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` +} +type Request_OfferSnapshot struct { + OfferSnapshot *RequestOfferSnapshot `protobuf:"bytes,12,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` +} +type Request_LoadSnapshotChunk struct { + LoadSnapshotChunk *RequestLoadSnapshotChunk `protobuf:"bytes,13,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` +} +type Request_ApplySnapshotChunk struct { + ApplySnapshotChunk *RequestApplySnapshotChunk `protobuf:"bytes,14,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` } -func (*Request_Echo) isRequest_Value() {} -func (*Request_Flush) isRequest_Value() {} -func (*Request_Info) isRequest_Value() {} -func (*Request_SetOption) isRequest_Value() {} -func (*Request_InitChain) isRequest_Value() {} -func (*Request_Query) isRequest_Value() {} -func (*Request_BeginBlock) isRequest_Value() {} -func (*Request_CheckTx) isRequest_Value() {} -func (*Request_DeliverTx) isRequest_Value() {} -func (*Request_EndBlock) isRequest_Value() {} -func (*Request_Commit) isRequest_Value() {} +func (*Request_Echo) isRequest_Value() {} +func (*Request_Flush) isRequest_Value() {} +func (*Request_Info) isRequest_Value() {} +func (*Request_InitChain) isRequest_Value() {} +func (*Request_Query) isRequest_Value() {} +func (*Request_BeginBlock) isRequest_Value() {} +func (*Request_CheckTx) isRequest_Value() {} +func (*Request_DeliverTx) isRequest_Value() {} +func (*Request_EndBlock) isRequest_Value() {} +func (*Request_Commit) isRequest_Value() {} +func (*Request_ListSnapshots) isRequest_Value() {} +func (*Request_OfferSnapshot) isRequest_Value() {} +func (*Request_LoadSnapshotChunk) isRequest_Value() {} +func (*Request_ApplySnapshotChunk) isRequest_Value() {} func (m *Request) GetValue() isRequest_Value { if m != nil { @@ -195,13 +304,6 @@ func (m *Request) GetInfo() *RequestInfo { return nil } -func (m *Request) GetSetOption() *RequestSetOption { - if x, ok := m.GetValue().(*Request_SetOption); ok { - return x.SetOption - } - return nil -} - func (m *Request) GetInitChain() *RequestInitChain { if x, ok := m.GetValue().(*Request_InitChain); ok { return x.InitChain @@ -251,13 +353,40 @@ func (m *Request) GetCommit() *RequestCommit { return nil } +func (m *Request) GetListSnapshots() *RequestListSnapshots { + if x, ok := m.GetValue().(*Request_ListSnapshots); ok { + return x.ListSnapshots + } + return nil +} + +func (m *Request) GetOfferSnapshot() *RequestOfferSnapshot { + if x, ok := m.GetValue().(*Request_OfferSnapshot); ok { + return x.OfferSnapshot + } + return nil +} + +func (m *Request) GetLoadSnapshotChunk() *RequestLoadSnapshotChunk { + if x, ok := m.GetValue().(*Request_LoadSnapshotChunk); ok { + return x.LoadSnapshotChunk + } + return nil +} + +func (m *Request) GetApplySnapshotChunk() *RequestApplySnapshotChunk { + if x, ok := m.GetValue().(*Request_ApplySnapshotChunk); ok { + return x.ApplySnapshotChunk + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
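// editor's sketch, not part of the generated code: consumers dispatch on the
// Value oneof with a type switch, as socket_server.go's handleRequest does
// above; the four snapshot cases are the new arrivals in this change.
//
//	switch v := req.Value.(type) {
//	case *Request_Echo:
//		_ = v.Echo.Message
//	case *Request_ListSnapshots:
//		_ = v.ListSnapshots // state sync: list available snapshots
//	case *Request_ApplySnapshotChunk:
//		_ = v.ApplySnapshotChunk.Chunk
//	default:
//		// would map to types.ToResponseException("Unknown request")
//	}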
func (*Request) XXX_OneofWrappers() []interface{} { return []interface{}{ (*Request_Echo)(nil), (*Request_Flush)(nil), (*Request_Info)(nil), - (*Request_SetOption)(nil), (*Request_InitChain)(nil), (*Request_Query)(nil), (*Request_BeginBlock)(nil), @@ -265,21 +394,22 @@ func (*Request) XXX_OneofWrappers() []interface{} { (*Request_DeliverTx)(nil), (*Request_EndBlock)(nil), (*Request_Commit)(nil), + (*Request_ListSnapshots)(nil), + (*Request_OfferSnapshot)(nil), + (*Request_LoadSnapshotChunk)(nil), + (*Request_ApplySnapshotChunk)(nil), } } type RequestEcho struct { - Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` } func (m *RequestEcho) Reset() { *m = RequestEcho{} } func (m *RequestEcho) String() string { return proto.CompactTextString(m) } func (*RequestEcho) ProtoMessage() {} func (*RequestEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{1} + return fileDescriptor_252557cfdd89a31a, []int{1} } func (m *RequestEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -316,16 +446,13 @@ func (m *RequestEcho) GetMessage() string { } type RequestFlush struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } func (m *RequestFlush) Reset() { *m = RequestFlush{} } func (m *RequestFlush) String() string { return proto.CompactTextString(m) } func (*RequestFlush) ProtoMessage() {} func (*RequestFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{2} + return fileDescriptor_252557cfdd89a31a, []int{2} } func (m *RequestFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -355,19 +482,16 @@ func (m *RequestFlush) XXX_DiscardUnknown() { var xxx_messageInfo_RequestFlush proto.InternalMessageInfo type RequestInfo struct { - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - BlockVersion uint64 `protobuf:"varint,2,opt,name=block_version,json=blockVersion,proto3" json:"block_version,omitempty"` - P2PVersion uint64 `protobuf:"varint,3,opt,name=p2p_version,json=p2pVersion,proto3" json:"p2p_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + BlockVersion uint64 `protobuf:"varint,2,opt,name=block_version,json=blockVersion,proto3" json:"block_version,omitempty"` + P2PVersion uint64 `protobuf:"varint,3,opt,name=p2p_version,json=p2pVersion,proto3" json:"p2p_version,omitempty"` } func (m *RequestInfo) Reset() { *m = RequestInfo{} } func (m *RequestInfo) String() string { return proto.CompactTextString(m) } func (*RequestInfo) ProtoMessage() {} func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{3} + return fileDescriptor_252557cfdd89a31a, []int{3} } func (m *RequestInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -417,78 +541,20 @@ func (m *RequestInfo) GetP2PVersion() uint64 { return 0 } -// nondeterministic -type RequestSetOption struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte 
`json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RequestSetOption) Reset() { *m = RequestSetOption{} } -func (m *RequestSetOption) String() string { return proto.CompactTextString(m) } -func (*RequestSetOption) ProtoMessage() {} -func (*RequestSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{4} -} -func (m *RequestSetOption) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestSetOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestSetOption.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestSetOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestSetOption.Merge(m, src) -} -func (m *RequestSetOption) XXX_Size() int { - return m.Size() -} -func (m *RequestSetOption) XXX_DiscardUnknown() { - xxx_messageInfo_RequestSetOption.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestSetOption proto.InternalMessageInfo - -func (m *RequestSetOption) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *RequestSetOption) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - type RequestInitChain struct { - Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time"` - ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - ConsensusParams *ConsensusParams `protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` - Validators []ValidatorUpdate `protobuf:"bytes,4,rep,name=validators,proto3" json:"validators"` - AppStateBytes []byte `protobuf:"bytes,5,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + ConsensusParams *ConsensusParams `protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` + Validators []ValidatorUpdate `protobuf:"bytes,4,rep,name=validators,proto3" json:"validators"` + AppStateBytes []byte `protobuf:"bytes,5,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"` + InitialHeight int64 `protobuf:"varint,6,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` } func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } func (*RequestInitChain) ProtoMessage() {} func (*RequestInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{5} + return fileDescriptor_252557cfdd89a31a, []int{4} } func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -552,21 +618,25 @@ func (m *RequestInitChain) GetAppStateBytes() []byte { return nil } +func (m *RequestInitChain) GetInitialHeight() int64 { + if m != nil { + return m.InitialHeight + } + return 0 +} + type RequestQuery struct { - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` - Height int64 `protobuf:"varint,3,opt,name=height,proto3" 
json:"height,omitempty"` - Prove bool `protobuf:"varint,4,opt,name=prove,proto3" json:"prove,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + Prove bool `protobuf:"varint,4,opt,name=prove,proto3" json:"prove,omitempty"` } func (m *RequestQuery) Reset() { *m = RequestQuery{} } func (m *RequestQuery) String() string { return proto.CompactTextString(m) } func (*RequestQuery) ProtoMessage() {} func (*RequestQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{6} + return fileDescriptor_252557cfdd89a31a, []int{5} } func (m *RequestQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -624,20 +694,17 @@ func (m *RequestQuery) GetProve() bool { } type RequestBeginBlock struct { - Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` - Header Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` - LastCommitInfo LastCommitInfo `protobuf:"bytes,3,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` - ByzantineValidators []Evidence `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Header types1.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` + LastCommitInfo LastCommitInfo `protobuf:"bytes,3,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` + ByzantineValidators []Evidence `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` } func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } func (*RequestBeginBlock) ProtoMessage() {} func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{7} + return fileDescriptor_252557cfdd89a31a, []int{6} } func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -673,11 +740,11 @@ func (m *RequestBeginBlock) GetHash() []byte { return nil } -func (m *RequestBeginBlock) GetHeader() Header { +func (m *RequestBeginBlock) GetHeader() types1.Header { if m != nil { return m.Header } - return Header{} + return types1.Header{} } func (m *RequestBeginBlock) GetLastCommitInfo() LastCommitInfo { @@ -695,18 +762,15 @@ func (m *RequestBeginBlock) GetByzantineValidators() []Evidence { } type RequestCheckTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` - Type CheckTxType `protobuf:"varint,2,opt,name=type,proto3,enum=tendermint.abci.types.CheckTxType" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` + Type CheckTxType `protobuf:"varint,2,opt,name=type,proto3,enum=tendermint.abci.CheckTxType" json:"type,omitempty"` } func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func 
(*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{8} + return fileDescriptor_252557cfdd89a31a, []int{7} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -750,17 +814,14 @@ func (m *RequestCheckTx) GetType() CheckTxType { } type RequestDeliverTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` } func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } func (*RequestDeliverTx) ProtoMessage() {} func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{9} + return fileDescriptor_252557cfdd89a31a, []int{8} } func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -797,17 +858,14 @@ func (m *RequestDeliverTx) GetTx() []byte { } type RequestEndBlock struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } func (*RequestEndBlock) ProtoMessage() {} func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{10} + return fileDescriptor_252557cfdd89a31a, []int{9} } func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -844,16 +902,13 @@ func (m *RequestEndBlock) GetHeight() int64 { } type RequestCommit struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{11} + return fileDescriptor_252557cfdd89a31a, []int{10} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -882,13 +937,224 @@ func (m *RequestCommit) XXX_DiscardUnknown() { var xxx_messageInfo_RequestCommit proto.InternalMessageInfo +// lists available snapshots +type RequestListSnapshots struct { +} + +func (m *RequestListSnapshots) Reset() { *m = RequestListSnapshots{} } +func (m *RequestListSnapshots) String() string { return proto.CompactTextString(m) } +func (*RequestListSnapshots) ProtoMessage() {} +func (*RequestListSnapshots) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{11} +} +func (m *RequestListSnapshots) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestListSnapshots) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestListSnapshots.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestListSnapshots) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestListSnapshots.Merge(m, src) +} +func 
(m *RequestListSnapshots) XXX_Size() int { + return m.Size() +} +func (m *RequestListSnapshots) XXX_DiscardUnknown() { + xxx_messageInfo_RequestListSnapshots.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestListSnapshots proto.InternalMessageInfo + +// offers a snapshot to the application +type RequestOfferSnapshot struct { + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + AppHash []byte `protobuf:"bytes,2,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` +} + +func (m *RequestOfferSnapshot) Reset() { *m = RequestOfferSnapshot{} } +func (m *RequestOfferSnapshot) String() string { return proto.CompactTextString(m) } +func (*RequestOfferSnapshot) ProtoMessage() {} +func (*RequestOfferSnapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{12} +} +func (m *RequestOfferSnapshot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestOfferSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestOfferSnapshot.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestOfferSnapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestOfferSnapshot.Merge(m, src) +} +func (m *RequestOfferSnapshot) XXX_Size() int { + return m.Size() +} +func (m *RequestOfferSnapshot) XXX_DiscardUnknown() { + xxx_messageInfo_RequestOfferSnapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestOfferSnapshot proto.InternalMessageInfo + +func (m *RequestOfferSnapshot) GetSnapshot() *Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +func (m *RequestOfferSnapshot) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +// loads a snapshot chunk +type RequestLoadSnapshotChunk struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` + Chunk uint32 `protobuf:"varint,3,opt,name=chunk,proto3" json:"chunk,omitempty"` +} + +func (m *RequestLoadSnapshotChunk) Reset() { *m = RequestLoadSnapshotChunk{} } +func (m *RequestLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } +func (*RequestLoadSnapshotChunk) ProtoMessage() {} +func (*RequestLoadSnapshotChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{13} +} +func (m *RequestLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestLoadSnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestLoadSnapshotChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestLoadSnapshotChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestLoadSnapshotChunk.Merge(m, src) +} +func (m *RequestLoadSnapshotChunk) XXX_Size() int { + return m.Size() +} +func (m *RequestLoadSnapshotChunk) XXX_DiscardUnknown() { + xxx_messageInfo_RequestLoadSnapshotChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestLoadSnapshotChunk proto.InternalMessageInfo + +func (m *RequestLoadSnapshotChunk) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *RequestLoadSnapshotChunk) GetFormat() uint32 { + if m != nil { + return 
m.Format + } + return 0 +} + +func (m *RequestLoadSnapshotChunk) GetChunk() uint32 { + if m != nil { + return m.Chunk + } + return 0 +} + +// Applies a snapshot chunk +type RequestApplySnapshotChunk struct { + Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Chunk []byte `protobuf:"bytes,2,opt,name=chunk,proto3" json:"chunk,omitempty"` + Sender string `protobuf:"bytes,3,opt,name=sender,proto3" json:"sender,omitempty"` +} + +func (m *RequestApplySnapshotChunk) Reset() { *m = RequestApplySnapshotChunk{} } +func (m *RequestApplySnapshotChunk) String() string { return proto.CompactTextString(m) } +func (*RequestApplySnapshotChunk) ProtoMessage() {} +func (*RequestApplySnapshotChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{14} +} +func (m *RequestApplySnapshotChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestApplySnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestApplySnapshotChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestApplySnapshotChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestApplySnapshotChunk.Merge(m, src) +} +func (m *RequestApplySnapshotChunk) XXX_Size() int { + return m.Size() +} +func (m *RequestApplySnapshotChunk) XXX_DiscardUnknown() { + xxx_messageInfo_RequestApplySnapshotChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestApplySnapshotChunk proto.InternalMessageInfo + +func (m *RequestApplySnapshotChunk) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *RequestApplySnapshotChunk) GetChunk() []byte { + if m != nil { + return m.Chunk + } + return nil +} + +func (m *RequestApplySnapshotChunk) GetSender() string { + if m != nil { + return m.Sender + } + return "" +} + type Response struct { // Types that are valid to be assigned to Value: // *Response_Exception // *Response_Echo // *Response_Flush // *Response_Info - // *Response_SetOption // *Response_InitChain // *Response_Query // *Response_BeginBlock @@ -896,17 +1162,18 @@ type Response struct { // *Response_DeliverTx // *Response_EndBlock // *Response_Commit - Value isResponse_Value `protobuf_oneof:"value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // *Response_ListSnapshots + // *Response_OfferSnapshot + // *Response_LoadSnapshotChunk + // *Response_ApplySnapshotChunk + Value isResponse_Value `protobuf_oneof:"value"` } func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{12} + return fileDescriptor_252557cfdd89a31a, []int{15} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -937,7 +1204,6 @@ var xxx_messageInfo_Response proto.InternalMessageInfo type isResponse_Value interface { isResponse_Value() - Equal(interface{}) bool MarshalTo([]byte) (int, error) Size() int } @@ -954,43 +1220,55 @@ type Response_Flush struct { type Response_Info struct { Info *ResponseInfo `protobuf:"bytes,4,opt,name=info,proto3,oneof" json:"info,omitempty"` } -type Response_SetOption struct { - SetOption *ResponseSetOption `protobuf:"bytes,5,opt,name=set_option,json=setOption,proto3,oneof" 
json:"set_option,omitempty"` -} type Response_InitChain struct { - InitChain *ResponseInitChain `protobuf:"bytes,6,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` + InitChain *ResponseInitChain `protobuf:"bytes,5,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` } type Response_Query struct { - Query *ResponseQuery `protobuf:"bytes,7,opt,name=query,proto3,oneof" json:"query,omitempty"` + Query *ResponseQuery `protobuf:"bytes,6,opt,name=query,proto3,oneof" json:"query,omitempty"` } type Response_BeginBlock struct { - BeginBlock *ResponseBeginBlock `protobuf:"bytes,8,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` + BeginBlock *ResponseBeginBlock `protobuf:"bytes,7,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` } type Response_CheckTx struct { - CheckTx *ResponseCheckTx `protobuf:"bytes,9,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` + CheckTx *ResponseCheckTx `protobuf:"bytes,8,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` } type Response_DeliverTx struct { - DeliverTx *ResponseDeliverTx `protobuf:"bytes,10,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` + DeliverTx *ResponseDeliverTx `protobuf:"bytes,9,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` } type Response_EndBlock struct { - EndBlock *ResponseEndBlock `protobuf:"bytes,11,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` + EndBlock *ResponseEndBlock `protobuf:"bytes,10,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` } type Response_Commit struct { - Commit *ResponseCommit `protobuf:"bytes,12,opt,name=commit,proto3,oneof" json:"commit,omitempty"` -} - -func (*Response_Exception) isResponse_Value() {} -func (*Response_Echo) isResponse_Value() {} -func (*Response_Flush) isResponse_Value() {} -func (*Response_Info) isResponse_Value() {} -func (*Response_SetOption) isResponse_Value() {} -func (*Response_InitChain) isResponse_Value() {} -func (*Response_Query) isResponse_Value() {} -func (*Response_BeginBlock) isResponse_Value() {} -func (*Response_CheckTx) isResponse_Value() {} -func (*Response_DeliverTx) isResponse_Value() {} -func (*Response_EndBlock) isResponse_Value() {} -func (*Response_Commit) isResponse_Value() {} + Commit *ResponseCommit `protobuf:"bytes,11,opt,name=commit,proto3,oneof" json:"commit,omitempty"` +} +type Response_ListSnapshots struct { + ListSnapshots *ResponseListSnapshots `protobuf:"bytes,12,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` +} +type Response_OfferSnapshot struct { + OfferSnapshot *ResponseOfferSnapshot `protobuf:"bytes,13,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` +} +type Response_LoadSnapshotChunk struct { + LoadSnapshotChunk *ResponseLoadSnapshotChunk `protobuf:"bytes,14,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` +} +type Response_ApplySnapshotChunk struct { + ApplySnapshotChunk *ResponseApplySnapshotChunk `protobuf:"bytes,15,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` +} + +func (*Response_Exception) isResponse_Value() {} +func (*Response_Echo) isResponse_Value() {} +func (*Response_Flush) isResponse_Value() {} +func (*Response_Info) isResponse_Value() {} +func (*Response_InitChain) 
isResponse_Value() {} +func (*Response_Query) isResponse_Value() {} +func (*Response_BeginBlock) isResponse_Value() {} +func (*Response_CheckTx) isResponse_Value() {} +func (*Response_DeliverTx) isResponse_Value() {} +func (*Response_EndBlock) isResponse_Value() {} +func (*Response_Commit) isResponse_Value() {} +func (*Response_ListSnapshots) isResponse_Value() {} +func (*Response_OfferSnapshot) isResponse_Value() {} +func (*Response_LoadSnapshotChunk) isResponse_Value() {} +func (*Response_ApplySnapshotChunk) isResponse_Value() {} func (m *Response) GetValue() isResponse_Value { if m != nil { @@ -1027,13 +1305,6 @@ func (m *Response) GetInfo() *ResponseInfo { return nil } -func (m *Response) GetSetOption() *ResponseSetOption { - if x, ok := m.GetValue().(*Response_SetOption); ok { - return x.SetOption - } - return nil -} - func (m *Response) GetInitChain() *ResponseInitChain { if x, ok := m.GetValue().(*Response_InitChain); ok { return x.InitChain @@ -1083,6 +1354,34 @@ func (m *Response) GetCommit() *ResponseCommit { return nil } +func (m *Response) GetListSnapshots() *ResponseListSnapshots { + if x, ok := m.GetValue().(*Response_ListSnapshots); ok { + return x.ListSnapshots + } + return nil +} + +func (m *Response) GetOfferSnapshot() *ResponseOfferSnapshot { + if x, ok := m.GetValue().(*Response_OfferSnapshot); ok { + return x.OfferSnapshot + } + return nil +} + +func (m *Response) GetLoadSnapshotChunk() *ResponseLoadSnapshotChunk { + if x, ok := m.GetValue().(*Response_LoadSnapshotChunk); ok { + return x.LoadSnapshotChunk + } + return nil +} + +func (m *Response) GetApplySnapshotChunk() *ResponseApplySnapshotChunk { + if x, ok := m.GetValue().(*Response_ApplySnapshotChunk); ok { + return x.ApplySnapshotChunk + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
func (*Response) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -1090,7 +1389,6 @@ func (*Response) XXX_OneofWrappers() []interface{} { (*Response_Echo)(nil), (*Response_Flush)(nil), (*Response_Info)(nil), - (*Response_SetOption)(nil), (*Response_InitChain)(nil), (*Response_Query)(nil), (*Response_BeginBlock)(nil), @@ -1098,22 +1396,23 @@ func (*Response) XXX_OneofWrappers() []interface{} { (*Response_DeliverTx)(nil), (*Response_EndBlock)(nil), (*Response_Commit)(nil), + (*Response_ListSnapshots)(nil), + (*Response_OfferSnapshot)(nil), + (*Response_LoadSnapshotChunk)(nil), + (*Response_ApplySnapshotChunk)(nil), } } // nondeterministic type ResponseException struct { - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` } func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{13} + return fileDescriptor_252557cfdd89a31a, []int{16} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1150,17 +1449,14 @@ func (m *ResponseException) GetError() string { } type ResponseEcho struct { - Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` } func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{14} + return fileDescriptor_252557cfdd89a31a, []int{17} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1197,16 +1493,13 @@ func (m *ResponseEcho) GetMessage() string { } type ResponseFlush struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{15} + return fileDescriptor_252557cfdd89a31a, []int{18} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1236,21 +1529,19 @@ func (m *ResponseFlush) XXX_DiscardUnknown() { var xxx_messageInfo_ResponseFlush proto.InternalMessageInfo type ResponseInfo struct { - Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - AppVersion uint64 `protobuf:"varint,3,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` - LastBlockHeight int64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` - LastBlockAppHash []byte `protobuf:"bytes,5,opt,name=last_block_app_hash,json=lastBlockAppHash,proto3" json:"last_block_app_hash,omitempty"` - 
XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // this is the software version of the application. TODO: remove? + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + AppVersion uint64 `protobuf:"varint,3,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` + LastBlockHeight int64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockAppHash []byte `protobuf:"bytes,5,opt,name=last_block_app_hash,json=lastBlockAppHash,proto3" json:"last_block_app_hash,omitempty"` } func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{16} + return fileDescriptor_252557cfdd89a31a, []int{19} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1314,84 +1605,17 @@ func (m *ResponseInfo) GetLastBlockAppHash() []byte { return nil } -// nondeterministic -type ResponseSetOption struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // bytes data = 2; - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } -func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } -func (*ResponseSetOption) ProtoMessage() {} -func (*ResponseSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{17} -} -func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseSetOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseSetOption.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseSetOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseSetOption.Merge(m, src) -} -func (m *ResponseSetOption) XXX_Size() int { - return m.Size() -} -func (m *ResponseSetOption) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseSetOption.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseSetOption proto.InternalMessageInfo - -func (m *ResponseSetOption) GetCode() uint32 { - if m != nil { - return m.Code - } - return 0 -} - -func (m *ResponseSetOption) GetLog() string { - if m != nil { - return m.Log - } - return "" -} - -func (m *ResponseSetOption) GetInfo() string { - if m != nil { - return m.Info - } - return "" -} - type ResponseInitChain struct { - ConsensusParams *ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` - Validators []ValidatorUpdate `protobuf:"bytes,2,rep,name=validators,proto3" json:"validators"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ConsensusParams *ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" 
json:"consensus_params,omitempty"` + Validators []ValidatorUpdate `protobuf:"bytes,2,rep,name=validators,proto3" json:"validators"` + AppHash []byte `protobuf:"bytes,3,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` } func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{18} + return fileDescriptor_252557cfdd89a31a, []int{20} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1434,27 +1658,31 @@ func (m *ResponseInitChain) GetValidators() []ValidatorUpdate { return nil } +func (m *ResponseInitChain) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + type ResponseQuery struct { Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // bytes data = 2; // use "value" instead. - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` - Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` - Proof *merkle.Proof `protobuf:"bytes,8,opt,name=proof,proto3" json:"proof,omitempty"` - Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` - Codespace string `protobuf:"bytes,10,opt,name=codespace,proto3" json:"codespace,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` + Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` + ProofOps *crypto.ProofOps `protobuf:"bytes,8,opt,name=proof_ops,json=proofOps,proto3" json:"proof_ops,omitempty"` + Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` + Codespace string `protobuf:"bytes,10,opt,name=codespace,proto3" json:"codespace,omitempty"` } func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{19} + return fileDescriptor_252557cfdd89a31a, []int{21} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1525,9 +1753,9 @@ func (m *ResponseQuery) GetValue() []byte { return nil } -func (m *ResponseQuery) GetProof() *merkle.Proof { +func (m *ResponseQuery) GetProofOps() *crypto.ProofOps { if m != nil { - return m.Proof + return m.ProofOps } return nil } @@ -1547,17 +1775,14 @@ func (m *ResponseQuery) GetCodespace() string { } type ResponseBeginBlock struct { - Events []Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Events []Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` } func (m 
*ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{20} + return fileDescriptor_252557cfdd89a31a, []int{22} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1594,24 +1819,21 @@ func (m *ResponseBeginBlock) GetEvents() []Event { } type ResponseCheckTx struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` - Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` - Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` } func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{21} + return fileDescriptor_252557cfdd89a31a, []int{23} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1697,24 +1919,21 @@ func (m *ResponseCheckTx) GetCodespace() string { } type ResponseDeliverTx struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` - Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` - Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string 
`protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` } func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{22} + return fileDescriptor_252557cfdd89a31a, []int{24} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1803,16 +2022,13 @@ type ResponseEndBlock struct { ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` ConsensusParamUpdates *ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` Events []Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{23} + return fileDescriptor_252557cfdd89a31a, []int{25} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1864,18 +2080,15 @@ func (m *ResponseEndBlock) GetEvents() []Event { type ResponseCommit struct { // reserve 1 - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` } func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{24} + return fileDescriptor_252557cfdd89a31a, []int{26} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1918,29 +2131,22 @@ func (m *ResponseCommit) GetRetainHeight() int64 { return 0 } -// ConsensusParams contains all consensus-relevant parameters -// that can be adjusted by the abci app -type ConsensusParams struct { - Block *BlockParams `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` - Evidence *EvidenceParams `protobuf:"bytes,2,opt,name=evidence,proto3" json:"evidence,omitempty"` - Validator *ValidatorParams `protobuf:"bytes,3,opt,name=validator,proto3" json:"validator,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte 
`json:"-"` - XXX_sizecache int32 `json:"-"` +type ResponseListSnapshots struct { + Snapshots []*Snapshot `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"` } -func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } -func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } -func (*ConsensusParams) ProtoMessage() {} -func (*ConsensusParams) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{25} +func (m *ResponseListSnapshots) Reset() { *m = ResponseListSnapshots{} } +func (m *ResponseListSnapshots) String() string { return proto.CompactTextString(m) } +func (*ResponseListSnapshots) ProtoMessage() {} +func (*ResponseListSnapshots) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{27} } -func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { +func (m *ResponseListSnapshots) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ConsensusParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseListSnapshots) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ConsensusParams.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseListSnapshots.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1950,62 +2156,131 @@ func (m *ConsensusParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, err return b[:n], nil } } -func (m *ConsensusParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConsensusParams.Merge(m, src) +func (m *ResponseListSnapshots) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseListSnapshots.Merge(m, src) } -func (m *ConsensusParams) XXX_Size() int { +func (m *ResponseListSnapshots) XXX_Size() int { return m.Size() } -func (m *ConsensusParams) XXX_DiscardUnknown() { - xxx_messageInfo_ConsensusParams.DiscardUnknown(m) +func (m *ResponseListSnapshots) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseListSnapshots.DiscardUnknown(m) } -var xxx_messageInfo_ConsensusParams proto.InternalMessageInfo +var xxx_messageInfo_ResponseListSnapshots proto.InternalMessageInfo -func (m *ConsensusParams) GetBlock() *BlockParams { +func (m *ResponseListSnapshots) GetSnapshots() []*Snapshot { if m != nil { - return m.Block + return m.Snapshots } return nil } -func (m *ConsensusParams) GetEvidence() *EvidenceParams { +type ResponseOfferSnapshot struct { + Result ResponseOfferSnapshot_Result `protobuf:"varint,1,opt,name=result,proto3,enum=tendermint.abci.ResponseOfferSnapshot_Result" json:"result,omitempty"` +} + +func (m *ResponseOfferSnapshot) Reset() { *m = ResponseOfferSnapshot{} } +func (m *ResponseOfferSnapshot) String() string { return proto.CompactTextString(m) } +func (*ResponseOfferSnapshot) ProtoMessage() {} +func (*ResponseOfferSnapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{28} +} +func (m *ResponseOfferSnapshot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseOfferSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseOfferSnapshot.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseOfferSnapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseOfferSnapshot.Merge(m, src) +} +func (m *ResponseOfferSnapshot) XXX_Size() int { + return m.Size() +} +func (m 
*ResponseOfferSnapshot) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseOfferSnapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseOfferSnapshot proto.InternalMessageInfo + +func (m *ResponseOfferSnapshot) GetResult() ResponseOfferSnapshot_Result { if m != nil { - return m.Evidence + return m.Result } - return nil + return ResponseOfferSnapshot_UNKNOWN +} + +type ResponseLoadSnapshotChunk struct { + Chunk []byte `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"` +} + +func (m *ResponseLoadSnapshotChunk) Reset() { *m = ResponseLoadSnapshotChunk{} } +func (m *ResponseLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } +func (*ResponseLoadSnapshotChunk) ProtoMessage() {} +func (*ResponseLoadSnapshotChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{29} +} +func (m *ResponseLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseLoadSnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseLoadSnapshotChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseLoadSnapshotChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseLoadSnapshotChunk.Merge(m, src) +} +func (m *ResponseLoadSnapshotChunk) XXX_Size() int { + return m.Size() +} +func (m *ResponseLoadSnapshotChunk) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseLoadSnapshotChunk.DiscardUnknown(m) } -func (m *ConsensusParams) GetValidator() *ValidatorParams { +var xxx_messageInfo_ResponseLoadSnapshotChunk proto.InternalMessageInfo + +func (m *ResponseLoadSnapshotChunk) GetChunk() []byte { if m != nil { - return m.Validator + return m.Chunk } return nil } -// BlockParams contains limits on the block size. 
-type BlockParams struct { - // Note: must be greater than 0 - MaxBytes int64 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` - // Note: must be greater or equal to -1 - MaxGas int64 `protobuf:"varint,2,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type ResponseApplySnapshotChunk struct { + Result ResponseApplySnapshotChunk_Result `protobuf:"varint,1,opt,name=result,proto3,enum=tendermint.abci.ResponseApplySnapshotChunk_Result" json:"result,omitempty"` + RefetchChunks []uint32 `protobuf:"varint,2,rep,packed,name=refetch_chunks,json=refetchChunks,proto3" json:"refetch_chunks,omitempty"` + RejectSenders []string `protobuf:"bytes,3,rep,name=reject_senders,json=rejectSenders,proto3" json:"reject_senders,omitempty"` } -func (m *BlockParams) Reset() { *m = BlockParams{} } -func (m *BlockParams) String() string { return proto.CompactTextString(m) } -func (*BlockParams) ProtoMessage() {} -func (*BlockParams) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{26} +func (m *ResponseApplySnapshotChunk) Reset() { *m = ResponseApplySnapshotChunk{} } +func (m *ResponseApplySnapshotChunk) String() string { return proto.CompactTextString(m) } +func (*ResponseApplySnapshotChunk) ProtoMessage() {} +func (*ResponseApplySnapshotChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{30} } -func (m *BlockParams) XXX_Unmarshal(b []byte) error { +func (m *ResponseApplySnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *BlockParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseApplySnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_BlockParams.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseApplySnapshotChunk.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2015,53 +2290,60 @@ func (m *BlockParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *BlockParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockParams.Merge(m, src) +func (m *ResponseApplySnapshotChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseApplySnapshotChunk.Merge(m, src) } -func (m *BlockParams) XXX_Size() int { +func (m *ResponseApplySnapshotChunk) XXX_Size() int { return m.Size() } -func (m *BlockParams) XXX_DiscardUnknown() { - xxx_messageInfo_BlockParams.DiscardUnknown(m) +func (m *ResponseApplySnapshotChunk) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseApplySnapshotChunk.DiscardUnknown(m) } -var xxx_messageInfo_BlockParams proto.InternalMessageInfo +var xxx_messageInfo_ResponseApplySnapshotChunk proto.InternalMessageInfo -func (m *BlockParams) GetMaxBytes() int64 { +func (m *ResponseApplySnapshotChunk) GetResult() ResponseApplySnapshotChunk_Result { if m != nil { - return m.MaxBytes + return m.Result } - return 0 + return ResponseApplySnapshotChunk_UNKNOWN } -func (m *BlockParams) GetMaxGas() int64 { +func (m *ResponseApplySnapshotChunk) GetRefetchChunks() []uint32 { if m != nil { - return m.MaxGas + return m.RefetchChunks } - return 0 + return nil } -type EvidenceParams struct { - // Note: must be greater than 0 - MaxAgeNumBlocks int64 `protobuf:"varint,1,opt,name=max_age_num_blocks,json=maxAgeNumBlocks,proto3" json:"max_age_num_blocks,omitempty"` - MaxAgeDuration 
time.Duration `protobuf:"bytes,2,opt,name=max_age_duration,json=maxAgeDuration,proto3,stdduration" json:"max_age_duration"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (m *ResponseApplySnapshotChunk) GetRejectSenders() []string { + if m != nil { + return m.RejectSenders + } + return nil +} + +// ConsensusParams contains all consensus-relevant parameters +// that can be adjusted by the abci app +type ConsensusParams struct { + Block *BlockParams `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + Evidence *types1.EvidenceParams `protobuf:"bytes,2,opt,name=evidence,proto3" json:"evidence,omitempty"` + Validator *types1.ValidatorParams `protobuf:"bytes,3,opt,name=validator,proto3" json:"validator,omitempty"` + Version *types1.VersionParams `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` } -func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } -func (m *EvidenceParams) String() string { return proto.CompactTextString(m) } -func (*EvidenceParams) ProtoMessage() {} -func (*EvidenceParams) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{27} +func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } +func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } +func (*ConsensusParams) ProtoMessage() {} +func (*ConsensusParams) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{31} } -func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { +func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *EvidenceParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ConsensusParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_EvidenceParams.Marshal(b, m, deterministic) + return xxx_messageInfo_ConsensusParams.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2071,52 +2353,66 @@ func (m *EvidenceParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return b[:n], nil } } -func (m *EvidenceParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_EvidenceParams.Merge(m, src) +func (m *ConsensusParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusParams.Merge(m, src) } -func (m *EvidenceParams) XXX_Size() int { +func (m *ConsensusParams) XXX_Size() int { return m.Size() } -func (m *EvidenceParams) XXX_DiscardUnknown() { - xxx_messageInfo_EvidenceParams.DiscardUnknown(m) +func (m *ConsensusParams) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusParams.DiscardUnknown(m) } -var xxx_messageInfo_EvidenceParams proto.InternalMessageInfo +var xxx_messageInfo_ConsensusParams proto.InternalMessageInfo -func (m *EvidenceParams) GetMaxAgeNumBlocks() int64 { +func (m *ConsensusParams) GetBlock() *BlockParams { if m != nil { - return m.MaxAgeNumBlocks + return m.Block } - return 0 + return nil +} + +func (m *ConsensusParams) GetEvidence() *types1.EvidenceParams { + if m != nil { + return m.Evidence + } + return nil } -func (m *EvidenceParams) GetMaxAgeDuration() time.Duration { +func (m *ConsensusParams) GetValidator() *types1.ValidatorParams { if m != nil { - return m.MaxAgeDuration + return m.Validator } - return 0 + return nil } -// ValidatorParams contains limits on validators. 
-type ValidatorParams struct { - PubKeyTypes []string `protobuf:"bytes,1,rep,name=pub_key_types,json=pubKeyTypes,proto3" json:"pub_key_types,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (m *ConsensusParams) GetVersion() *types1.VersionParams { + if m != nil { + return m.Version + } + return nil +} + +// BlockParams contains limits on the block size. +type BlockParams struct { + // Note: must be greater than 0 + MaxBytes int64 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` + // Note: must be greater or equal to -1 + MaxGas int64 `protobuf:"varint,2,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` } -func (m *ValidatorParams) Reset() { *m = ValidatorParams{} } -func (m *ValidatorParams) String() string { return proto.CompactTextString(m) } -func (*ValidatorParams) ProtoMessage() {} -func (*ValidatorParams) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{28} +func (m *BlockParams) Reset() { *m = BlockParams{} } +func (m *BlockParams) String() string { return proto.CompactTextString(m) } +func (*BlockParams) ProtoMessage() {} +func (*BlockParams) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{32} } -func (m *ValidatorParams) XXX_Unmarshal(b []byte) error { +func (m *BlockParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ValidatorParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *BlockParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ValidatorParams.Marshal(b, m, deterministic) + return xxx_messageInfo_BlockParams.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2126,38 +2422,42 @@ func (m *ValidatorParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, err return b[:n], nil } } -func (m *ValidatorParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidatorParams.Merge(m, src) +func (m *BlockParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockParams.Merge(m, src) } -func (m *ValidatorParams) XXX_Size() int { +func (m *BlockParams) XXX_Size() int { return m.Size() } -func (m *ValidatorParams) XXX_DiscardUnknown() { - xxx_messageInfo_ValidatorParams.DiscardUnknown(m) +func (m *BlockParams) XXX_DiscardUnknown() { + xxx_messageInfo_BlockParams.DiscardUnknown(m) } -var xxx_messageInfo_ValidatorParams proto.InternalMessageInfo +var xxx_messageInfo_BlockParams proto.InternalMessageInfo + +func (m *BlockParams) GetMaxBytes() int64 { + if m != nil { + return m.MaxBytes + } + return 0 +} -func (m *ValidatorParams) GetPubKeyTypes() []string { +func (m *BlockParams) GetMaxGas() int64 { if m != nil { - return m.PubKeyTypes + return m.MaxGas } - return nil + return 0 } type LastCommitInfo struct { - Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` - Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` + Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` } func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() 
([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{29} + return fileDescriptor_252557cfdd89a31a, []int{33} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2200,19 +2500,19 @@ func (m *LastCommitInfo) GetVotes() []VoteInfo { return nil } +// Event allows application developers to attach additional information to +// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. +// Later, transactions may be queried using these events. type Event struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Attributes []kv.Pair `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Attributes []EventAttribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` } func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{30} + return fileDescriptor_252557cfdd89a31a, []int{34} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2248,50 +2548,32 @@ func (m *Event) GetType() string { return "" } -func (m *Event) GetAttributes() []kv.Pair { +func (m *Event) GetAttributes() []EventAttribute { if m != nil { return m.Attributes } return nil } -type Header struct { - // basic block info - Version Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version"` - ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` - Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"` - // prev block info - LastBlockId BlockID `protobuf:"bytes,5,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` - // hashes of block data - LastCommitHash []byte `protobuf:"bytes,6,opt,name=last_commit_hash,json=lastCommitHash,proto3" json:"last_commit_hash,omitempty"` - DataHash []byte `protobuf:"bytes,7,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` - // hashes from the app output from the prev block - ValidatorsHash []byte `protobuf:"bytes,8,opt,name=validators_hash,json=validatorsHash,proto3" json:"validators_hash,omitempty"` - NextValidatorsHash []byte `protobuf:"bytes,9,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` - ConsensusHash []byte `protobuf:"bytes,10,opt,name=consensus_hash,json=consensusHash,proto3" json:"consensus_hash,omitempty"` - AppHash []byte `protobuf:"bytes,11,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` - LastResultsHash []byte `protobuf:"bytes,12,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` - // consensus info - EvidenceHash []byte `protobuf:"bytes,13,opt,name=evidence_hash,json=evidenceHash,proto3" json:"evidence_hash,omitempty"` - ProposerAddress []byte `protobuf:"bytes,14,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Header) Reset() { *m = Header{} } -func (m *Header) String() string { return 
proto.CompactTextString(m) } -func (*Header) ProtoMessage() {} -func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{31} -} -func (m *Header) XXX_Unmarshal(b []byte) error { +// EventAttribute is a single key-value pair, associated with an event. +type EventAttribute struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Index bool `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` +} + +func (m *EventAttribute) Reset() { *m = EventAttribute{} } +func (m *EventAttribute) String() string { return proto.CompactTextString(m) } +func (*EventAttribute) ProtoMessage() {} +func (*EventAttribute) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{35} +} +func (m *EventAttribute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *EventAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_Header.Marshal(b, m, deterministic) + return xxx_messageInfo_EventAttribute.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2301,296 +2583,122 @@ func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (m *Header) XXX_Merge(src proto.Message) { - xxx_messageInfo_Header.Merge(m, src) +func (m *EventAttribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventAttribute.Merge(m, src) } -func (m *Header) XXX_Size() int { +func (m *EventAttribute) XXX_Size() int { return m.Size() } -func (m *Header) XXX_DiscardUnknown() { - xxx_messageInfo_Header.DiscardUnknown(m) +func (m *EventAttribute) XXX_DiscardUnknown() { + xxx_messageInfo_EventAttribute.DiscardUnknown(m) } -var xxx_messageInfo_Header proto.InternalMessageInfo +var xxx_messageInfo_EventAttribute proto.InternalMessageInfo -func (m *Header) GetVersion() Version { +func (m *EventAttribute) GetKey() []byte { if m != nil { - return m.Version + return m.Key } - return Version{} + return nil } -func (m *Header) GetChainID() string { +func (m *EventAttribute) GetValue() []byte { if m != nil { - return m.ChainID + return m.Value } - return "" + return nil } -func (m *Header) GetHeight() int64 { +func (m *EventAttribute) GetIndex() bool { if m != nil { - return m.Height + return m.Index } - return 0 + return false } -func (m *Header) GetTime() time.Time { - if m != nil { - return m.Time - } - return time.Time{} +// TxResult contains results of executing the transaction. +// +// One usage is indexing transaction results. 
+type TxResult struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + Tx []byte `protobuf:"bytes,3,opt,name=tx,proto3" json:"tx,omitempty"` + Result ResponseDeliverTx `protobuf:"bytes,4,opt,name=result,proto3" json:"result"` } -func (m *Header) GetLastBlockId() BlockID { - if m != nil { - return m.LastBlockId - } - return BlockID{} +func (m *TxResult) Reset() { *m = TxResult{} } +func (m *TxResult) String() string { return proto.CompactTextString(m) } +func (*TxResult) ProtoMessage() {} +func (*TxResult) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{36} } - -func (m *Header) GetLastCommitHash() []byte { - if m != nil { - return m.LastCommitHash - } - return nil +func (m *TxResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (m *Header) GetDataHash() []byte { - if m != nil { - return m.DataHash +func (m *TxResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TxResult.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return nil } - -func (m *Header) GetValidatorsHash() []byte { - if m != nil { - return m.ValidatorsHash - } - return nil +func (m *TxResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_TxResult.Merge(m, src) +} +func (m *TxResult) XXX_Size() int { + return m.Size() +} +func (m *TxResult) XXX_DiscardUnknown() { + xxx_messageInfo_TxResult.DiscardUnknown(m) } -func (m *Header) GetNextValidatorsHash() []byte { +var xxx_messageInfo_TxResult proto.InternalMessageInfo + +func (m *TxResult) GetHeight() int64 { if m != nil { - return m.NextValidatorsHash + return m.Height } - return nil + return 0 } -func (m *Header) GetConsensusHash() []byte { +func (m *TxResult) GetIndex() uint32 { if m != nil { - return m.ConsensusHash + return m.Index } - return nil + return 0 } -func (m *Header) GetAppHash() []byte { +func (m *TxResult) GetTx() []byte { if m != nil { - return m.AppHash + return m.Tx } return nil } -func (m *Header) GetLastResultsHash() []byte { +func (m *TxResult) GetResult() ResponseDeliverTx { if m != nil { - return m.LastResultsHash + return m.Result } - return nil -} - -func (m *Header) GetEvidenceHash() []byte { - if m != nil { - return m.EvidenceHash - } - return nil -} - -func (m *Header) GetProposerAddress() []byte { - if m != nil { - return m.ProposerAddress - } - return nil -} - -type Version struct { - Block uint64 `protobuf:"varint,1,opt,name=Block,proto3" json:"Block,omitempty"` - App uint64 `protobuf:"varint,2,opt,name=App,proto3" json:"App,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{32} -} -func (m *Version) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Version.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Version) XXX_Merge(src 
proto.Message) { - xxx_messageInfo_Version.Merge(m, src) -} -func (m *Version) XXX_Size() int { - return m.Size() -} -func (m *Version) XXX_DiscardUnknown() { - xxx_messageInfo_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_Version proto.InternalMessageInfo - -func (m *Version) GetBlock() uint64 { - if m != nil { - return m.Block - } - return 0 -} - -func (m *Version) GetApp() uint64 { - if m != nil { - return m.App - } - return 0 -} - -type BlockID struct { - Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` - PartsHeader PartSetHeader `protobuf:"bytes,2,opt,name=parts_header,json=partsHeader,proto3" json:"parts_header"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BlockID) Reset() { *m = BlockID{} } -func (m *BlockID) String() string { return proto.CompactTextString(m) } -func (*BlockID) ProtoMessage() {} -func (*BlockID) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{33} -} -func (m *BlockID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BlockID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BlockID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BlockID) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockID.Merge(m, src) -} -func (m *BlockID) XXX_Size() int { - return m.Size() -} -func (m *BlockID) XXX_DiscardUnknown() { - xxx_messageInfo_BlockID.DiscardUnknown(m) -} - -var xxx_messageInfo_BlockID proto.InternalMessageInfo - -func (m *BlockID) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func (m *BlockID) GetPartsHeader() PartSetHeader { - if m != nil { - return m.PartsHeader - } - return PartSetHeader{} -} - -type PartSetHeader struct { - Total int32 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` - Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } -func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } -func (*PartSetHeader) ProtoMessage() {} -func (*PartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{34} -} -func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PartSetHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PartSetHeader.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PartSetHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_PartSetHeader.Merge(m, src) -} -func (m *PartSetHeader) XXX_Size() int { - return m.Size() -} -func (m *PartSetHeader) XXX_DiscardUnknown() { - xxx_messageInfo_PartSetHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_PartSetHeader proto.InternalMessageInfo - -func (m *PartSetHeader) GetTotal() int32 { - if m != nil { - return m.Total - } - return 0 -} - -func (m *PartSetHeader) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil + return ResponseDeliverTx{} } // Validator type Validator struct { Address []byte 
`protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // PubKey pub_key = 2 [(gogoproto.nullable)=false]; - Power int64 `protobuf:"varint,3,opt,name=power,proto3" json:"power,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Power int64 `protobuf:"varint,3,opt,name=power,proto3" json:"power,omitempty"` } func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{35} + return fileDescriptor_252557cfdd89a31a, []int{37} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2635,18 +2743,15 @@ func (m *Validator) GetPower() int64 { // ValidatorUpdate type ValidatorUpdate struct { - PubKey PubKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` - Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + PubKey crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` + Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"` } func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{36} + return fileDescriptor_252557cfdd89a31a, []int{38} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2675,11 +2780,11 @@ func (m *ValidatorUpdate) XXX_DiscardUnknown() { var xxx_messageInfo_ValidatorUpdate proto.InternalMessageInfo -func (m *ValidatorUpdate) GetPubKey() PubKey { +func (m *ValidatorUpdate) GetPubKey() crypto.PublicKey { if m != nil { return m.PubKey } - return PubKey{} + return crypto.PublicKey{} } func (m *ValidatorUpdate) GetPower() int64 { @@ -2691,18 +2796,15 @@ func (m *ValidatorUpdate) GetPower() int64 { // VoteInfo type VoteInfo struct { - Validator Validator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator"` - SignedLastBlock bool `protobuf:"varint,2,opt,name=signed_last_block,json=signedLastBlock,proto3" json:"signed_last_block,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Validator Validator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator"` + SignedLastBlock bool `protobuf:"varint,2,opt,name=signed_last_block,json=signedLastBlock,proto3" json:"signed_last_block,omitempty"` } func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{37} + return fileDescriptor_252557cfdd89a31a, []int{39} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2745,77 +2847,25 @@ func (m *VoteInfo) GetSignedLastBlock() bool { return false } -type PubKey struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache 
int32 `json:"-"` -} - -func (m *PubKey) Reset() { *m = PubKey{} } -func (m *PubKey) String() string { return proto.CompactTextString(m) } -func (*PubKey) ProtoMessage() {} -func (*PubKey) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{38} -} -func (m *PubKey) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PubKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PubKey.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PubKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_PubKey.Merge(m, src) -} -func (m *PubKey) XXX_Size() int { - return m.Size() -} -func (m *PubKey) XXX_DiscardUnknown() { - xxx_messageInfo_PubKey.DiscardUnknown(m) -} - -var xxx_messageInfo_PubKey proto.InternalMessageInfo - -func (m *PubKey) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *PubKey) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - type Evidence struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Validator Validator `protobuf:"bytes,2,opt,name=validator,proto3" json:"validator"` - Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` - Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"` - TotalVotingPower int64 `protobuf:"varint,5,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Type EvidenceType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.abci.EvidenceType" json:"type,omitempty"` + // The offending validator + Validator Validator `protobuf:"bytes,2,opt,name=validator,proto3" json:"validator"` + // The height when the offense occurred + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + // The corresponding time where the offense occurred + Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"` + // Total voting power of the validator set in case the ABCI application does + // not store historical validators. 
+ // https://github.com/tendermint/tendermint/issues/4581 + TotalVotingPower int64 `protobuf:"varint,5,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` } func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{39} + return fileDescriptor_252557cfdd89a31a, []int{40} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2844,11 +2894,11 @@ func (m *Evidence) XXX_DiscardUnknown() { var xxx_messageInfo_Evidence proto.InternalMessageInfo -func (m *Evidence) GetType() string { +func (m *Evidence) GetType() EvidenceType { if m != nil { return m.Type } - return "" + return EvidenceType_UNKNOWN } func (m *Evidence) GetValidator() Validator { @@ -2879,2165 +2929,303 @@ func (m *Evidence) GetTotalVotingPower() int64 { return 0 } -func init() { - proto.RegisterEnum("tendermint.abci.types.CheckTxType", CheckTxType_name, CheckTxType_value) - golang_proto.RegisterEnum("tendermint.abci.types.CheckTxType", CheckTxType_name, CheckTxType_value) - proto.RegisterType((*Request)(nil), "tendermint.abci.types.Request") - golang_proto.RegisterType((*Request)(nil), "tendermint.abci.types.Request") - proto.RegisterType((*RequestEcho)(nil), "tendermint.abci.types.RequestEcho") - golang_proto.RegisterType((*RequestEcho)(nil), "tendermint.abci.types.RequestEcho") - proto.RegisterType((*RequestFlush)(nil), "tendermint.abci.types.RequestFlush") - golang_proto.RegisterType((*RequestFlush)(nil), "tendermint.abci.types.RequestFlush") - proto.RegisterType((*RequestInfo)(nil), "tendermint.abci.types.RequestInfo") - golang_proto.RegisterType((*RequestInfo)(nil), "tendermint.abci.types.RequestInfo") - proto.RegisterType((*RequestSetOption)(nil), "tendermint.abci.types.RequestSetOption") - golang_proto.RegisterType((*RequestSetOption)(nil), "tendermint.abci.types.RequestSetOption") - proto.RegisterType((*RequestInitChain)(nil), "tendermint.abci.types.RequestInitChain") - golang_proto.RegisterType((*RequestInitChain)(nil), "tendermint.abci.types.RequestInitChain") - proto.RegisterType((*RequestQuery)(nil), "tendermint.abci.types.RequestQuery") - golang_proto.RegisterType((*RequestQuery)(nil), "tendermint.abci.types.RequestQuery") - proto.RegisterType((*RequestBeginBlock)(nil), "tendermint.abci.types.RequestBeginBlock") - golang_proto.RegisterType((*RequestBeginBlock)(nil), "tendermint.abci.types.RequestBeginBlock") - proto.RegisterType((*RequestCheckTx)(nil), "tendermint.abci.types.RequestCheckTx") - golang_proto.RegisterType((*RequestCheckTx)(nil), "tendermint.abci.types.RequestCheckTx") - proto.RegisterType((*RequestDeliverTx)(nil), "tendermint.abci.types.RequestDeliverTx") - golang_proto.RegisterType((*RequestDeliverTx)(nil), "tendermint.abci.types.RequestDeliverTx") - proto.RegisterType((*RequestEndBlock)(nil), "tendermint.abci.types.RequestEndBlock") - golang_proto.RegisterType((*RequestEndBlock)(nil), "tendermint.abci.types.RequestEndBlock") - proto.RegisterType((*RequestCommit)(nil), "tendermint.abci.types.RequestCommit") - golang_proto.RegisterType((*RequestCommit)(nil), "tendermint.abci.types.RequestCommit") - proto.RegisterType((*Response)(nil), "tendermint.abci.types.Response") - golang_proto.RegisterType((*Response)(nil), "tendermint.abci.types.Response") - proto.RegisterType((*ResponseException)(nil), "tendermint.abci.types.ResponseException") - 
golang_proto.RegisterType((*ResponseException)(nil), "tendermint.abci.types.ResponseException") - proto.RegisterType((*ResponseEcho)(nil), "tendermint.abci.types.ResponseEcho") - golang_proto.RegisterType((*ResponseEcho)(nil), "tendermint.abci.types.ResponseEcho") - proto.RegisterType((*ResponseFlush)(nil), "tendermint.abci.types.ResponseFlush") - golang_proto.RegisterType((*ResponseFlush)(nil), "tendermint.abci.types.ResponseFlush") - proto.RegisterType((*ResponseInfo)(nil), "tendermint.abci.types.ResponseInfo") - golang_proto.RegisterType((*ResponseInfo)(nil), "tendermint.abci.types.ResponseInfo") - proto.RegisterType((*ResponseSetOption)(nil), "tendermint.abci.types.ResponseSetOption") - golang_proto.RegisterType((*ResponseSetOption)(nil), "tendermint.abci.types.ResponseSetOption") - proto.RegisterType((*ResponseInitChain)(nil), "tendermint.abci.types.ResponseInitChain") - golang_proto.RegisterType((*ResponseInitChain)(nil), "tendermint.abci.types.ResponseInitChain") - proto.RegisterType((*ResponseQuery)(nil), "tendermint.abci.types.ResponseQuery") - golang_proto.RegisterType((*ResponseQuery)(nil), "tendermint.abci.types.ResponseQuery") - proto.RegisterType((*ResponseBeginBlock)(nil), "tendermint.abci.types.ResponseBeginBlock") - golang_proto.RegisterType((*ResponseBeginBlock)(nil), "tendermint.abci.types.ResponseBeginBlock") - proto.RegisterType((*ResponseCheckTx)(nil), "tendermint.abci.types.ResponseCheckTx") - golang_proto.RegisterType((*ResponseCheckTx)(nil), "tendermint.abci.types.ResponseCheckTx") - proto.RegisterType((*ResponseDeliverTx)(nil), "tendermint.abci.types.ResponseDeliverTx") - golang_proto.RegisterType((*ResponseDeliverTx)(nil), "tendermint.abci.types.ResponseDeliverTx") - proto.RegisterType((*ResponseEndBlock)(nil), "tendermint.abci.types.ResponseEndBlock") - golang_proto.RegisterType((*ResponseEndBlock)(nil), "tendermint.abci.types.ResponseEndBlock") - proto.RegisterType((*ResponseCommit)(nil), "tendermint.abci.types.ResponseCommit") - golang_proto.RegisterType((*ResponseCommit)(nil), "tendermint.abci.types.ResponseCommit") - proto.RegisterType((*ConsensusParams)(nil), "tendermint.abci.types.ConsensusParams") - golang_proto.RegisterType((*ConsensusParams)(nil), "tendermint.abci.types.ConsensusParams") - proto.RegisterType((*BlockParams)(nil), "tendermint.abci.types.BlockParams") - golang_proto.RegisterType((*BlockParams)(nil), "tendermint.abci.types.BlockParams") - proto.RegisterType((*EvidenceParams)(nil), "tendermint.abci.types.EvidenceParams") - golang_proto.RegisterType((*EvidenceParams)(nil), "tendermint.abci.types.EvidenceParams") - proto.RegisterType((*ValidatorParams)(nil), "tendermint.abci.types.ValidatorParams") - golang_proto.RegisterType((*ValidatorParams)(nil), "tendermint.abci.types.ValidatorParams") - proto.RegisterType((*LastCommitInfo)(nil), "tendermint.abci.types.LastCommitInfo") - golang_proto.RegisterType((*LastCommitInfo)(nil), "tendermint.abci.types.LastCommitInfo") - proto.RegisterType((*Event)(nil), "tendermint.abci.types.Event") - golang_proto.RegisterType((*Event)(nil), "tendermint.abci.types.Event") - proto.RegisterType((*Header)(nil), "tendermint.abci.types.Header") - golang_proto.RegisterType((*Header)(nil), "tendermint.abci.types.Header") - proto.RegisterType((*Version)(nil), "tendermint.abci.types.Version") - golang_proto.RegisterType((*Version)(nil), "tendermint.abci.types.Version") - proto.RegisterType((*BlockID)(nil), "tendermint.abci.types.BlockID") - golang_proto.RegisterType((*BlockID)(nil), "tendermint.abci.types.BlockID") - 
proto.RegisterType((*PartSetHeader)(nil), "tendermint.abci.types.PartSetHeader") - golang_proto.RegisterType((*PartSetHeader)(nil), "tendermint.abci.types.PartSetHeader") - proto.RegisterType((*Validator)(nil), "tendermint.abci.types.Validator") - golang_proto.RegisterType((*Validator)(nil), "tendermint.abci.types.Validator") - proto.RegisterType((*ValidatorUpdate)(nil), "tendermint.abci.types.ValidatorUpdate") - golang_proto.RegisterType((*ValidatorUpdate)(nil), "tendermint.abci.types.ValidatorUpdate") - proto.RegisterType((*VoteInfo)(nil), "tendermint.abci.types.VoteInfo") - golang_proto.RegisterType((*VoteInfo)(nil), "tendermint.abci.types.VoteInfo") - proto.RegisterType((*PubKey)(nil), "tendermint.abci.types.PubKey") - golang_proto.RegisterType((*PubKey)(nil), "tendermint.abci.types.PubKey") - proto.RegisterType((*Evidence)(nil), "tendermint.abci.types.Evidence") - golang_proto.RegisterType((*Evidence)(nil), "tendermint.abci.types.Evidence") -} - -func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_9f1eaa49c51fa1ac) } -func init() { golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_9f1eaa49c51fa1ac) } - -var fileDescriptor_9f1eaa49c51fa1ac = []byte{ - // 2386 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x59, 0x4d, 0x90, 0x1b, 0x47, - 0x15, 0xde, 0xd1, 0x6a, 0x57, 0xd2, 0xd3, 0xfe, 0xc8, 0x6d, 0x27, 0x91, 0x85, 0xb3, 0xeb, 0x9a, - 0x8d, 0xed, 0x75, 0x12, 0xb4, 0x61, 0xa9, 0x50, 0x31, 0x76, 0x85, 0x5a, 0xad, 0x1d, 0xa4, 0x8a, - 0xed, 0x6c, 0xc6, 0xf6, 0x62, 0xa0, 0x2a, 0x53, 0x2d, 0x4d, 0x5b, 0x9a, 0x5a, 0x69, 0x66, 0x32, - 0xd3, 0x92, 0x25, 0x8a, 0x3b, 0x45, 0x15, 0x07, 0x2e, 0x54, 0x71, 0xe1, 0xce, 0x91, 0x03, 0x87, - 0x1c, 0x39, 0xe6, 0xc0, 0x81, 0x03, 0x67, 0x03, 0x0b, 0x27, 0x2a, 0x47, 0x8a, 0xe2, 0x48, 0xf5, - 0xeb, 0x9e, 0x3f, 0xad, 0xb4, 0x1a, 0x07, 0xdf, 0xb8, 0x48, 0xd3, 0x3d, 0xef, 0xbd, 0xee, 0x7e, - 0xfd, 0xde, 0xfb, 0xde, 0x7b, 0x03, 0xaf, 0xd3, 0x76, 0xc7, 0xde, 0xe3, 0x13, 0x8f, 0x05, 0xf2, - 0xb7, 0xee, 0xf9, 0x2e, 0x77, 0xc9, 0x6b, 0x9c, 0x39, 0x16, 0xf3, 0x07, 0xb6, 0xc3, 0xeb, 0x82, - 0xa4, 0x8e, 0x2f, 0x6b, 0xd7, 0x79, 0xcf, 0xf6, 0x2d, 0xd3, 0xa3, 0x3e, 0x9f, 0xec, 0x21, 0xe5, - 0x5e, 0xd7, 0xed, 0xba, 0xf1, 0x93, 0x64, 0xaf, 0xd5, 0x3a, 0xfe, 0xc4, 0xe3, 0xee, 0xde, 0x80, - 0xf9, 0x27, 0x7d, 0xa6, 0xfe, 0xd4, 0xbb, 0x8b, 0x7d, 0xbb, 0x1d, 0xec, 0x9d, 0x8c, 0x92, 0xeb, - 0xd5, 0xb6, 0xbb, 0xae, 0xdb, 0xed, 0x33, 0x29, 0xb3, 0x3d, 0x7c, 0xb6, 0xc7, 0xed, 0x01, 0x0b, - 0x38, 0x1d, 0x78, 0x8a, 0x60, 0x6b, 0x9a, 0xc0, 0x1a, 0xfa, 0x94, 0xdb, 0xae, 0x23, 0xdf, 0xeb, - 0xff, 0x5e, 0x81, 0x82, 0xc1, 0x3e, 0x1f, 0xb2, 0x80, 0x93, 0x0f, 0x20, 0xcf, 0x3a, 0x3d, 0xb7, - 0x9a, 0xbb, 0xaa, 0xed, 0x96, 0xf7, 0xf5, 0xfa, 0xcc, 0xb3, 0xd4, 0x15, 0xf5, 0xbd, 0x4e, 0xcf, - 0x6d, 0x2e, 0x19, 0xc8, 0x41, 0x6e, 0xc3, 0xca, 0xb3, 0xfe, 0x30, 0xe8, 0x55, 0x97, 0x91, 0x75, - 0xe7, 0x7c, 0xd6, 0x8f, 0x04, 0x69, 0x73, 0xc9, 0x90, 0x3c, 0x62, 0x59, 0xdb, 0x79, 0xe6, 0x56, - 0xf3, 0x59, 0x96, 0x6d, 0x39, 0xcf, 0x70, 0x59, 0xc1, 0x41, 0x9a, 0x00, 0x01, 0xe3, 0xa6, 0xeb, - 0x89, 0x03, 0x55, 0x57, 0x90, 0xff, 0xc6, 0xf9, 0xfc, 0x8f, 0x18, 0xff, 0x04, 0xc9, 0x9b, 0x4b, - 0x46, 0x29, 0x08, 0x07, 0x42, 0x92, 0xed, 0xd8, 0xdc, 0xec, 0xf4, 0xa8, 0xed, 0x54, 0x57, 0xb3, - 0x48, 0x6a, 0x39, 0x36, 0x3f, 0x14, 0xe4, 0x42, 0x92, 0x1d, 0x0e, 0x84, 0x2a, 0x3e, 0x1f, 0x32, - 0x7f, 0x52, 0x2d, 0x64, 0x51, 0xc5, 0xa7, 0x82, 0x54, 0xa8, 0x02, 0x79, 0xc8, 0xc7, 0x50, 0x6e, - 0xb3, 0xae, 0xed, 0x98, 0xed, 0xbe, 
0xdb, 0x39, 0xa9, 0x16, 0x51, 0xc4, 0xee, 0xf9, 0x22, 0x1a, - 0x82, 0xa1, 0x21, 0xe8, 0x9b, 0x4b, 0x06, 0xb4, 0xa3, 0x11, 0x69, 0x40, 0xb1, 0xd3, 0x63, 0x9d, - 0x13, 0x93, 0x8f, 0xab, 0x25, 0x94, 0x74, 0xed, 0x7c, 0x49, 0x87, 0x82, 0xfa, 0xf1, 0xb8, 0xb9, - 0x64, 0x14, 0x3a, 0xf2, 0x51, 0xe8, 0xc5, 0x62, 0x7d, 0x7b, 0xc4, 0x7c, 0x21, 0xe5, 0x62, 0x16, - 0xbd, 0xdc, 0x95, 0xf4, 0x28, 0xa7, 0x64, 0x85, 0x03, 0x72, 0x0f, 0x4a, 0xcc, 0xb1, 0xd4, 0xc1, - 0xca, 0x28, 0xe8, 0xfa, 0x02, 0x0b, 0x73, 0xac, 0xf0, 0x58, 0x45, 0xa6, 0x9e, 0xc9, 0x87, 0xb0, - 0xda, 0x71, 0x07, 0x03, 0x9b, 0x57, 0xd7, 0x50, 0xc6, 0x5b, 0x0b, 0x8e, 0x84, 0xb4, 0xcd, 0x25, - 0x43, 0x71, 0x35, 0x0a, 0xb0, 0x32, 0xa2, 0xfd, 0x21, 0xd3, 0x6f, 0x40, 0x39, 0x61, 0xc9, 0xa4, - 0x0a, 0x85, 0x01, 0x0b, 0x02, 0xda, 0x65, 0x55, 0xed, 0xaa, 0xb6, 0x5b, 0x32, 0xc2, 0xa1, 0xbe, - 0x01, 0x6b, 0x49, 0xbb, 0xd5, 0x07, 0x11, 0xa3, 0xb0, 0x45, 0xc1, 0x38, 0x62, 0x7e, 0x20, 0x0c, - 0x50, 0x31, 0xaa, 0x21, 0xd9, 0x81, 0x75, 0x3c, 0xad, 0x19, 0xbe, 0x17, 0x7e, 0x95, 0x37, 0xd6, - 0x70, 0xf2, 0x58, 0x11, 0x6d, 0x43, 0xd9, 0xdb, 0xf7, 0x22, 0x92, 0x65, 0x24, 0x01, 0x6f, 0xdf, - 0x53, 0x04, 0xfa, 0x77, 0xa1, 0x32, 0x6d, 0xba, 0xa4, 0x02, 0xcb, 0x27, 0x6c, 0xa2, 0xd6, 0x13, - 0x8f, 0xe4, 0x92, 0x3a, 0x16, 0xae, 0x51, 0x32, 0xd4, 0x19, 0x7f, 0x97, 0x8b, 0x98, 0x23, 0x6b, - 0x15, 0xee, 0x26, 0x82, 0x04, 0x72, 0x97, 0xf7, 0x6b, 0x75, 0x19, 0x20, 0xea, 0x61, 0x80, 0xa8, - 0x3f, 0x0e, 0x23, 0x48, 0xa3, 0xf8, 0xe5, 0x8b, 0xed, 0xa5, 0x5f, 0xfe, 0x65, 0x5b, 0x33, 0x90, - 0x83, 0x5c, 0x16, 0x06, 0x45, 0x6d, 0xc7, 0xb4, 0x2d, 0xb5, 0x4e, 0x01, 0xc7, 0x2d, 0x8b, 0x7c, - 0x0a, 0x95, 0x8e, 0xeb, 0x04, 0xcc, 0x09, 0x86, 0x81, 0x08, 0x73, 0x74, 0x10, 0xa8, 0x58, 0x30, - 0xef, 0x92, 0x0f, 0x43, 0xf2, 0x23, 0xa4, 0x36, 0x36, 0x3b, 0xe9, 0x09, 0x72, 0x1f, 0x60, 0x44, - 0xfb, 0xb6, 0x45, 0xb9, 0xeb, 0x07, 0xd5, 0xfc, 0xd5, 0xe5, 0x73, 0x84, 0x1d, 0x87, 0x84, 0x4f, - 0x3c, 0x8b, 0x72, 0xd6, 0xc8, 0x8b, 0x9d, 0x1b, 0x09, 0x7e, 0x72, 0x1d, 0x36, 0xa9, 0xe7, 0x99, - 0x01, 0xa7, 0x9c, 0x99, 0xed, 0x09, 0x67, 0x01, 0xc6, 0x8b, 0x35, 0x63, 0x9d, 0x7a, 0xde, 0x23, - 0x31, 0xdb, 0x10, 0x93, 0xba, 0x15, 0xdd, 0x36, 0xba, 0x26, 0x21, 0x90, 0xb7, 0x28, 0xa7, 0xa8, - 0xad, 0x35, 0x03, 0x9f, 0xc5, 0x9c, 0x47, 0x79, 0x4f, 0xe9, 0x00, 0x9f, 0xc9, 0xeb, 0xb0, 0xda, - 0x63, 0x76, 0xb7, 0xc7, 0xf1, 0xd8, 0xcb, 0x86, 0x1a, 0x89, 0x8b, 0xf1, 0x7c, 0x77, 0xc4, 0x30, - 0xba, 0x15, 0x0d, 0x39, 0xd0, 0x7f, 0x95, 0x83, 0x0b, 0x67, 0xdc, 0x57, 0xc8, 0xed, 0xd1, 0xa0, - 0x17, 0xae, 0x25, 0x9e, 0xc9, 0x6d, 0x21, 0x97, 0x5a, 0xcc, 0x57, 0x51, 0xf9, 0xcd, 0x39, 0x1a, - 0x68, 0x22, 0x91, 0x3a, 0xb8, 0x62, 0x21, 0x4f, 0xa0, 0xd2, 0xa7, 0x01, 0x37, 0xa5, 0xed, 0x9b, - 0x18, 0x65, 0x97, 0xcf, 0x8d, 0x04, 0xf7, 0x69, 0xe8, 0x33, 0xc2, 0xb8, 0x95, 0xb8, 0x8d, 0x7e, - 0x6a, 0x96, 0x3c, 0x85, 0x4b, 0xed, 0xc9, 0x4f, 0xa8, 0xc3, 0x6d, 0x87, 0x99, 0x67, 0xee, 0x68, - 0x7b, 0x8e, 0xe8, 0x7b, 0x23, 0xdb, 0x62, 0x4e, 0x27, 0xbc, 0x9c, 0x8b, 0x91, 0x88, 0xe8, 0xf2, - 0x02, 0xfd, 0x29, 0x6c, 0xa4, 0x63, 0x11, 0xd9, 0x80, 0x1c, 0x1f, 0x2b, 0x8d, 0xe4, 0xf8, 0x98, - 0x7c, 0x07, 0xf2, 0x42, 0x1c, 0x6a, 0x63, 0x63, 0x2e, 0x58, 0x28, 0xee, 0xc7, 0x13, 0x8f, 0x19, - 0x48, 0xaf, 0xeb, 0x91, 0x27, 0x44, 0xf1, 0x69, 0x5a, 0xb6, 0x7e, 0x13, 0x36, 0xa7, 0x42, 0x4f, - 0xe2, 0x5a, 0xb5, 0xe4, 0xb5, 0xea, 0x9b, 0xb0, 0x9e, 0x8a, 0x30, 0xfa, 0x1f, 0x57, 0xa1, 0x68, - 0xb0, 0xc0, 0x13, 0x46, 0x4c, 0x9a, 0x50, 0x62, 0xe3, 0x0e, 0x93, 0xb0, 0xa4, 0x2d, 0x08, 0xe2, - 0x92, 0xe7, 0x5e, 0x48, 0x2f, 0xa2, 0x66, 0xc4, 0x4c, 0x6e, 
0xa5, 0x20, 0x79, 0x67, 0x91, 0x90, - 0x24, 0x26, 0xdf, 0x49, 0x63, 0xf2, 0x5b, 0x0b, 0x78, 0xa7, 0x40, 0xf9, 0x56, 0x0a, 0x94, 0x17, - 0x2d, 0x9c, 0x42, 0xe5, 0xd6, 0x0c, 0x54, 0x5e, 0x74, 0xfc, 0x39, 0xb0, 0xdc, 0x9a, 0x01, 0xcb, - 0xbb, 0x0b, 0xf7, 0x32, 0x13, 0x97, 0xef, 0xa4, 0x71, 0x79, 0x91, 0x3a, 0xa6, 0x80, 0xf9, 0xfe, - 0x2c, 0x60, 0xbe, 0xb9, 0x40, 0xc6, 0x5c, 0x64, 0x3e, 0x3c, 0x83, 0xcc, 0xd7, 0x17, 0x88, 0x9a, - 0x01, 0xcd, 0xad, 0x14, 0x34, 0x43, 0x26, 0xdd, 0xcc, 0xc1, 0xe6, 0x8f, 0xce, 0x62, 0xf3, 0x8d, - 0x45, 0xa6, 0x36, 0x0b, 0x9c, 0xbf, 0x37, 0x05, 0xce, 0xd7, 0x16, 0x9d, 0x6a, 0x2e, 0x3a, 0xdf, - 0x14, 0xf1, 0x71, 0xca, 0x33, 0x44, 0x2c, 0x65, 0xbe, 0xef, 0xfa, 0x0a, 0xf8, 0xe4, 0x40, 0xdf, - 0x15, 0x11, 0x3b, 0xb6, 0xff, 0x73, 0x90, 0x1c, 0x9d, 0x36, 0x61, 0xed, 0xfa, 0x17, 0x5a, 0xcc, - 0x8b, 0x91, 0x2d, 0x19, 0xed, 0x4b, 0x2a, 0xda, 0x27, 0x00, 0x3e, 0x97, 0x06, 0xf8, 0x6d, 0x28, - 0x0b, 0x4c, 0x99, 0xc2, 0x6e, 0xea, 0x85, 0xd8, 0x4d, 0xde, 0x86, 0x0b, 0x18, 0x7f, 0x65, 0x1a, - 0xa0, 0x02, 0x49, 0x1e, 0x03, 0xc9, 0xa6, 0x78, 0x21, 0x35, 0x28, 0x81, 0xe2, 0x9b, 0x70, 0x31, - 0x41, 0x2b, 0xe4, 0x22, 0x16, 0x48, 0x90, 0xaa, 0x44, 0xd4, 0x07, 0x9e, 0xd7, 0xa4, 0x41, 0x4f, - 0x7f, 0x10, 0x2b, 0x28, 0xce, 0x0b, 0x08, 0xe4, 0x3b, 0xae, 0x25, 0xcf, 0xbd, 0x6e, 0xe0, 0xb3, - 0xc8, 0x15, 0xfa, 0x6e, 0x17, 0x37, 0x57, 0x32, 0xc4, 0xa3, 0xa0, 0x8a, 0x5c, 0xbb, 0x24, 0x7d, - 0x56, 0xff, 0xbd, 0x16, 0xcb, 0x8b, 0x53, 0x85, 0x59, 0xa8, 0xae, 0xbd, 0x4a, 0x54, 0xcf, 0xfd, - 0x6f, 0xa8, 0xae, 0xff, 0x4b, 0x8b, 0xaf, 0x34, 0xc2, 0xeb, 0xaf, 0xa7, 0x02, 0x61, 0x5d, 0xb6, - 0x63, 0xb1, 0x31, 0xaa, 0x7c, 0xd9, 0x90, 0x83, 0x30, 0xd5, 0x5a, 0xc5, 0x6b, 0x48, 0xa7, 0x5a, - 0x05, 0x9c, 0x93, 0x03, 0xf2, 0x3e, 0xe2, 0xbc, 0xfb, 0x4c, 0x85, 0x86, 0x14, 0x08, 0xca, 0xa2, - 0xae, 0xae, 0xaa, 0xb9, 0x23, 0x41, 0x66, 0x48, 0xea, 0x04, 0xbe, 0x94, 0x52, 0x69, 0xc3, 0x15, - 0x28, 0x89, 0xad, 0x07, 0x1e, 0xed, 0x30, 0xf4, 0xed, 0x92, 0x11, 0x4f, 0xe8, 0x16, 0x90, 0xb3, - 0x31, 0x86, 0x3c, 0x84, 0x55, 0x36, 0x62, 0x0e, 0x17, 0x77, 0x24, 0xd4, 0x7a, 0x65, 0x2e, 0x10, - 0x33, 0x87, 0x37, 0xaa, 0x42, 0x99, 0xff, 0x7c, 0xb1, 0x5d, 0x91, 0x3c, 0xef, 0xba, 0x03, 0x9b, - 0xb3, 0x81, 0xc7, 0x27, 0x86, 0x92, 0xa2, 0xff, 0x2c, 0x27, 0xf0, 0x30, 0x15, 0x7f, 0x66, 0xaa, - 0x37, 0x74, 0x9a, 0x5c, 0x22, 0x45, 0xca, 0xa6, 0xf2, 0x37, 0x01, 0xba, 0x34, 0x30, 0x9f, 0x53, - 0x87, 0x33, 0x4b, 0xe9, 0xbd, 0xd4, 0xa5, 0xc1, 0x0f, 0x70, 0x42, 0xe4, 0x9b, 0xe2, 0xf5, 0x30, - 0x60, 0x16, 0x5e, 0xc0, 0xb2, 0x51, 0xe8, 0xd2, 0xe0, 0x49, 0xc0, 0xac, 0xc4, 0x59, 0x0b, 0xaf, - 0xe2, 0xac, 0x69, 0x7d, 0x17, 0xa7, 0xf5, 0xfd, 0xf3, 0x5c, 0xec, 0x1d, 0x71, 0xfa, 0xf0, 0xff, - 0xa9, 0x8b, 0xdf, 0x60, 0x4d, 0x91, 0x06, 0x01, 0xf2, 0x43, 0xb8, 0x10, 0x79, 0xa5, 0x39, 0x44, - 0x6f, 0x0d, 0xad, 0xf0, 0xe5, 0x9c, 0xbb, 0x32, 0x4a, 0x4f, 0x07, 0xe4, 0x33, 0x78, 0x63, 0x2a, - 0x06, 0x45, 0x0b, 0xe4, 0x5e, 0x2a, 0x14, 0xbd, 0x96, 0x0e, 0x45, 0xa1, 0xfc, 0x58, 0x7b, 0xcb, - 0xaf, 0xc4, 0x6b, 0x5a, 0x22, 0x85, 0x4d, 0xc2, 0xdb, 0x4c, 0x9b, 0xd8, 0x81, 0x75, 0x9f, 0x71, - 0x51, 0x4b, 0xa5, 0xaa, 0x86, 0x35, 0x39, 0x29, 0x21, 0x41, 0xff, 0xb3, 0x06, 0x9b, 0x53, 0xa7, - 0x20, 0x1f, 0xc0, 0x8a, 0x84, 0x69, 0xed, 0xdc, 0x6e, 0x09, 0x5e, 0x8b, 0x3a, 0xb8, 0x64, 0x20, - 0x07, 0x50, 0x64, 0x2a, 0x05, 0x57, 0x9a, 0xbb, 0xb6, 0x20, 0x53, 0x57, 0xfc, 0x11, 0x1b, 0xb9, - 0x0b, 0xa5, 0xe8, 0x7e, 0x16, 0x94, 0x77, 0xd1, 0xf5, 0x2a, 0x21, 0x31, 0xa3, 0x7e, 0x08, 0xe5, - 0xc4, 0xf6, 0xc8, 0x37, 0xa0, 0x34, 0xa0, 0x63, 0x55, 0x93, 0xc9, 0x2c, 0xbb, 0x38, 
0xa0, 0x63, - 0x2c, 0xc7, 0xc8, 0x1b, 0x50, 0x10, 0x2f, 0xbb, 0x54, 0xde, 0xf6, 0xb2, 0xb1, 0x3a, 0xa0, 0xe3, - 0xef, 0xd3, 0x40, 0xff, 0x85, 0x06, 0x1b, 0xe9, 0x7d, 0x92, 0x77, 0x80, 0x08, 0x5a, 0xda, 0x65, - 0xa6, 0x33, 0x1c, 0x48, 0x20, 0x0d, 0x25, 0x6e, 0x0e, 0xe8, 0xf8, 0xa0, 0xcb, 0x1e, 0x0e, 0x07, - 0xb8, 0x74, 0x40, 0x1e, 0x40, 0x25, 0x24, 0x0e, 0x3b, 0x62, 0x4a, 0x2b, 0x97, 0xcf, 0x54, 0xc4, - 0x77, 0x15, 0x81, 0x2c, 0x88, 0x7f, 0x2d, 0x0a, 0xe2, 0x0d, 0x29, 0x2f, 0x7c, 0xa3, 0xbf, 0x0f, - 0x9b, 0x53, 0x27, 0x26, 0x3a, 0xac, 0x7b, 0xc3, 0xb6, 0x79, 0xc2, 0x26, 0x26, 0xaa, 0x04, 0xfd, - 0xa1, 0x64, 0x94, 0xbd, 0x61, 0xfb, 0x63, 0x36, 0x11, 0xa5, 0x49, 0xa0, 0x77, 0x60, 0x23, 0x5d, - 0x71, 0x09, 0x74, 0xf1, 0xdd, 0xa1, 0x63, 0xe1, 0xbe, 0x57, 0x0c, 0x39, 0x20, 0xb7, 0x61, 0x65, - 0xe4, 0x4a, 0x93, 0x3f, 0xaf, 0xc4, 0x3a, 0x76, 0x39, 0x4b, 0xd4, 0x6d, 0x92, 0x47, 0x0f, 0x60, - 0x05, 0x8d, 0x57, 0x18, 0x22, 0xd6, 0x4e, 0x2a, 0xbb, 0x11, 0xcf, 0xe4, 0x18, 0x80, 0x72, 0xee, - 0xdb, 0xed, 0x61, 0x2c, 0xbe, 0x9a, 0x14, 0xdf, 0xb7, 0xdb, 0x41, 0xfd, 0x64, 0x54, 0x3f, 0xa2, - 0xb6, 0xdf, 0xb8, 0xa2, 0xcc, 0xff, 0x52, 0xcc, 0x93, 0x70, 0x81, 0x84, 0x24, 0xfd, 0xab, 0x3c, - 0xac, 0xca, 0x9a, 0x94, 0x7c, 0x98, 0xee, 0x90, 0x94, 0xf7, 0xb7, 0xe6, 0x6d, 0x5f, 0x52, 0xa9, - 0xdd, 0x47, 0x69, 0xd6, 0xf5, 0xe9, 0xb6, 0x43, 0xa3, 0x7c, 0xfa, 0x62, 0xbb, 0x80, 0x29, 0x4a, - 0xeb, 0x6e, 0xdc, 0x83, 0x98, 0x57, 0x82, 0x87, 0x0d, 0x8f, 0xfc, 0x4b, 0x37, 0x3c, 0x9a, 0xb0, - 0x9e, 0xc8, 0xc9, 0x6c, 0x4b, 0x15, 0x33, 0x5b, 0xe7, 0x39, 0x5d, 0xeb, 0xae, 0xda, 0x7f, 0x39, - 0xca, 0xd9, 0x5a, 0x16, 0xd9, 0x4d, 0x57, 0xe2, 0x98, 0xda, 0xc9, 0x9c, 0x22, 0x51, 0x5c, 0x8b, - 0xc4, 0x4e, 0xb8, 0x83, 0x88, 0x10, 0x92, 0x44, 0xa6, 0x18, 0x45, 0x31, 0x81, 0x2f, 0x6f, 0xc0, - 0x66, 0x9c, 0xfd, 0x48, 0x92, 0xa2, 0x94, 0x12, 0x4f, 0x23, 0xe1, 0x7b, 0x70, 0xc9, 0x61, 0x63, - 0x6e, 0x4e, 0x53, 0x97, 0x90, 0x9a, 0x88, 0x77, 0xc7, 0x69, 0x8e, 0x6b, 0xb0, 0x11, 0xc7, 0x59, - 0xa4, 0x05, 0xd9, 0x1f, 0x89, 0x66, 0x91, 0xec, 0x32, 0x14, 0xa3, 0xdc, 0xb4, 0x8c, 0x04, 0x05, - 0x2a, 0x53, 0xd2, 0x28, 0xdb, 0xf5, 0x59, 0x30, 0xec, 0x73, 0x25, 0x64, 0x0d, 0x69, 0x30, 0xdb, - 0x35, 0xe4, 0x3c, 0xd2, 0xee, 0xc0, 0x7a, 0x18, 0x55, 0x24, 0xdd, 0x3a, 0xd2, 0xad, 0x85, 0x93, - 0x48, 0x74, 0x13, 0x2a, 0x9e, 0xef, 0x7a, 0x6e, 0xc0, 0x7c, 0x93, 0x5a, 0x96, 0xcf, 0x82, 0xa0, - 0xba, 0x21, 0xe5, 0x85, 0xf3, 0x07, 0x72, 0x5a, 0xff, 0x16, 0x14, 0xc2, 0xa4, 0xfb, 0x12, 0xac, - 0x34, 0xa2, 0x08, 0x99, 0x37, 0xe4, 0x40, 0x80, 0xf0, 0x81, 0xe7, 0xa9, 0x16, 0x9c, 0x78, 0xd4, - 0xfb, 0x50, 0x50, 0x17, 0x36, 0xb3, 0xf1, 0xf2, 0x00, 0xd6, 0x3c, 0xea, 0x8b, 0x63, 0x24, 0xdb, - 0x2f, 0xf3, 0xca, 0xc6, 0x23, 0xea, 0xf3, 0x47, 0x8c, 0xa7, 0xba, 0x30, 0x65, 0xe4, 0x97, 0x53, - 0xfa, 0x2d, 0x58, 0x4f, 0xd1, 0x88, 0x6d, 0x72, 0x97, 0xd3, 0x7e, 0xe8, 0xe8, 0x38, 0x88, 0x76, - 0x92, 0x8b, 0x77, 0xa2, 0xdf, 0x86, 0x52, 0x74, 0x57, 0xa2, 0x1a, 0x09, 0x55, 0xa1, 0x29, 0xf5, - 0xcb, 0x21, 0x76, 0x9a, 0xdc, 0xe7, 0xcc, 0x57, 0xd6, 0x2f, 0x07, 0x3a, 0x4b, 0x04, 0x26, 0x09, - 0x79, 0xe4, 0x0e, 0x14, 0x54, 0x60, 0x52, 0xfe, 0x38, 0xaf, 0xa7, 0x74, 0x84, 0x91, 0x2a, 0xec, - 0x29, 0xc9, 0xb8, 0x15, 0x2f, 0x93, 0x4b, 0x2e, 0xf3, 0x53, 0x28, 0x86, 0xc1, 0x27, 0x8d, 0x12, - 0x72, 0x85, 0xab, 0x8b, 0x50, 0x42, 0x2d, 0x12, 0x33, 0x0a, 0x6b, 0x0a, 0xec, 0xae, 0xc3, 0x2c, - 0x33, 0x76, 0x41, 0x5c, 0xb3, 0x68, 0x6c, 0xca, 0x17, 0xf7, 0x43, 0xff, 0xd2, 0xdf, 0x83, 0x55, - 0xb9, 0xd7, 0x99, 0x21, 0x6e, 0x06, 0xfe, 0xea, 0xff, 0xd0, 0xa0, 0x18, 0xc2, 0xc7, 0x4c, 0xa6, - 0xd4, 0x21, 
0x72, 0x5f, 0xf7, 0x10, 0xaf, 0x3e, 0x24, 0xbd, 0x0b, 0x04, 0x2d, 0xc5, 0x1c, 0xb9, - 0xdc, 0x76, 0xba, 0xa6, 0xbc, 0x0b, 0x99, 0x2e, 0x56, 0xf0, 0xcd, 0x31, 0xbe, 0x38, 0x12, 0xf3, - 0x6f, 0xef, 0x40, 0x39, 0xd1, 0x0a, 0x23, 0x05, 0x58, 0x7e, 0xc8, 0x9e, 0x57, 0x96, 0x48, 0x19, - 0x0a, 0x06, 0xc3, 0x46, 0x42, 0x45, 0xdb, 0xff, 0xaa, 0x00, 0x9b, 0x07, 0x8d, 0xc3, 0xd6, 0x81, - 0xe7, 0xf5, 0xed, 0x0e, 0xe2, 0x19, 0xf9, 0x04, 0xf2, 0x58, 0x4c, 0x67, 0xf8, 0x08, 0x54, 0xcb, - 0xd2, 0x95, 0x22, 0x06, 0xac, 0x60, 0xcd, 0x4d, 0xb2, 0x7c, 0x1b, 0xaa, 0x65, 0x6a, 0x56, 0x89, - 0x4d, 0xa2, 0xc1, 0x65, 0xf8, 0x64, 0x54, 0xcb, 0xd2, 0xc1, 0x22, 0x9f, 0x41, 0x29, 0x2e, 0xa6, - 0xb3, 0x7e, 0x48, 0xaa, 0x65, 0xee, 0x6d, 0x09, 0xf9, 0x71, 0xf9, 0x90, 0xf5, 0x33, 0x4a, 0x2d, - 0x73, 0x53, 0x87, 0x3c, 0x85, 0x42, 0x58, 0xa8, 0x65, 0xfb, 0xd4, 0x53, 0xcb, 0xd8, 0x77, 0x12, - 0xd7, 0x27, 0xeb, 0xeb, 0x2c, 0xdf, 0xb3, 0x6a, 0x99, 0x9a, 0x6b, 0xe4, 0x09, 0xac, 0xaa, 0x0c, - 0x39, 0xd3, 0x47, 0x9c, 0x5a, 0xb6, 0x6e, 0x92, 0x50, 0x72, 0xdc, 0xc1, 0xc8, 0xfa, 0x0d, 0xaf, - 0x96, 0xb9, 0xab, 0x48, 0x28, 0x40, 0xa2, 0xe8, 0xce, 0xfc, 0x71, 0xae, 0x96, 0xbd, 0x5b, 0x48, - 0x7e, 0x0c, 0xc5, 0xa8, 0xb4, 0xca, 0xf8, 0x91, 0xac, 0x96, 0xb5, 0x61, 0xd7, 0x68, 0xfd, 0xe7, - 0x6f, 0x5b, 0xda, 0x6f, 0x4f, 0xb7, 0xb4, 0x2f, 0x4e, 0xb7, 0xb4, 0x2f, 0x4f, 0xb7, 0xb4, 0x3f, - 0x9d, 0x6e, 0x69, 0x7f, 0x3d, 0xdd, 0xd2, 0xfe, 0xf0, 0xf7, 0x2d, 0xed, 0x47, 0xef, 0x74, 0x6d, - 0xde, 0x1b, 0xb6, 0xeb, 0x1d, 0x77, 0xb0, 0x17, 0x0b, 0x4c, 0x3e, 0xc6, 0x5f, 0xbe, 0xdb, 0xab, - 0x18, 0xb0, 0xbe, 0xfd, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0c, 0x66, 0x8a, 0xe9, 0x0e, 0x1f, - 0x00, 0x00, -} - -func (this *Request) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Request) - if !ok { - that2, ok := that.(Request) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if that1.Value == nil { - if this.Value != nil { - return false - } - } else if this.Value == nil { - return false - } else if !this.Value.Equal(that1.Value) { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true +type Snapshot struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` + Chunks uint32 `protobuf:"varint,3,opt,name=chunks,proto3" json:"chunks,omitempty"` + Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` + Metadata []byte `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` } -func (this *Request_Echo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*Request_Echo) - if !ok { - that2, ok := that.(Request_Echo) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Echo.Equal(that1.Echo) { - return false - } - return true +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{41} } -func (this *Request_Flush) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Request_Flush) - if !ok { - that2, ok := 
that.(Request_Flush) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Flush.Equal(that1.Flush) { - return false - } - return true +func (m *Snapshot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (this *Request_Info) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Request_Info) - if !ok { - that2, ok := that.(Request_Info) - if ok { - that1 = &that2 - } else { - return false +func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } + return b[:n], nil } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Info.Equal(that1.Info) { - return false - } - return true } -func (this *Request_SetOption) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Request_SetOption) - if !ok { - that2, ok := that.(Request_SetOption) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.SetOption.Equal(that1.SetOption) { - return false - } - return true +func (m *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(m, src) } -func (this *Request_InitChain) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Request_InitChain) - if !ok { - that2, ok := that.(Request_InitChain) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.InitChain.Equal(that1.InitChain) { - return false - } - return true +func (m *Snapshot) XXX_Size() int { + return m.Size() } -func (this *Request_Query) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Request_Query) - if !ok { - that2, ok := that.(Request_Query) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Query.Equal(that1.Query) { - return false - } - return true +func (m *Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_Snapshot.DiscardUnknown(m) } -func (this *Request_BeginBlock) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*Request_BeginBlock) - if !ok { - that2, ok := that.(Request_BeginBlock) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.BeginBlock.Equal(that1.BeginBlock) { - return false - } - return true -} -func (this *Request_CheckTx) Equal(that interface{}) bool { - if that == nil { - return this == nil - } +var xxx_messageInfo_Snapshot proto.InternalMessageInfo - that1, ok := that.(*Request_CheckTx) - if !ok { - that2, ok := that.(Request_CheckTx) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.CheckTx.Equal(that1.CheckTx) { - return false +func (m *Snapshot) GetHeight() uint64 { + if m != nil { + return m.Height } - return true + return 0 } -func (this *Request_DeliverTx) Equal(that interface{}) 
bool { - if that == nil { - return this == nil - } - that1, ok := that.(*Request_DeliverTx) - if !ok { - that2, ok := that.(Request_DeliverTx) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.DeliverTx.Equal(that1.DeliverTx) { - return false +func (m *Snapshot) GetFormat() uint32 { + if m != nil { + return m.Format } - return true + return 0 } -func (this *Request_EndBlock) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*Request_EndBlock) - if !ok { - that2, ok := that.(Request_EndBlock) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.EndBlock.Equal(that1.EndBlock) { - return false +func (m *Snapshot) GetChunks() uint32 { + if m != nil { + return m.Chunks } - return true + return 0 } -func (this *Request_Commit) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*Request_Commit) - if !ok { - that2, ok := that.(Request_Commit) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Commit.Equal(that1.Commit) { - return false +func (m *Snapshot) GetHash() []byte { + if m != nil { + return m.Hash } - return true + return nil } -func (this *RequestEcho) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*RequestEcho) - if !ok { - that2, ok := that.(RequestEcho) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Message != that1.Message { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false +func (m *Snapshot) GetMetadata() []byte { + if m != nil { + return m.Metadata } - return true + return nil } -func (this *RequestFlush) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - that1, ok := that.(*RequestFlush) - if !ok { - that2, ok := that.(RequestFlush) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *RequestInfo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RequestInfo) - if !ok { - that2, ok := that.(RequestInfo) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Version != that1.Version { - return false - } - if this.BlockVersion != that1.BlockVersion { - return false - } - if this.P2PVersion != that1.P2PVersion { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *RequestSetOption) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RequestSetOption) - if !ok { - that2, ok := that.(RequestSetOption) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Key != that1.Key { - return false - } - if this.Value != that1.Value { - return false - } - if 
!bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *RequestInitChain) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RequestInitChain) - if !ok { - that2, ok := that.(RequestInitChain) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Time.Equal(that1.Time) { - return false - } - if this.ChainId != that1.ChainId { - return false - } - if !this.ConsensusParams.Equal(that1.ConsensusParams) { - return false - } - if len(this.Validators) != len(that1.Validators) { - return false - } - for i := range this.Validators { - if !this.Validators[i].Equal(&that1.Validators[i]) { - return false - } - } - if !bytes.Equal(this.AppStateBytes, that1.AppStateBytes) { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *RequestQuery) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RequestQuery) - if !ok { - that2, ok := that.(RequestQuery) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - if this.Path != that1.Path { - return false - } - if this.Height != that1.Height { - return false - } - if this.Prove != that1.Prove { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *RequestBeginBlock) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RequestBeginBlock) - if !ok { - that2, ok := that.(RequestBeginBlock) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Hash, that1.Hash) { - return false - } - if !this.Header.Equal(&that1.Header) { - return false - } - if !this.LastCommitInfo.Equal(&that1.LastCommitInfo) { - return false - } - if len(this.ByzantineValidators) != len(that1.ByzantineValidators) { - return false - } - for i := range this.ByzantineValidators { - if !this.ByzantineValidators[i].Equal(&that1.ByzantineValidators[i]) { - return false - } - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *RequestCheckTx) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RequestCheckTx) - if !ok { - that2, ok := that.(RequestCheckTx) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Tx, that1.Tx) { - return false - } - if this.Type != that1.Type { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *RequestDeliverTx) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RequestDeliverTx) - if !ok { - that2, ok := that.(RequestDeliverTx) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Tx, that1.Tx) { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - 
return false - } - return true -} -func (this *RequestEndBlock) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RequestEndBlock) - if !ok { - that2, ok := that.(RequestEndBlock) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Height != that1.Height { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *RequestCommit) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RequestCommit) - if !ok { - that2, ok := that.(RequestCommit) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *Response) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Response) - if !ok { - that2, ok := that.(Response) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if that1.Value == nil { - if this.Value != nil { - return false - } - } else if this.Value == nil { - return false - } else if !this.Value.Equal(that1.Value) { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *Response_Exception) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Response_Exception) - if !ok { - that2, ok := that.(Response_Exception) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Exception.Equal(that1.Exception) { - return false - } - return true -} -func (this *Response_Echo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Response_Echo) - if !ok { - that2, ok := that.(Response_Echo) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Echo.Equal(that1.Echo) { - return false - } - return true -} -func (this *Response_Flush) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Response_Flush) - if !ok { - that2, ok := that.(Response_Flush) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Flush.Equal(that1.Flush) { - return false - } - return true -} -func (this *Response_Info) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Response_Info) - if !ok { - that2, ok := that.(Response_Info) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Info.Equal(that1.Info) { - return false - } - return true -} -func (this *Response_SetOption) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Response_SetOption) - if !ok { - that2, ok := that.(Response_SetOption) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else 
if this == nil { - return false - } - if !this.SetOption.Equal(that1.SetOption) { - return false - } - return true -} -func (this *Response_InitChain) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Response_InitChain) - if !ok { - that2, ok := that.(Response_InitChain) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.InitChain.Equal(that1.InitChain) { - return false - } - return true -} -func (this *Response_Query) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Response_Query) - if !ok { - that2, ok := that.(Response_Query) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Query.Equal(that1.Query) { - return false - } - return true -} -func (this *Response_BeginBlock) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Response_BeginBlock) - if !ok { - that2, ok := that.(Response_BeginBlock) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.BeginBlock.Equal(that1.BeginBlock) { - return false - } - return true -} -func (this *Response_CheckTx) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Response_CheckTx) - if !ok { - that2, ok := that.(Response_CheckTx) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.CheckTx.Equal(that1.CheckTx) { - return false - } - return true -} -func (this *Response_DeliverTx) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Response_DeliverTx) - if !ok { - that2, ok := that.(Response_DeliverTx) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.DeliverTx.Equal(that1.DeliverTx) { - return false - } - return true -} -func (this *Response_EndBlock) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Response_EndBlock) - if !ok { - that2, ok := that.(Response_EndBlock) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.EndBlock.Equal(that1.EndBlock) { - return false - } - return true -} -func (this *Response_Commit) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Response_Commit) - if !ok { - that2, ok := that.(Response_Commit) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Commit.Equal(that1.Commit) { - return false - } - return true -} -func (this *ResponseException) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResponseException) - if !ok { - that2, ok := that.(ResponseException) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Error != that1.Error { - return false - } - if !bytes.Equal(this.XXX_unrecognized, 
that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ResponseEcho) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResponseEcho) - if !ok { - that2, ok := that.(ResponseEcho) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Message != that1.Message { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ResponseFlush) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResponseFlush) - if !ok { - that2, ok := that.(ResponseFlush) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ResponseInfo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResponseInfo) - if !ok { - that2, ok := that.(ResponseInfo) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Data != that1.Data { - return false - } - if this.Version != that1.Version { - return false - } - if this.AppVersion != that1.AppVersion { - return false - } - if this.LastBlockHeight != that1.LastBlockHeight { - return false - } - if !bytes.Equal(this.LastBlockAppHash, that1.LastBlockAppHash) { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ResponseSetOption) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResponseSetOption) - if !ok { - that2, ok := that.(ResponseSetOption) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Code != that1.Code { - return false - } - if this.Log != that1.Log { - return false - } - if this.Info != that1.Info { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ResponseInitChain) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResponseInitChain) - if !ok { - that2, ok := that.(ResponseInitChain) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.ConsensusParams.Equal(that1.ConsensusParams) { - return false - } - if len(this.Validators) != len(that1.Validators) { - return false - } - for i := range this.Validators { - if !this.Validators[i].Equal(&that1.Validators[i]) { - return false - } - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ResponseQuery) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResponseQuery) - if !ok { - that2, ok := that.(ResponseQuery) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Code != that1.Code { - return false - } - if this.Log != that1.Log { - return false - } - if this.Info != that1.Info { - return false - } - 
if this.Index != that1.Index { - return false - } - if !bytes.Equal(this.Key, that1.Key) { - return false - } - if !bytes.Equal(this.Value, that1.Value) { - return false - } - if !this.Proof.Equal(that1.Proof) { - return false - } - if this.Height != that1.Height { - return false - } - if this.Codespace != that1.Codespace { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ResponseBeginBlock) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResponseBeginBlock) - if !ok { - that2, ok := that.(ResponseBeginBlock) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Events) != len(that1.Events) { - return false - } - for i := range this.Events { - if !this.Events[i].Equal(&that1.Events[i]) { - return false - } - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ResponseCheckTx) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResponseCheckTx) - if !ok { - that2, ok := that.(ResponseCheckTx) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Code != that1.Code { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - if this.Log != that1.Log { - return false - } - if this.Info != that1.Info { - return false - } - if this.GasWanted != that1.GasWanted { - return false - } - if this.GasUsed != that1.GasUsed { - return false - } - if len(this.Events) != len(that1.Events) { - return false - } - for i := range this.Events { - if !this.Events[i].Equal(&that1.Events[i]) { - return false - } - } - if this.Codespace != that1.Codespace { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ResponseDeliverTx) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResponseDeliverTx) - if !ok { - that2, ok := that.(ResponseDeliverTx) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Code != that1.Code { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - if this.Log != that1.Log { - return false - } - if this.Info != that1.Info { - return false - } - if this.GasWanted != that1.GasWanted { - return false - } - if this.GasUsed != that1.GasUsed { - return false - } - if len(this.Events) != len(that1.Events) { - return false - } - for i := range this.Events { - if !this.Events[i].Equal(&that1.Events[i]) { - return false - } - } - if this.Codespace != that1.Codespace { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ResponseEndBlock) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResponseEndBlock) - if !ok { - that2, ok := that.(ResponseEndBlock) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.ValidatorUpdates) != len(that1.ValidatorUpdates) { - return false - } - for i := range this.ValidatorUpdates { - if 
!this.ValidatorUpdates[i].Equal(&that1.ValidatorUpdates[i]) { - return false - } - } - if !this.ConsensusParamUpdates.Equal(that1.ConsensusParamUpdates) { - return false - } - if len(this.Events) != len(that1.Events) { - return false - } - for i := range this.Events { - if !this.Events[i].Equal(&that1.Events[i]) { - return false - } - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ResponseCommit) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResponseCommit) - if !ok { - that2, ok := that.(ResponseCommit) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - if this.RetainHeight != that1.RetainHeight { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ConsensusParams) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ConsensusParams) - if !ok { - that2, ok := that.(ConsensusParams) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Block.Equal(that1.Block) { - return false - } - if !this.Evidence.Equal(that1.Evidence) { - return false - } - if !this.Validator.Equal(that1.Validator) { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *BlockParams) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*BlockParams) - if !ok { - that2, ok := that.(BlockParams) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.MaxBytes != that1.MaxBytes { - return false - } - if this.MaxGas != that1.MaxGas { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *EvidenceParams) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*EvidenceParams) - if !ok { - that2, ok := that.(EvidenceParams) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.MaxAgeNumBlocks != that1.MaxAgeNumBlocks { - return false - } - if this.MaxAgeDuration != that1.MaxAgeDuration { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ValidatorParams) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ValidatorParams) - if !ok { - that2, ok := that.(ValidatorParams) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.PubKeyTypes) != len(that1.PubKeyTypes) { - return false - } - for i := range this.PubKeyTypes { - if this.PubKeyTypes[i] != that1.PubKeyTypes[i] { - return false - } - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *LastCommitInfo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LastCommitInfo) - if !ok { 
- that2, ok := that.(LastCommitInfo) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Round != that1.Round { - return false - } - if len(this.Votes) != len(that1.Votes) { - return false - } - for i := range this.Votes { - if !this.Votes[i].Equal(&that1.Votes[i]) { - return false - } - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *Event) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Event) - if !ok { - that2, ok := that.(Event) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Type != that1.Type { - return false - } - if len(this.Attributes) != len(that1.Attributes) { - return false - } - for i := range this.Attributes { - if !this.Attributes[i].Equal(&that1.Attributes[i]) { - return false - } - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *Header) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Header) - if !ok { - that2, ok := that.(Header) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Version.Equal(&that1.Version) { - return false - } - if this.ChainID != that1.ChainID { - return false - } - if this.Height != that1.Height { - return false - } - if !this.Time.Equal(that1.Time) { - return false - } - if !this.LastBlockId.Equal(&that1.LastBlockId) { - return false - } - if !bytes.Equal(this.LastCommitHash, that1.LastCommitHash) { - return false - } - if !bytes.Equal(this.DataHash, that1.DataHash) { - return false - } - if !bytes.Equal(this.ValidatorsHash, that1.ValidatorsHash) { - return false - } - if !bytes.Equal(this.NextValidatorsHash, that1.NextValidatorsHash) { - return false - } - if !bytes.Equal(this.ConsensusHash, that1.ConsensusHash) { - return false - } - if !bytes.Equal(this.AppHash, that1.AppHash) { - return false - } - if !bytes.Equal(this.LastResultsHash, that1.LastResultsHash) { - return false - } - if !bytes.Equal(this.EvidenceHash, that1.EvidenceHash) { - return false - } - if !bytes.Equal(this.ProposerAddress, that1.ProposerAddress) { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *Version) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Version) - if !ok { - that2, ok := that.(Version) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Block != that1.Block { - return false - } - if this.App != that1.App { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *BlockID) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*BlockID) - if !ok { - that2, ok := that.(BlockID) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Hash, that1.Hash) { - return false - } - if !this.PartsHeader.Equal(&that1.PartsHeader) { - return false - } - 
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *PartSetHeader) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PartSetHeader) - if !ok { - that2, ok := that.(PartSetHeader) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Total != that1.Total { - return false - } - if !bytes.Equal(this.Hash, that1.Hash) { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *Validator) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Validator) - if !ok { - that2, ok := that.(Validator) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Address, that1.Address) { - return false - } - if this.Power != that1.Power { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ValidatorUpdate) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ValidatorUpdate) - if !ok { - that2, ok := that.(ValidatorUpdate) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.PubKey.Equal(&that1.PubKey) { - return false - } - if this.Power != that1.Power { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *VoteInfo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*VoteInfo) - if !ok { - that2, ok := that.(VoteInfo) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Validator.Equal(&that1.Validator) { - return false - } - if this.SignedLastBlock != that1.SignedLastBlock { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *PubKey) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PubKey) - if !ok { - that2, ok := that.(PubKey) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Type != that1.Type { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *Evidence) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Evidence) - if !ok { - that2, ok := that.(Evidence) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Type != that1.Type { - return false - } - if !this.Validator.Equal(&that1.Validator) { - return false - } - if this.Height != that1.Height { - return false - } - if !this.Time.Equal(that1.Time) { - return false - } - if this.TotalVotingPower != that1.TotalVotingPower { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - 
} - return true +func init() { + proto.RegisterEnum("tendermint.abci.CheckTxType", CheckTxType_name, CheckTxType_value) + proto.RegisterEnum("tendermint.abci.EvidenceType", EvidenceType_name, EvidenceType_value) + proto.RegisterEnum("tendermint.abci.ResponseOfferSnapshot_Result", ResponseOfferSnapshot_Result_name, ResponseOfferSnapshot_Result_value) + proto.RegisterEnum("tendermint.abci.ResponseApplySnapshotChunk_Result", ResponseApplySnapshotChunk_Result_name, ResponseApplySnapshotChunk_Result_value) + proto.RegisterType((*Request)(nil), "tendermint.abci.Request") + proto.RegisterType((*RequestEcho)(nil), "tendermint.abci.RequestEcho") + proto.RegisterType((*RequestFlush)(nil), "tendermint.abci.RequestFlush") + proto.RegisterType((*RequestInfo)(nil), "tendermint.abci.RequestInfo") + proto.RegisterType((*RequestInitChain)(nil), "tendermint.abci.RequestInitChain") + proto.RegisterType((*RequestQuery)(nil), "tendermint.abci.RequestQuery") + proto.RegisterType((*RequestBeginBlock)(nil), "tendermint.abci.RequestBeginBlock") + proto.RegisterType((*RequestCheckTx)(nil), "tendermint.abci.RequestCheckTx") + proto.RegisterType((*RequestDeliverTx)(nil), "tendermint.abci.RequestDeliverTx") + proto.RegisterType((*RequestEndBlock)(nil), "tendermint.abci.RequestEndBlock") + proto.RegisterType((*RequestCommit)(nil), "tendermint.abci.RequestCommit") + proto.RegisterType((*RequestListSnapshots)(nil), "tendermint.abci.RequestListSnapshots") + proto.RegisterType((*RequestOfferSnapshot)(nil), "tendermint.abci.RequestOfferSnapshot") + proto.RegisterType((*RequestLoadSnapshotChunk)(nil), "tendermint.abci.RequestLoadSnapshotChunk") + proto.RegisterType((*RequestApplySnapshotChunk)(nil), "tendermint.abci.RequestApplySnapshotChunk") + proto.RegisterType((*Response)(nil), "tendermint.abci.Response") + proto.RegisterType((*ResponseException)(nil), "tendermint.abci.ResponseException") + proto.RegisterType((*ResponseEcho)(nil), "tendermint.abci.ResponseEcho") + proto.RegisterType((*ResponseFlush)(nil), "tendermint.abci.ResponseFlush") + proto.RegisterType((*ResponseInfo)(nil), "tendermint.abci.ResponseInfo") + proto.RegisterType((*ResponseInitChain)(nil), "tendermint.abci.ResponseInitChain") + proto.RegisterType((*ResponseQuery)(nil), "tendermint.abci.ResponseQuery") + proto.RegisterType((*ResponseBeginBlock)(nil), "tendermint.abci.ResponseBeginBlock") + proto.RegisterType((*ResponseCheckTx)(nil), "tendermint.abci.ResponseCheckTx") + proto.RegisterType((*ResponseDeliverTx)(nil), "tendermint.abci.ResponseDeliverTx") + proto.RegisterType((*ResponseEndBlock)(nil), "tendermint.abci.ResponseEndBlock") + proto.RegisterType((*ResponseCommit)(nil), "tendermint.abci.ResponseCommit") + proto.RegisterType((*ResponseListSnapshots)(nil), "tendermint.abci.ResponseListSnapshots") + proto.RegisterType((*ResponseOfferSnapshot)(nil), "tendermint.abci.ResponseOfferSnapshot") + proto.RegisterType((*ResponseLoadSnapshotChunk)(nil), "tendermint.abci.ResponseLoadSnapshotChunk") + proto.RegisterType((*ResponseApplySnapshotChunk)(nil), "tendermint.abci.ResponseApplySnapshotChunk") + proto.RegisterType((*ConsensusParams)(nil), "tendermint.abci.ConsensusParams") + proto.RegisterType((*BlockParams)(nil), "tendermint.abci.BlockParams") + proto.RegisterType((*LastCommitInfo)(nil), "tendermint.abci.LastCommitInfo") + proto.RegisterType((*Event)(nil), "tendermint.abci.Event") + proto.RegisterType((*EventAttribute)(nil), "tendermint.abci.EventAttribute") + proto.RegisterType((*TxResult)(nil), "tendermint.abci.TxResult") + 
proto.RegisterType((*Validator)(nil), "tendermint.abci.Validator") + proto.RegisterType((*ValidatorUpdate)(nil), "tendermint.abci.ValidatorUpdate") + proto.RegisterType((*VoteInfo)(nil), "tendermint.abci.VoteInfo") + proto.RegisterType((*Evidence)(nil), "tendermint.abci.Evidence") + proto.RegisterType((*Snapshot)(nil), "tendermint.abci.Snapshot") +} + +func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } + +var fileDescriptor_252557cfdd89a31a = []byte{ + // 2677 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4b, 0x73, 0x1b, 0xc7, + 0x11, 0xc6, 0x1b, 0xd8, 0x26, 0xf1, 0xe0, 0x88, 0x96, 0x61, 0x58, 0x26, 0xe5, 0x75, 0xd9, 0xb1, + 0x64, 0x9b, 0x8c, 0xa9, 0x92, 0x22, 0x95, 0x9d, 0xd8, 0x04, 0x04, 0x19, 0x34, 0x19, 0x92, 0x59, + 0x42, 0x72, 0x5e, 0xd6, 0x7a, 0x80, 0x1d, 0x02, 0x6b, 0x01, 0xbb, 0x6b, 0xec, 0x80, 0x22, 0x7d, + 0xcc, 0xe3, 0xa2, 0x5c, 0x7c, 0xcc, 0xc5, 0x55, 0xf9, 0x07, 0xb9, 0xe6, 0x94, 0x4b, 0x2e, 0xae, + 0x4a, 0xa5, 0xca, 0xc7, 0x9c, 0x9c, 0x94, 0x74, 0xcb, 0x1f, 0xc8, 0x29, 0x95, 0xd4, 0x3c, 0xf6, + 0x05, 0x60, 0x09, 0x30, 0xce, 0x2d, 0xb7, 0x9d, 0xde, 0xee, 0xc6, 0x4c, 0xef, 0xf4, 0xd7, 0xdf, + 0xf4, 0x00, 0x5e, 0xa4, 0xc4, 0x32, 0xc8, 0x68, 0x68, 0x5a, 0x74, 0x13, 0x77, 0xba, 0xe6, 0x26, + 0x3d, 0x73, 0x88, 0xbb, 0xe1, 0x8c, 0x6c, 0x6a, 0xa3, 0x72, 0xf0, 0x72, 0x83, 0xbd, 0xac, 0xbd, + 0x14, 0xd2, 0xee, 0x8e, 0xce, 0x1c, 0x6a, 0x6f, 0x3a, 0x23, 0xdb, 0x3e, 0x16, 0xfa, 0xb5, 0x2b, + 0xa1, 0xd7, 0xdc, 0x4f, 0xd8, 0x5b, 0xe4, 0xad, 0x34, 0x7e, 0x44, 0xce, 0xbc, 0xb7, 0x2f, 0x4d, + 0xd9, 0x3a, 0x78, 0x84, 0x87, 0xde, 0xeb, 0xf5, 0x9e, 0x6d, 0xf7, 0x06, 0x64, 0x93, 0x8f, 0x3a, + 0xe3, 0xe3, 0x4d, 0x6a, 0x0e, 0x89, 0x4b, 0xf1, 0xd0, 0x91, 0x0a, 0xab, 0x3d, 0xbb, 0x67, 0xf3, + 0xc7, 0x4d, 0xf6, 0x24, 0xa4, 0xea, 0x5f, 0xf2, 0x90, 0xd7, 0xc8, 0x67, 0x63, 0xe2, 0x52, 0xb4, + 0x05, 0x19, 0xd2, 0xed, 0xdb, 0xd5, 0xe4, 0xd5, 0xe4, 0xeb, 0x4b, 0x5b, 0x57, 0x36, 0x26, 0x16, + 0xb7, 0x21, 0xf5, 0x9a, 0xdd, 0xbe, 0xdd, 0x4a, 0x68, 0x5c, 0x17, 0xdd, 0x84, 0xec, 0xf1, 0x60, + 0xec, 0xf6, 0xab, 0x29, 0x6e, 0xf4, 0x52, 0x9c, 0xd1, 0x3d, 0xa6, 0xd4, 0x4a, 0x68, 0x42, 0x9b, + 0xfd, 0x94, 0x69, 0x1d, 0xdb, 0xd5, 0xf4, 0xf9, 0x3f, 0xb5, 0x63, 0x1d, 0xf3, 0x9f, 0x62, 0xba, + 0xa8, 0x0e, 0x60, 0x5a, 0x26, 0xd5, 0xbb, 0x7d, 0x6c, 0x5a, 0xd5, 0x0c, 0xb7, 0x7c, 0x39, 0xde, + 0xd2, 0xa4, 0x0d, 0xa6, 0xd8, 0x4a, 0x68, 0x8a, 0xe9, 0x0d, 0xd8, 0x74, 0x3f, 0x1b, 0x93, 0xd1, + 0x59, 0x35, 0x7b, 0xfe, 0x74, 0x7f, 0xc4, 0x94, 0xd8, 0x74, 0xb9, 0x36, 0x6a, 0xc2, 0x52, 0x87, + 0xf4, 0x4c, 0x4b, 0xef, 0x0c, 0xec, 0xee, 0xa3, 0x6a, 0x8e, 0x1b, 0xab, 0x71, 0xc6, 0x75, 0xa6, + 0x5a, 0x67, 0x9a, 0xad, 0x84, 0x06, 0x1d, 0x7f, 0x84, 0xde, 0x85, 0x42, 0xb7, 0x4f, 0xba, 0x8f, + 0x74, 0x7a, 0x5a, 0xcd, 0x73, 0x1f, 0xeb, 0x71, 0x3e, 0x1a, 0x4c, 0xaf, 0x7d, 0xda, 0x4a, 0x68, + 0xf9, 0xae, 0x78, 0x64, 0xeb, 0x37, 0xc8, 0xc0, 0x3c, 0x21, 0x23, 0x66, 0x5f, 0x38, 0x7f, 0xfd, + 0x77, 0x85, 0x26, 0xf7, 0xa0, 0x18, 0xde, 0x00, 0xbd, 0x07, 0x0a, 0xb1, 0x0c, 0xb9, 0x0c, 0x85, + 0xbb, 0xb8, 0x1a, 0xfb, 0x9d, 0x2d, 0xc3, 0x5b, 0x44, 0x81, 0xc8, 0x67, 0x74, 0x1b, 0x72, 0x5d, + 0x7b, 0x38, 0x34, 0x69, 0x15, 0xb8, 0xf5, 0x5a, 0xec, 0x02, 0xb8, 0x56, 0x2b, 0xa1, 0x49, 0x7d, + 0xb4, 0x0f, 0xa5, 0x81, 0xe9, 0x52, 0xdd, 0xb5, 0xb0, 0xe3, 0xf6, 0x6d, 0xea, 0x56, 0x97, 0xb8, + 0x87, 0x57, 0xe3, 0x3c, 0xec, 0x99, 0x2e, 0x3d, 0xf2, 0x94, 0x5b, 0x09, 0xad, 0x38, 0x08, 0x0b, + 0x98, 0x3f, 0xfb, 0xf8, 0x98, 0x8c, 0x7c, 0x87, 0xd5, 0xe5, 
0xf3, 0xfd, 0x1d, 0x30, 0x6d, 0xcf, + 0x9e, 0xf9, 0xb3, 0xc3, 0x02, 0xf4, 0x33, 0xb8, 0x34, 0xb0, 0xb1, 0xe1, 0xbb, 0xd3, 0xbb, 0xfd, + 0xb1, 0xf5, 0xa8, 0x5a, 0xe4, 0x4e, 0xaf, 0xc5, 0x4e, 0xd2, 0xc6, 0x86, 0xe7, 0xa2, 0xc1, 0x0c, + 0x5a, 0x09, 0x6d, 0x65, 0x30, 0x29, 0x44, 0x0f, 0x61, 0x15, 0x3b, 0xce, 0xe0, 0x6c, 0xd2, 0x7b, + 0x89, 0x7b, 0xbf, 0x1e, 0xe7, 0x7d, 0x9b, 0xd9, 0x4c, 0xba, 0x47, 0x78, 0x4a, 0x5a, 0xcf, 0x43, + 0xf6, 0x04, 0x0f, 0xc6, 0x44, 0xfd, 0x0e, 0x2c, 0x85, 0xd2, 0x14, 0x55, 0x21, 0x3f, 0x24, 0xae, + 0x8b, 0x7b, 0x84, 0x67, 0xb5, 0xa2, 0x79, 0x43, 0xb5, 0x04, 0xcb, 0xe1, 0xd4, 0x54, 0x87, 0xbe, + 0x21, 0x4b, 0x3a, 0x66, 0x78, 0x42, 0x46, 0xae, 0x69, 0x5b, 0x9e, 0xa1, 0x1c, 0xa2, 0x57, 0xa0, + 0xc8, 0xb7, 0x8f, 0xee, 0xbd, 0x67, 0x99, 0x9f, 0xd1, 0x96, 0xb9, 0xf0, 0x81, 0x54, 0x5a, 0x87, + 0x25, 0x67, 0xcb, 0xf1, 0x55, 0xd2, 0x5c, 0x05, 0x9c, 0x2d, 0x47, 0x2a, 0xa8, 0x7f, 0x4e, 0x41, + 0x65, 0x32, 0x55, 0xd1, 0x6d, 0xc8, 0x30, 0xd4, 0x92, 0x00, 0x54, 0xdb, 0x10, 0x90, 0xb6, 0xe1, + 0x41, 0xda, 0x46, 0xdb, 0x83, 0xb4, 0x7a, 0xe1, 0xab, 0x6f, 0xd6, 0x13, 0x5f, 0xfc, 0x6d, 0x3d, + 0xa9, 0x71, 0x0b, 0xf4, 0x02, 0xcb, 0x2c, 0x6c, 0x5a, 0xba, 0x69, 0xf0, 0xf9, 0x28, 0x2c, 0x6d, + 0xb0, 0x69, 0xed, 0x18, 0x68, 0x17, 0x2a, 0x5d, 0xdb, 0x72, 0x89, 0xe5, 0x8e, 0x5d, 0x5d, 0x40, + 0xa6, 0x84, 0x9d, 0xe9, 0x9d, 0xdf, 0xf0, 0x14, 0x0f, 0xb9, 0x9e, 0x56, 0xee, 0x46, 0x05, 0xe8, + 0x1e, 0xc0, 0x09, 0x1e, 0x98, 0x06, 0xa6, 0xf6, 0xc8, 0xad, 0x66, 0xae, 0xa6, 0x67, 0xba, 0x79, + 0xe0, 0xa9, 0xdc, 0x77, 0x0c, 0x4c, 0x49, 0x3d, 0xc3, 0x66, 0xab, 0x85, 0x2c, 0xd1, 0x6b, 0x50, + 0xc6, 0x8e, 0xa3, 0xbb, 0x14, 0x53, 0xa2, 0x77, 0xce, 0x28, 0x71, 0x39, 0x22, 0x2d, 0x6b, 0x45, + 0xec, 0x38, 0x47, 0x4c, 0x5a, 0x67, 0x42, 0xf4, 0x2a, 0x94, 0x18, 0x78, 0x99, 0x78, 0xa0, 0xf7, + 0x89, 0xd9, 0xeb, 0x53, 0x8e, 0x3d, 0x69, 0xad, 0x28, 0xa5, 0x2d, 0x2e, 0x54, 0x0d, 0xff, 0x63, + 0x72, 0xe0, 0x42, 0x08, 0x32, 0x06, 0xa6, 0x98, 0x07, 0x72, 0x59, 0xe3, 0xcf, 0x4c, 0xe6, 0x60, + 0xda, 0x97, 0xe1, 0xe1, 0xcf, 0xe8, 0x32, 0xe4, 0xa4, 0xdb, 0x34, 0x77, 0x2b, 0x47, 0x68, 0x15, + 0xb2, 0xce, 0xc8, 0x3e, 0x21, 0x1c, 0x65, 0x0b, 0x9a, 0x18, 0xa8, 0xbf, 0x4a, 0xc1, 0xca, 0x14, + 0xc4, 0x31, 0xbf, 0x7d, 0xec, 0xf6, 0xbd, 0xdf, 0x62, 0xcf, 0xe8, 0x16, 0xf3, 0x8b, 0x0d, 0x32, + 0x92, 0x65, 0xa1, 0x1a, 0x0e, 0x91, 0x28, 0x79, 0x2d, 0xfe, 0x5e, 0x86, 0x46, 0x6a, 0xa3, 0x03, + 0xa8, 0x0c, 0xb0, 0x4b, 0x75, 0x01, 0x19, 0x7a, 0xa8, 0x44, 0x4c, 0x03, 0xe5, 0x1e, 0xf6, 0x40, + 0x86, 0x6d, 0x58, 0xe9, 0xa8, 0x34, 0x88, 0x48, 0x91, 0x06, 0xab, 0x9d, 0xb3, 0xcf, 0xb1, 0x45, + 0x4d, 0x8b, 0xe8, 0x53, 0x5f, 0xee, 0x85, 0x29, 0xa7, 0xcd, 0x13, 0xd3, 0x20, 0x56, 0xd7, 0xfb, + 0x64, 0x97, 0x7c, 0x63, 0xff, 0x93, 0xba, 0xaa, 0x06, 0xa5, 0x28, 0x48, 0xa3, 0x12, 0xa4, 0xe8, + 0xa9, 0x0c, 0x40, 0x8a, 0x9e, 0xa2, 0xef, 0x42, 0x86, 0x2d, 0x92, 0x2f, 0xbe, 0x34, 0xa3, 0xba, + 0x49, 0xbb, 0xf6, 0x99, 0x43, 0x34, 0xae, 0xa9, 0xaa, 0x7e, 0x36, 0xf8, 0xc0, 0x3d, 0xe9, 0x55, + 0xbd, 0x06, 0xe5, 0x09, 0x64, 0x0e, 0x7d, 0xbf, 0x64, 0xf8, 0xfb, 0xa9, 0x65, 0x28, 0x46, 0x60, + 0x58, 0xbd, 0x0c, 0xab, 0xb3, 0x50, 0x55, 0xed, 0xfb, 0xf2, 0x08, 0x3a, 0xa2, 0x9b, 0x50, 0xf0, + 0x61, 0x55, 0x64, 0xe3, 0x74, 0xac, 0x3c, 0x65, 0xcd, 0x57, 0x65, 0x69, 0xc8, 0xb6, 0x35, 0xdf, + 0x0f, 0x29, 0x3e, 0xf1, 0x3c, 0x76, 0x9c, 0x16, 0x76, 0xfb, 0xea, 0x27, 0x50, 0x8d, 0x83, 0xcc, + 0x89, 0x65, 0x64, 0xfc, 0x6d, 0x78, 0x19, 0x72, 0xc7, 0xf6, 0x68, 0x88, 0x29, 0x77, 0x56, 0xd4, + 0xe4, 0x88, 0x6d, 0x4f, 0x01, 0x9f, 0x69, 0x2e, 0x16, 0x03, 0x55, 0x87, 0x17, 0x62, 
0x61, 0x93, + 0x99, 0x98, 0x96, 0x41, 0x44, 0x3c, 0x8b, 0x9a, 0x18, 0x04, 0x8e, 0xc4, 0x64, 0xc5, 0x80, 0xfd, + 0xac, 0xcb, 0xd7, 0xca, 0xfd, 0x2b, 0x9a, 0x1c, 0xa9, 0xbf, 0x2b, 0x40, 0x41, 0x23, 0xae, 0xc3, + 0x30, 0x01, 0xd5, 0x41, 0x21, 0xa7, 0x5d, 0xe2, 0x50, 0x0f, 0x22, 0x67, 0x13, 0x02, 0xa1, 0xdd, + 0xf4, 0x34, 0x59, 0x35, 0xf6, 0xcd, 0xd0, 0x0d, 0x49, 0xb8, 0xe2, 0xb9, 0x93, 0x34, 0x0f, 0x33, + 0xae, 0x5b, 0x1e, 0xe3, 0x4a, 0xc7, 0x16, 0x60, 0x61, 0x35, 0x41, 0xb9, 0x6e, 0x48, 0xca, 0x95, + 0x99, 0xf3, 0x63, 0x11, 0xce, 0xd5, 0x88, 0x70, 0xae, 0xec, 0x9c, 0x65, 0xc6, 0x90, 0xae, 0x5b, + 0x1e, 0xe9, 0xca, 0xcd, 0x99, 0xf1, 0x04, 0xeb, 0xba, 0x17, 0x65, 0x5d, 0x82, 0x31, 0xbd, 0x12, + 0x6b, 0x1d, 0x4b, 0xbb, 0xbe, 0x1f, 0xa2, 0x5d, 0x85, 0x58, 0xce, 0x23, 0x9c, 0xcc, 0xe0, 0x5d, + 0x8d, 0x08, 0xef, 0x52, 0xe6, 0xc4, 0x20, 0x86, 0x78, 0xbd, 0x1f, 0x26, 0x5e, 0x10, 0xcb, 0xdd, + 0xe4, 0xf7, 0x9e, 0xc5, 0xbc, 0xee, 0xf8, 0xcc, 0x6b, 0x29, 0x96, 0x3a, 0xca, 0x35, 0x4c, 0x52, + 0xaf, 0x83, 0x29, 0xea, 0x25, 0xa8, 0xd2, 0x6b, 0xb1, 0x2e, 0xe6, 0x70, 0xaf, 0x83, 0x29, 0xee, + 0x55, 0x9c, 0xe3, 0x70, 0x0e, 0xf9, 0xfa, 0xf9, 0x6c, 0xf2, 0x15, 0x4f, 0x8f, 0xe4, 0x34, 0x17, + 0x63, 0x5f, 0x7a, 0x0c, 0xfb, 0x2a, 0x73, 0xf7, 0x6f, 0xc4, 0xba, 0xbf, 0x38, 0xfd, 0xba, 0xc6, + 0x2a, 0xe4, 0x44, 0xce, 0x33, 0x94, 0x21, 0xa3, 0x91, 0x3d, 0x92, 0x4c, 0x4a, 0x0c, 0xd4, 0xd7, + 0x59, 0xcd, 0x0e, 0xf2, 0xfb, 0x1c, 0xaa, 0xc6, 0xd1, 0x3c, 0x94, 0xd3, 0xea, 0x1f, 0x92, 0x81, + 0x2d, 0x2f, 0x73, 0xe1, 0x7a, 0xaf, 0xc8, 0x7a, 0x1f, 0x62, 0x70, 0xa9, 0x28, 0x83, 0x5b, 0x87, + 0x25, 0x86, 0xd2, 0x13, 0xe4, 0x0c, 0x3b, 0x1e, 0x39, 0x43, 0xd7, 0x61, 0x85, 0x97, 0x61, 0xc1, + 0xf3, 0x24, 0x34, 0x67, 0x78, 0x85, 0x29, 0xb3, 0x17, 0x62, 0x73, 0x0a, 0x8c, 0x7e, 0x0b, 0x2e, + 0x85, 0x74, 0x7d, 0xf4, 0x17, 0x6c, 0xa6, 0xe2, 0x6b, 0x6f, 0xcb, 0x32, 0xf0, 0xa7, 0x64, 0x10, + 0xa1, 0x80, 0xf8, 0xcd, 0xe2, 0x68, 0xc9, 0xff, 0x0d, 0x47, 0x4b, 0xfd, 0xd7, 0x1c, 0x2d, 0x5c, + 0xcc, 0xd2, 0xd1, 0x62, 0xf6, 0xcf, 0x64, 0xf0, 0x49, 0x7c, 0xc6, 0xd5, 0xb5, 0x0d, 0x22, 0xcb, + 0x0b, 0x7f, 0x46, 0x15, 0x48, 0x0f, 0xec, 0x9e, 0x2c, 0x22, 0xec, 0x91, 0x69, 0xf9, 0x18, 0xac, + 0x48, 0x88, 0xf5, 0x2b, 0x53, 0x96, 0x07, 0x58, 0x56, 0xa6, 0x0a, 0xa4, 0x1f, 0x11, 0x81, 0x98, + 0xcb, 0x1a, 0x7b, 0x64, 0x7a, 0x7c, 0x8f, 0x71, 0x1c, 0x5c, 0xd6, 0xc4, 0x00, 0xdd, 0x06, 0x85, + 0x37, 0x18, 0x74, 0xdb, 0x71, 0x25, 0xb8, 0xbd, 0x18, 0x5e, 0xab, 0xe8, 0x23, 0x6c, 0x1c, 0x32, + 0x9d, 0x03, 0xc7, 0xd5, 0x0a, 0x8e, 0x7c, 0x0a, 0x15, 0x5d, 0x25, 0xc2, 0xfd, 0xae, 0x80, 0xc2, + 0x66, 0xef, 0x3a, 0xb8, 0x4b, 0x38, 0x52, 0x29, 0x5a, 0x20, 0x50, 0x1f, 0x02, 0x9a, 0xc6, 0x5b, + 0xd4, 0x82, 0x1c, 0x39, 0x21, 0x16, 0x65, 0x5f, 0x8d, 0x85, 0xfb, 0xf2, 0x0c, 0x62, 0x45, 0x2c, + 0x5a, 0xaf, 0xb2, 0x20, 0xff, 0xe3, 0x9b, 0xf5, 0x8a, 0xd0, 0x7e, 0xd3, 0x1e, 0x9a, 0x94, 0x0c, + 0x1d, 0x7a, 0xa6, 0x49, 0x7b, 0xf5, 0x97, 0x29, 0xc6, 0x72, 0x22, 0x58, 0x3c, 0x33, 0xb6, 0xde, + 0x8e, 0x4f, 0x85, 0x18, 0xee, 0x62, 0xf1, 0x5e, 0x03, 0xe8, 0x61, 0x57, 0x7f, 0x8c, 0x2d, 0x4a, + 0x0c, 0x19, 0xf4, 0x90, 0x04, 0xd5, 0xa0, 0xc0, 0x46, 0x63, 0x97, 0x18, 0x92, 0x6c, 0xfb, 0xe3, + 0xd0, 0x3a, 0xf3, 0xdf, 0x6e, 0x9d, 0xd1, 0x28, 0x17, 0x26, 0xa3, 0xfc, 0xeb, 0x54, 0x90, 0x25, + 0x01, 0x21, 0xfc, 0xff, 0x8b, 0xc3, 0x6f, 0xf8, 0x29, 0x31, 0x5a, 0x14, 0xd1, 0x11, 0xac, 0xf8, + 0x59, 0xaa, 0x8f, 0x79, 0xf6, 0x7a, 0xfb, 0x6e, 0xd1, 0x34, 0xaf, 0x9c, 0x44, 0xc5, 0x2e, 0xfa, + 0x31, 0x3c, 0x3f, 0x81, 0x40, 0xbe, 0xeb, 0xd4, 0x82, 0x40, 0xf4, 0x5c, 0x14, 0x88, 0x3c, 0xcf, + 0x41, 0xac, 
0xd2, 0xdf, 0x32, 0x37, 0x76, 0xd8, 0xc1, 0x23, 0x5c, 0xe2, 0x67, 0x7e, 0xfd, 0x57, + 0xa0, 0x38, 0x22, 0x94, 0x9d, 0x85, 0x23, 0x47, 0xbb, 0x65, 0x21, 0x94, 0x07, 0xc6, 0x43, 0x78, + 0x6e, 0x66, 0xa9, 0x47, 0xdf, 0x03, 0x25, 0x60, 0x09, 0xc9, 0x98, 0x53, 0x92, 0xcf, 0xfc, 0x03, + 0x5d, 0xf5, 0x8f, 0xc9, 0xc0, 0x65, 0xf4, 0x2c, 0xd1, 0x84, 0xdc, 0x88, 0xb8, 0xe3, 0x81, 0x60, + 0xf7, 0xa5, 0xad, 0xb7, 0x16, 0x23, 0x09, 0x4c, 0x3a, 0x1e, 0x50, 0x4d, 0x1a, 0xab, 0x0f, 0x21, + 0x27, 0x24, 0x68, 0x09, 0xf2, 0xf7, 0xf7, 0x77, 0xf7, 0x0f, 0x3e, 0xda, 0xaf, 0x24, 0x10, 0x40, + 0x6e, 0xbb, 0xd1, 0x68, 0x1e, 0xb6, 0x2b, 0x49, 0xa4, 0x40, 0x76, 0xbb, 0x7e, 0xa0, 0xb5, 0x2b, + 0x29, 0x26, 0xd6, 0x9a, 0x1f, 0x36, 0x1b, 0xed, 0x4a, 0x1a, 0xad, 0x40, 0x51, 0x3c, 0xeb, 0xf7, + 0x0e, 0xb4, 0x1f, 0x6e, 0xb7, 0x2b, 0x99, 0x90, 0xe8, 0xa8, 0xb9, 0x7f, 0xb7, 0xa9, 0x55, 0xb2, + 0xea, 0xdb, 0xec, 0xf8, 0x10, 0x43, 0x2b, 0x82, 0x83, 0x42, 0x32, 0x74, 0x50, 0x50, 0x7f, 0x9b, + 0x82, 0x5a, 0x3c, 0x57, 0x40, 0x1f, 0x4e, 0x2c, 0x7c, 0xeb, 0x02, 0x44, 0x63, 0x62, 0xf5, 0xe8, + 0x55, 0x28, 0x8d, 0xc8, 0x31, 0xa1, 0xdd, 0xbe, 0xe0, 0x2e, 0xa2, 0xb0, 0x15, 0xb5, 0xa2, 0x94, + 0x72, 0x23, 0x57, 0xa8, 0x7d, 0x4a, 0xba, 0x54, 0x17, 0x67, 0x16, 0xb1, 0xe9, 0x14, 0xa6, 0xc6, + 0xa4, 0x47, 0x42, 0xa8, 0x7e, 0x72, 0xa1, 0x58, 0x2a, 0x90, 0xd5, 0x9a, 0x6d, 0xed, 0x27, 0x95, + 0x34, 0x42, 0x50, 0xe2, 0x8f, 0xfa, 0xd1, 0xfe, 0xf6, 0xe1, 0x51, 0xeb, 0x80, 0xc5, 0xf2, 0x12, + 0x94, 0xbd, 0x58, 0x7a, 0xc2, 0xac, 0xfa, 0xef, 0x24, 0x94, 0x27, 0x12, 0x04, 0x6d, 0x41, 0x56, + 0xf0, 0xdf, 0xb8, 0x06, 0x33, 0xcf, 0x6f, 0x99, 0x4d, 0x42, 0x15, 0xbd, 0x0b, 0x05, 0x22, 0xcf, + 0xe4, 0xb3, 0x12, 0x51, 0xf4, 0x12, 0xbc, 0x53, 0xbb, 0x34, 0xf5, 0x2d, 0xd0, 0x7b, 0xa0, 0xf8, + 0x99, 0x2e, 0xcf, 0x4b, 0x2f, 0x4f, 0x9b, 0xfb, 0x18, 0x21, 0xed, 0x03, 0x1b, 0x74, 0x27, 0x20, + 0x51, 0x99, 0x69, 0xd6, 0x2d, 0xcd, 0x85, 0x82, 0x34, 0xf6, 0xf4, 0xd5, 0x06, 0x2c, 0x85, 0xd6, + 0x83, 0x5e, 0x04, 0x65, 0x88, 0x4f, 0x65, 0xaf, 0x47, 0x9c, 0xd6, 0x0b, 0x43, 0x7c, 0x2a, 0xda, + 0x3c, 0xcf, 0x43, 0x9e, 0xbd, 0xec, 0x61, 0x81, 0x36, 0x69, 0x2d, 0x37, 0xc4, 0xa7, 0x1f, 0x60, + 0x57, 0xfd, 0x18, 0x4a, 0xd1, 0x3e, 0x07, 0xdb, 0x89, 0x23, 0x7b, 0x6c, 0x19, 0xdc, 0x47, 0x56, + 0x13, 0x03, 0x74, 0x13, 0xb2, 0x27, 0xb6, 0x00, 0xab, 0xd9, 0x29, 0xfb, 0xc0, 0xa6, 0x24, 0xd4, + 0x27, 0x11, 0xda, 0xea, 0xe7, 0x90, 0xe5, 0xe0, 0xc3, 0x80, 0x84, 0x77, 0x2c, 0x24, 0x81, 0x64, + 0xcf, 0xe8, 0x63, 0x00, 0x4c, 0xe9, 0xc8, 0xec, 0x8c, 0x03, 0xc7, 0xeb, 0xb3, 0xc1, 0x6b, 0xdb, + 0xd3, 0xab, 0x5f, 0x91, 0x28, 0xb6, 0x1a, 0x98, 0x86, 0x90, 0x2c, 0xe4, 0x50, 0xdd, 0x87, 0x52, + 0xd4, 0xd6, 0xe3, 0x3c, 0xc9, 0x19, 0x9c, 0x27, 0x15, 0xe6, 0x3c, 0x3e, 0x63, 0x4a, 0x8b, 0xee, + 0x14, 0x1f, 0xa8, 0x4f, 0x92, 0x50, 0x68, 0x9f, 0xca, 0x6d, 0x1d, 0xd3, 0x18, 0x09, 0x4c, 0x53, + 0xe1, 0x36, 0x80, 0xe8, 0xb4, 0xa4, 0xfd, 0xfe, 0xcd, 0xfb, 0x7e, 0xe2, 0x66, 0x16, 0x3d, 0xed, + 0x79, 0x8d, 0x2c, 0x09, 0x56, 0xef, 0x80, 0xe2, 0xef, 0x2a, 0xc6, 0xc4, 0xb1, 0x61, 0x8c, 0x88, + 0xeb, 0xca, 0xb5, 0x79, 0x43, 0xde, 0x67, 0xb3, 0x1f, 0xcb, 0x46, 0x43, 0x5a, 0x13, 0x03, 0xd5, + 0x80, 0xf2, 0x44, 0xd9, 0x42, 0xef, 0x40, 0xde, 0x19, 0x77, 0x74, 0x2f, 0x3c, 0x13, 0xc9, 0xe3, + 0x91, 0xbc, 0x71, 0x67, 0x60, 0x76, 0x77, 0xc9, 0x99, 0x37, 0x19, 0x67, 0xdc, 0xd9, 0x15, 0x51, + 0x14, 0xbf, 0x92, 0x0a, 0xff, 0xca, 0x09, 0x14, 0xbc, 0x4d, 0x81, 0x7e, 0x10, 0xce, 0x13, 0xaf, + 0xfb, 0x1a, 0x5b, 0x4a, 0xa5, 0xfb, 0x50, 0x9a, 0x5c, 0x87, 0x15, 0xd7, 0xec, 0x59, 0xc4, 0xd0, + 0x83, 0xb3, 0x00, 0xff, 0xb5, 0x82, 
0x56, 0x16, 0x2f, 0xf6, 0xbc, 0x83, 0x80, 0xfa, 0xaf, 0x24, + 0x14, 0xbc, 0x84, 0x45, 0x6f, 0x87, 0xf6, 0x5d, 0x69, 0x46, 0x53, 0xc2, 0x53, 0x0c, 0x5a, 0x65, + 0xd1, 0xb9, 0xa6, 0x2e, 0x3e, 0xd7, 0xb8, 0x9e, 0xa7, 0xd7, 0x7c, 0xce, 0x5c, 0xb8, 0xf9, 0xfc, + 0x26, 0x20, 0x6a, 0x53, 0x3c, 0xd0, 0x4f, 0x6c, 0x6a, 0x5a, 0x3d, 0x5d, 0x04, 0x5b, 0x30, 0xaa, + 0x0a, 0x7f, 0xf3, 0x80, 0xbf, 0x38, 0xe4, 0x71, 0xff, 0x45, 0x12, 0x0a, 0x7e, 0x6d, 0xbc, 0x68, + 0xe7, 0xeb, 0x32, 0xe4, 0x24, 0xfc, 0x8b, 0xd6, 0x97, 0x1c, 0xf9, 0x4d, 0xd8, 0x4c, 0xa8, 0x09, + 0x5b, 0x83, 0xc2, 0x90, 0x50, 0xcc, 0x09, 0x82, 0x38, 0x8e, 0xf9, 0xe3, 0xeb, 0x77, 0x60, 0x29, + 0xd4, 0x84, 0x64, 0x99, 0xb7, 0xdf, 0xfc, 0xa8, 0x92, 0xa8, 0xe5, 0x9f, 0x7c, 0x79, 0x35, 0xbd, + 0x4f, 0x1e, 0xb3, 0x3d, 0xab, 0x35, 0x1b, 0xad, 0x66, 0x63, 0xb7, 0x92, 0xac, 0x2d, 0x3d, 0xf9, + 0xf2, 0x6a, 0x5e, 0x23, 0xbc, 0x21, 0x72, 0xbd, 0x05, 0xcb, 0xe1, 0xaf, 0x12, 0xad, 0x20, 0x08, + 0x4a, 0x77, 0xef, 0x1f, 0xee, 0xed, 0x34, 0xb6, 0xdb, 0x4d, 0xfd, 0xc1, 0x41, 0xbb, 0x59, 0x49, + 0xa2, 0xe7, 0xe1, 0xd2, 0xde, 0xce, 0x07, 0xad, 0xb6, 0xde, 0xd8, 0xdb, 0x69, 0xee, 0xb7, 0xf5, + 0xed, 0x76, 0x7b, 0xbb, 0xb1, 0x5b, 0x49, 0x6d, 0xfd, 0x5e, 0x81, 0xf2, 0x76, 0xbd, 0xb1, 0xc3, + 0xaa, 0x9f, 0xd9, 0xc5, 0xfc, 0xac, 0xdc, 0x80, 0x0c, 0x3f, 0x0d, 0x9f, 0x7b, 0xfb, 0x58, 0x3b, + 0xbf, 0x55, 0x86, 0xee, 0x41, 0x96, 0x1f, 0x94, 0xd1, 0xf9, 0xd7, 0x91, 0xb5, 0x39, 0xbd, 0x33, + 0x36, 0x19, 0x9e, 0x1e, 0xe7, 0xde, 0x4f, 0xd6, 0xce, 0x6f, 0xa5, 0x21, 0x0d, 0x94, 0x80, 0xc2, + 0xcf, 0xbf, 0xaf, 0xab, 0x2d, 0x00, 0x36, 0x68, 0x0f, 0xf2, 0xde, 0xe1, 0x68, 0xde, 0x0d, 0x62, + 0x6d, 0x6e, 0xaf, 0x8b, 0x85, 0x4b, 0x1c, 0x62, 0xcf, 0xbf, 0x0e, 0xad, 0xcd, 0x69, 0xdc, 0xa1, + 0x1d, 0xc8, 0x49, 0x5e, 0x3a, 0xe7, 0x56, 0xb0, 0x36, 0xaf, 0x77, 0xc5, 0x82, 0x16, 0x74, 0x07, + 0xe6, 0x5f, 0xf2, 0xd6, 0x16, 0xe8, 0x49, 0xa2, 0xfb, 0x00, 0xa1, 0x23, 0xeb, 0x02, 0xb7, 0xb7, + 0xb5, 0x45, 0x7a, 0x8d, 0xe8, 0x00, 0x0a, 0xfe, 0xd1, 0x64, 0xee, 0x5d, 0x6a, 0x6d, 0x7e, 0xd3, + 0x0f, 0x3d, 0x84, 0x62, 0x94, 0x93, 0x2f, 0x76, 0x43, 0x5a, 0x5b, 0xb0, 0x9b, 0xc7, 0xfc, 0x47, + 0x09, 0xfa, 0x62, 0x37, 0xa6, 0xb5, 0x05, 0x9b, 0x7b, 0xe8, 0x53, 0x58, 0x99, 0x26, 0xd0, 0x8b, + 0x5f, 0xa0, 0xd6, 0x2e, 0xd0, 0xee, 0x43, 0x43, 0x40, 0x33, 0x88, 0xf7, 0x05, 0xee, 0x53, 0x6b, + 0x17, 0xe9, 0xfe, 0xd5, 0x9b, 0x5f, 0x3d, 0x5d, 0x4b, 0x7e, 0xfd, 0x74, 0x2d, 0xf9, 0xf7, 0xa7, + 0x6b, 0xc9, 0x2f, 0x9e, 0xad, 0x25, 0xbe, 0x7e, 0xb6, 0x96, 0xf8, 0xeb, 0xb3, 0xb5, 0xc4, 0x4f, + 0xdf, 0xe8, 0x99, 0xb4, 0x3f, 0xee, 0x6c, 0x74, 0xed, 0xe1, 0x66, 0xf8, 0x8f, 0x1a, 0xb3, 0xfe, + 0x3c, 0xd2, 0xc9, 0xf1, 0xa2, 0x72, 0xe3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x88, 0xe8, 0xa9, + 0x78, 0x5c, 0x22, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
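// Illustrative sketch, not part of the diff: the init() registrations above
// publish every ABCI message and enum under its new fully-qualified name in
// the gogo/protobuf registry, so types can be resolved by name at runtime.
// A minimal example, assuming the generated package lives at the usual
// Tendermint import path:
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/gogo/protobuf/proto"
//
//		// Blank import runs the init() functions shown above.
//		_ "github.com/tendermint/tendermint/abci/types"
//	)
//
//	func main() {
//		// Registered via proto.RegisterType in init().
//		fmt.Println(proto.MessageType("tendermint.abci.RequestEcho")) // *types.RequestEcho
//
//		// After this change only "tendermint.abci.*" names are registered,
//		// so lookups under the old prefix return nil.
//		fmt.Println(proto.MessageType("tendermint.abci.types.RequestEcho")) // <nil>
//	}
//
// The same rename shows up below in every gRPC FullMethod string, which moves
// from "/tendermint.abci.types.ABCIApplication/..." to
// "/tendermint.abci.ABCIApplication/...".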
@@ -5055,7 +3243,6 @@ type ABCIApplicationClient interface { Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) - SetOption(ctx context.Context, in *RequestSetOption, opts ...grpc.CallOption) (*ResponseSetOption, error) DeliverTx(ctx context.Context, in *RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) @@ -5063,6 +3250,10 @@ type ABCIApplicationClient interface { InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) EndBlock(ctx context.Context, in *RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) + ListSnapshots(ctx context.Context, in *RequestListSnapshots, opts ...grpc.CallOption) (*ResponseListSnapshots, error) + OfferSnapshot(ctx context.Context, in *RequestOfferSnapshot, opts ...grpc.CallOption) (*ResponseOfferSnapshot, error) + LoadSnapshotChunk(ctx context.Context, in *RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*ResponseLoadSnapshotChunk, error) + ApplySnapshotChunk(ctx context.Context, in *RequestApplySnapshotChunk, opts ...grpc.CallOption) (*ResponseApplySnapshotChunk, error) } type aBCIApplicationClient struct { @@ -5075,7 +3266,7 @@ func NewABCIApplicationClient(cc *grpc.ClientConn) ABCIApplicationClient { func (c *aBCIApplicationClient) Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) { out := new(ResponseEcho) - err := c.cc.Invoke(ctx, "/tendermint.abci.types.ABCIApplication/Echo", in, out, opts...) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/Echo", in, out, opts...) if err != nil { return nil, err } @@ -5084,7 +3275,7 @@ func (c *aBCIApplicationClient) Echo(ctx context.Context, in *RequestEcho, opts func (c *aBCIApplicationClient) Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) { out := new(ResponseFlush) - err := c.cc.Invoke(ctx, "/tendermint.abci.types.ABCIApplication/Flush", in, out, opts...) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/Flush", in, out, opts...) if err != nil { return nil, err } @@ -5093,16 +3284,7 @@ func (c *aBCIApplicationClient) Flush(ctx context.Context, in *RequestFlush, opt func (c *aBCIApplicationClient) Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) { out := new(ResponseInfo) - err := c.cc.Invoke(ctx, "/tendermint.abci.types.ABCIApplication/Info", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIApplicationClient) SetOption(ctx context.Context, in *RequestSetOption, opts ...grpc.CallOption) (*ResponseSetOption, error) { - out := new(ResponseSetOption) - err := c.cc.Invoke(ctx, "/tendermint.abci.types.ABCIApplication/SetOption", in, out, opts...) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/Info", in, out, opts...) 
if err != nil { return nil, err } @@ -5111,7 +3293,7 @@ func (c *aBCIApplicationClient) SetOption(ctx context.Context, in *RequestSetOpt func (c *aBCIApplicationClient) DeliverTx(ctx context.Context, in *RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) { out := new(ResponseDeliverTx) - err := c.cc.Invoke(ctx, "/tendermint.abci.types.ABCIApplication/DeliverTx", in, out, opts...) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/DeliverTx", in, out, opts...) if err != nil { return nil, err } @@ -5120,7 +3302,7 @@ func (c *aBCIApplicationClient) DeliverTx(ctx context.Context, in *RequestDelive func (c *aBCIApplicationClient) CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) { out := new(ResponseCheckTx) - err := c.cc.Invoke(ctx, "/tendermint.abci.types.ABCIApplication/CheckTx", in, out, opts...) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/CheckTx", in, out, opts...) if err != nil { return nil, err } @@ -5129,7 +3311,7 @@ func (c *aBCIApplicationClient) CheckTx(ctx context.Context, in *RequestCheckTx, func (c *aBCIApplicationClient) Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) { out := new(ResponseQuery) - err := c.cc.Invoke(ctx, "/tendermint.abci.types.ABCIApplication/Query", in, out, opts...) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/Query", in, out, opts...) if err != nil { return nil, err } @@ -5138,7 +3320,7 @@ func (c *aBCIApplicationClient) Query(ctx context.Context, in *RequestQuery, opt func (c *aBCIApplicationClient) Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) { out := new(ResponseCommit) - err := c.cc.Invoke(ctx, "/tendermint.abci.types.ABCIApplication/Commit", in, out, opts...) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/Commit", in, out, opts...) if err != nil { return nil, err } @@ -5147,7 +3329,7 @@ func (c *aBCIApplicationClient) Commit(ctx context.Context, in *RequestCommit, o func (c *aBCIApplicationClient) InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) { out := new(ResponseInitChain) - err := c.cc.Invoke(ctx, "/tendermint.abci.types.ABCIApplication/InitChain", in, out, opts...) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/InitChain", in, out, opts...) if err != nil { return nil, err } @@ -5156,7 +3338,7 @@ func (c *aBCIApplicationClient) InitChain(ctx context.Context, in *RequestInitCh func (c *aBCIApplicationClient) BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) { out := new(ResponseBeginBlock) - err := c.cc.Invoke(ctx, "/tendermint.abci.types.ABCIApplication/BeginBlock", in, out, opts...) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/BeginBlock", in, out, opts...) if err != nil { return nil, err } @@ -5165,7 +3347,43 @@ func (c *aBCIApplicationClient) BeginBlock(ctx context.Context, in *RequestBegin func (c *aBCIApplicationClient) EndBlock(ctx context.Context, in *RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) { out := new(ResponseEndBlock) - err := c.cc.Invoke(ctx, "/tendermint.abci.types.ABCIApplication/EndBlock", in, out, opts...) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/EndBlock", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) ListSnapshots(ctx context.Context, in *RequestListSnapshots, opts ...grpc.CallOption) (*ResponseListSnapshots, error) { + out := new(ResponseListSnapshots) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/ListSnapshots", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) OfferSnapshot(ctx context.Context, in *RequestOfferSnapshot, opts ...grpc.CallOption) (*ResponseOfferSnapshot, error) { + out := new(ResponseOfferSnapshot) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/OfferSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) LoadSnapshotChunk(ctx context.Context, in *RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*ResponseLoadSnapshotChunk, error) { + out := new(ResponseLoadSnapshotChunk) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/LoadSnapshotChunk", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) ApplySnapshotChunk(ctx context.Context, in *RequestApplySnapshotChunk, opts ...grpc.CallOption) (*ResponseApplySnapshotChunk, error) { + out := new(ResponseApplySnapshotChunk) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/ApplySnapshotChunk", in, out, opts...) if err != nil { return nil, err } @@ -5177,7 +3395,6 @@ type ABCIApplicationServer interface { Echo(context.Context, *RequestEcho) (*ResponseEcho, error) Flush(context.Context, *RequestFlush) (*ResponseFlush, error) Info(context.Context, *RequestInfo) (*ResponseInfo, error) - SetOption(context.Context, *RequestSetOption) (*ResponseSetOption, error) DeliverTx(context.Context, *RequestDeliverTx) (*ResponseDeliverTx, error) CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) Query(context.Context, *RequestQuery) (*ResponseQuery, error) @@ -5185,6 +3402,10 @@ type ABCIApplicationServer interface { InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) BeginBlock(context.Context, *RequestBeginBlock) (*ResponseBeginBlock, error) EndBlock(context.Context, *RequestEndBlock) (*ResponseEndBlock, error) + ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error) + OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) + LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) + ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) } // UnimplementedABCIApplicationServer can be embedded to have forward compatible implementations. 
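// Illustrative sketch, not part of the diff: the forward-compatibility
// pattern the comment above describes. An application embeds
// UnimplementedABCIApplicationServer so that RPCs it does not override,
// including the four snapshot methods this change adds (ListSnapshots,
// OfferSnapshot, LoadSnapshotChunk, ApplySnapshotChunk), fail gracefully
// with codes.Unimplemented instead of breaking compilation. Import paths
// are assumed:
//
//	package kvapp
//
//	import (
//		"context"
//
//		"google.golang.org/grpc"
//
//		abci "github.com/tendermint/tendermint/abci/types"
//	)
//
//	type App struct {
//		// Provides stubs for every ABCIApplicationServer method, including
//		// the new snapshot RPCs, each returning codes.Unimplemented.
//		abci.UnimplementedABCIApplicationServer
//	}
//
//	// Override only the methods the application supports; everything else
//	// falls through to the embedded stubs.
//	func (a *App) Echo(ctx context.Context, req *abci.RequestEcho) (*abci.ResponseEcho, error) {
//		return &abci.ResponseEcho{Message: req.Message}, nil
//	}
//
//	func Serve(s *grpc.Server) {
//		// Wires *App into _ABCIApplication_serviceDesc (defined below).
//		abci.RegisterABCIApplicationServer(s, &App{})
//	}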
@@ -5200,9 +3421,6 @@ func (*UnimplementedABCIApplicationServer) Flush(ctx context.Context, req *Reque func (*UnimplementedABCIApplicationServer) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) { return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") } -func (*UnimplementedABCIApplicationServer) SetOption(ctx context.Context, req *RequestSetOption) (*ResponseSetOption, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetOption not implemented") -} func (*UnimplementedABCIApplicationServer) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) { return nil, status.Errorf(codes.Unimplemented, "method DeliverTx not implemented") } @@ -5224,6 +3442,18 @@ func (*UnimplementedABCIApplicationServer) BeginBlock(ctx context.Context, req * func (*UnimplementedABCIApplicationServer) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) { return nil, status.Errorf(codes.Unimplemented, "method EndBlock not implemented") } +func (*UnimplementedABCIApplicationServer) ListSnapshots(ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") +} +func (*UnimplementedABCIApplicationServer) OfferSnapshot(ctx context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { + return nil, status.Errorf(codes.Unimplemented, "method OfferSnapshot not implemented") +} +func (*UnimplementedABCIApplicationServer) LoadSnapshotChunk(ctx context.Context, req *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) { + return nil, status.Errorf(codes.Unimplemented, "method LoadSnapshotChunk not implemented") +} +func (*UnimplementedABCIApplicationServer) ApplySnapshotChunk(ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplySnapshotChunk not implemented") +} func RegisterABCIApplicationServer(s *grpc.Server, srv ABCIApplicationServer) { s.RegisterService(&_ABCIApplication_serviceDesc, srv) @@ -5239,7 +3469,7 @@ func _ABCIApplication_Echo_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.abci.types.ABCIApplication/Echo", + FullMethod: "/tendermint.abci.ABCIApplication/Echo", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ABCIApplicationServer).Echo(ctx, req.(*RequestEcho)) @@ -5257,7 +3487,7 @@ func _ABCIApplication_Flush_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.abci.types.ABCIApplication/Flush", + FullMethod: "/tendermint.abci.ABCIApplication/Flush", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ABCIApplicationServer).Flush(ctx, req.(*RequestFlush)) @@ -5275,7 +3505,7 @@ func _ABCIApplication_Info_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.abci.types.ABCIApplication/Info", + FullMethod: "/tendermint.abci.ABCIApplication/Info", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ABCIApplicationServer).Info(ctx, req.(*RequestInfo)) @@ -5283,24 +3513,6 @@ func _ABCIApplication_Info_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } -func 
_ABCIApplication_SetOption_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestSetOption) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).SetOption(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.types.ABCIApplication/SetOption", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).SetOption(ctx, req.(*RequestSetOption)) - } - return interceptor(ctx, in, info, handler) -} - func _ABCIApplication_DeliverTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RequestDeliverTx) if err := dec(in); err != nil { @@ -5311,7 +3523,7 @@ func _ABCIApplication_DeliverTx_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.abci.types.ABCIApplication/DeliverTx", + FullMethod: "/tendermint.abci.ABCIApplication/DeliverTx", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ABCIApplicationServer).DeliverTx(ctx, req.(*RequestDeliverTx)) @@ -5329,7 +3541,7 @@ func _ABCIApplication_CheckTx_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.abci.types.ABCIApplication/CheckTx", + FullMethod: "/tendermint.abci.ABCIApplication/CheckTx", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ABCIApplicationServer).CheckTx(ctx, req.(*RequestCheckTx)) @@ -5347,7 +3559,7 @@ func _ABCIApplication_Query_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.abci.types.ABCIApplication/Query", + FullMethod: "/tendermint.abci.ABCIApplication/Query", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ABCIApplicationServer).Query(ctx, req.(*RequestQuery)) @@ -5365,7 +3577,7 @@ func _ABCIApplication_Commit_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.abci.types.ABCIApplication/Commit", + FullMethod: "/tendermint.abci.ABCIApplication/Commit", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ABCIApplicationServer).Commit(ctx, req.(*RequestCommit)) @@ -5383,7 +3595,7 @@ func _ABCIApplication_InitChain_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.abci.types.ABCIApplication/InitChain", + FullMethod: "/tendermint.abci.ABCIApplication/InitChain", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ABCIApplicationServer).InitChain(ctx, req.(*RequestInitChain)) @@ -5401,7 +3613,7 @@ func _ABCIApplication_BeginBlock_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.abci.types.ABCIApplication/BeginBlock", + FullMethod: "/tendermint.abci.ABCIApplication/BeginBlock", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ABCIApplicationServer).BeginBlock(ctx, req.(*RequestBeginBlock)) @@ -5419,7 +3631,7 @@ func _ABCIApplication_EndBlock_Handler(srv interface{}, ctx context.Context, dec } info := 
&grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tendermint.abci.types.ABCIApplication/EndBlock", + FullMethod: "/tendermint.abci.ABCIApplication/EndBlock", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ABCIApplicationServer).EndBlock(ctx, req.(*RequestEndBlock)) @@ -5427,8 +3639,80 @@ func _ABCIApplication_EndBlock_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _ABCIApplication_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestListSnapshots) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).ListSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/ListSnapshots", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ListSnapshots(ctx, req.(*RequestListSnapshots)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_OfferSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestOfferSnapshot) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).OfferSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/OfferSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).OfferSnapshot(ctx, req.(*RequestOfferSnapshot)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_LoadSnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestLoadSnapshotChunk) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).LoadSnapshotChunk(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/LoadSnapshotChunk", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).LoadSnapshotChunk(ctx, req.(*RequestLoadSnapshotChunk)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_ApplySnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestApplySnapshotChunk) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).ApplySnapshotChunk(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/ApplySnapshotChunk", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ApplySnapshotChunk(ctx, req.(*RequestApplySnapshotChunk)) + } + return interceptor(ctx, in, info, handler) +} + var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tendermint.abci.types.ABCIApplication", + ServiceName: "tendermint.abci.ABCIApplication", HandlerType: (*ABCIApplicationServer)(nil), Methods: []grpc.MethodDesc{ { @@ -5443,10 +3727,6 @@ var 
_ABCIApplication_serviceDesc = grpc.ServiceDesc{ MethodName: "Info", Handler: _ABCIApplication_Info_Handler, }, - { - MethodName: "SetOption", - Handler: _ABCIApplication_SetOption_Handler, - }, { MethodName: "DeliverTx", Handler: _ABCIApplication_DeliverTx_Handler, @@ -5475,9 +3755,25 @@ var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ MethodName: "EndBlock", Handler: _ABCIApplication_EndBlock_Handler, }, + { + MethodName: "ListSnapshots", + Handler: _ABCIApplication_ListSnapshots_Handler, + }, + { + MethodName: "OfferSnapshot", + Handler: _ABCIApplication_OfferSnapshot_Handler, + }, + { + MethodName: "LoadSnapshotChunk", + Handler: _ABCIApplication_LoadSnapshotChunk_Handler, + }, + { + MethodName: "ApplySnapshotChunk", + Handler: _ABCIApplication_ApplySnapshotChunk_Handler, + }, }, Streams: []grpc.StreamDesc{}, - Metadata: "abci/types/types.proto", + Metadata: "tendermint/abci/types.proto", } func (m *Request) Marshal() (dAtA []byte, err error) { @@ -5500,10 +3796,6 @@ func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Value != nil { { size := m.Value.Size() @@ -5533,7 +3825,7 @@ func (m *Request_Echo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa } return len(dAtA) - i, nil } @@ -5554,7 +3846,7 @@ func (m *Request_Flush) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 } return len(dAtA) - i, nil } @@ -5575,20 +3867,41 @@ func (m *Request_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Request_InitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InitChain != nil { + { + size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x22 } return len(dAtA) - i, nil } -func (m *Request_SetOption) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_Query) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Request_SetOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.SetOption != nil { + if m.Query != nil { { - size, err := m.SetOption.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5600,16 +3913,16 @@ func (m *Request_SetOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Request_InitChain) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_BeginBlock) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Request_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.InitChain != nil { + if m.BeginBlock != nil { { - size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) if err != 
nil { return 0, err } @@ -5621,16 +3934,16 @@ func (m *Request_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Request_Query) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_CheckTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Request_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.Query != nil { + if m.CheckTx != nil { { - size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5642,16 +3955,58 @@ func (m *Request_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } -func (m *Request_BeginBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_DeliverTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Request_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DeliverTx != nil { + { + size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Request_EndBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EndBlock != nil { + { + size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Request_Commit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.BeginBlock != nil { + if m.Commit != nil { { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5659,20 +4014,20 @@ func (m *Request_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x42 + dAtA[i] = 0x52 } return len(dAtA) - i, nil } -func (m *Request_CheckTx) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_ListSnapshots) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Request_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_ListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.CheckTx != nil { + if m.ListSnapshots != nil { { - size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ListSnapshots.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5680,20 +4035,20 @@ func (m *Request_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x4a + dAtA[i] = 0x5a } return len(dAtA) - i, nil } -func (m *Request_EndBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_OfferSnapshot) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m 
*Request_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_OfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.EndBlock != nil { + if m.OfferSnapshot != nil { { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.OfferSnapshot.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5701,20 +4056,20 @@ func (m *Request_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x5a + dAtA[i] = 0x62 } return len(dAtA) - i, nil } -func (m *Request_Commit) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_LoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Request_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_LoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.Commit != nil { + if m.LoadSnapshotChunk != nil { { - size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.LoadSnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5722,20 +4077,20 @@ func (m *Request_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x62 + dAtA[i] = 0x6a } return len(dAtA) - i, nil } -func (m *Request_DeliverTx) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_ApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Request_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.DeliverTx != nil { + if m.ApplySnapshotChunk != nil { { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ApplySnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5743,9 +4098,7 @@ func (m *Request_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a + dAtA[i] = 0x72 } return len(dAtA) - i, nil } @@ -5769,10 +4122,6 @@ func (m *RequestEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Message) > 0 { i -= len(m.Message) copy(dAtA[i:], m.Message) @@ -5803,10 +4152,6 @@ func (m *RequestFlush) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } return len(dAtA) - i, nil } @@ -5830,10 +4175,6 @@ func (m *RequestInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.P2PVersion != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.P2PVersion)) i-- @@ -5854,47 +4195,6 @@ func (m *RequestInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RequestSetOption) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestSetOption) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) 
-} - -func (m *RequestSetOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *RequestInitChain) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -5915,9 +4215,10 @@ func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.InitialHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.InitialHeight)) + i-- + dAtA[i] = 0x30 } if len(m.AppStateBytes) > 0 { i -= len(m.AppStateBytes) @@ -5959,12 +4260,12 @@ func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - n13, err13 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err13 != nil { - return 0, err13 + n16, err16 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err16 != nil { + return 0, err16 } - i -= n13 - i = encodeVarintTypes(dAtA, i, uint64(n13)) + i -= n16 + i = encodeVarintTypes(dAtA, i, uint64(n16)) i-- dAtA[i] = 0xa return len(dAtA) - i, nil @@ -5990,10 +4291,6 @@ func (m *RequestQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Prove { i-- if m.Prove { @@ -6046,10 +4343,6 @@ func (m *RequestBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.ByzantineValidators) > 0 { for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { { @@ -6114,10 +4407,6 @@ func (m *RequestCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Type != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Type)) i-- @@ -6153,10 +4442,6 @@ func (m *RequestDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Tx) > 0 { i -= len(m.Tx) copy(dAtA[i:], m.Tx) @@ -6187,10 +4472,6 @@ func (m *RequestEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Height != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Height)) i-- @@ -6219,9 +4500,150 @@ func (m *RequestCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + return len(dAtA) - i, nil +} + +func (m *RequestListSnapshots) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) 
+ if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *RequestOfferSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x12 + } + if m.Snapshot != nil { + { + size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestLoadSnapshotChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestLoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestLoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Chunk != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Chunk)) + i-- + dAtA[i] = 0x18 + } + if m.Format != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RequestApplySnapshotChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x1a + } + if len(m.Chunk) > 0 { + i -= len(m.Chunk) + copy(dAtA[i:], m.Chunk) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Chunk))) + i-- + dAtA[i] = 0x12 + } + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } @@ -6246,10 +4668,6 @@ func (m *Response) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Value != nil { { size := m.Value.Size() @@ -6342,20 +4760,83 @@ func (m *Response_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m 
*Response_InitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InitChain != nil { + { + size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Response_Query) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Query != nil { + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Response_BeginBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.BeginBlock != nil { + { + size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } return len(dAtA) - i, nil } -func (m *Response_SetOption) MarshalTo(dAtA []byte) (int, error) { +func (m *Response_CheckTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_SetOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.SetOption != nil { + if m.CheckTx != nil { { - size, err := m.SetOption.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6363,20 +4844,20 @@ func (m *Response_SetOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x2a + dAtA[i] = 0x42 } return len(dAtA) - i, nil } -func (m *Response_InitChain) MarshalTo(dAtA []byte) (int, error) { +func (m *Response_DeliverTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.InitChain != nil { + if m.DeliverTx != nil { { - size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6384,20 +4865,20 @@ func (m *Response_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x32 + dAtA[i] = 0x4a } return len(dAtA) - i, nil } -func (m *Response_Query) MarshalTo(dAtA []byte) (int, error) { +func (m *Response_EndBlock) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.Query != nil { + if m.EndBlock != nil { { - size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6405,20 
+4886,20 @@ func (m *Response_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x3a + dAtA[i] = 0x52 } return len(dAtA) - i, nil } -func (m *Response_BeginBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *Response_Commit) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.BeginBlock != nil { + if m.Commit != nil { { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6426,20 +4907,20 @@ func (m *Response_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x42 + dAtA[i] = 0x5a } return len(dAtA) - i, nil } -func (m *Response_CheckTx) MarshalTo(dAtA []byte) (int, error) { +func (m *Response_ListSnapshots) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_ListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.CheckTx != nil { + if m.ListSnapshots != nil { { - size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ListSnapshots.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6447,20 +4928,20 @@ func (m *Response_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x4a + dAtA[i] = 0x62 } return len(dAtA) - i, nil } -func (m *Response_DeliverTx) MarshalTo(dAtA []byte) (int, error) { +func (m *Response_OfferSnapshot) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_OfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.DeliverTx != nil { + if m.OfferSnapshot != nil { { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.OfferSnapshot.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6468,20 +4949,20 @@ func (m *Response_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x52 + dAtA[i] = 0x6a } return len(dAtA) - i, nil } -func (m *Response_EndBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *Response_LoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_LoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.EndBlock != nil { + if m.LoadSnapshotChunk != nil { { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.LoadSnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6489,20 +4970,20 @@ func (m *Response_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x5a + dAtA[i] = 0x72 } return len(dAtA) - i, nil } -func (m *Response_Commit) MarshalTo(dAtA []byte) (int, error) { +func (m *Response_ApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { size := 
m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Response_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.Commit != nil { + if m.ApplySnapshotChunk != nil { { - size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ApplySnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6510,7 +4991,7 @@ func (m *Response_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x62 + dAtA[i] = 0x7a } return len(dAtA) - i, nil } @@ -6534,10 +5015,6 @@ func (m *ResponseException) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Error) > 0 { i -= len(m.Error) copy(dAtA[i:], m.Error) @@ -6568,10 +5045,6 @@ func (m *ResponseEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Message) > 0 { i -= len(m.Message) copy(dAtA[i:], m.Message) @@ -6602,10 +5075,6 @@ func (m *ResponseFlush) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } return len(dAtA) - i, nil } @@ -6629,10 +5098,6 @@ func (m *ResponseInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.LastBlockAppHash) > 0 { i -= len(m.LastBlockAppHash) copy(dAtA[i:], m.LastBlockAppHash) @@ -6667,52 +5132,6 @@ func (m *ResponseInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseSetOption) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseSetOption) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseSetOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Info) > 0 { - i -= len(m.Info) - copy(dAtA[i:], m.Info) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) - i-- - dAtA[i] = 0x22 - } - if len(m.Log) > 0 { - i -= len(m.Log) - copy(dAtA[i:], m.Log) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) - i-- - dAtA[i] = 0x1a - } - if m.Code != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Code)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - func (m *ResponseInitChain) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6733,9 +5152,12 @@ func (m *ResponseInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x1a } if len(m.Validators) > 0 { for iNdEx := len(m.Validators) - 1; iNdEx >= 0; iNdEx-- { @@ -6786,10 +5208,6 @@ func (m 
*ResponseQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Codespace) > 0 { i -= len(m.Codespace) copy(dAtA[i:], m.Codespace) @@ -6802,9 +5220,9 @@ func (m *ResponseQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x48 } - if m.Proof != nil { + if m.ProofOps != nil { { - size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ProofOps.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -6875,10 +5293,6 @@ func (m *ResponseBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Events) > 0 { for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { { @@ -6916,10 +5330,6 @@ func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Codespace) > 0 { i -= len(m.Codespace) copy(dAtA[i:], m.Codespace) @@ -7000,10 +5410,6 @@ func (m *ResponseDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Codespace) > 0 { i -= len(m.Codespace) copy(dAtA[i:], m.Codespace) @@ -7064,7 +5470,170 @@ func (m *ResponseDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { +func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ConsensusParamUpdates != nil { + { + size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ValidatorUpdates) > 0 { + for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseCommit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RetainHeight != 0 { + i = 
encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) + i-- + dAtA[i] = 0x18 + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *ResponseListSnapshots) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Snapshots) > 0 { + for iNdEx := len(m.Snapshots) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Snapshots[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseOfferSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Result != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Result)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseLoadSnapshotChunk) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7074,64 +5643,27 @@ func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *ResponseLoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResponseLoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.ConsensusParamUpdates != nil { - { - size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } + if len(m.Chunk) > 0 { + i -= len(m.Chunk) + copy(dAtA[i:], m.Chunk) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Chunk))) i-- - dAtA[i] = 0x12 - } - if len(m.ValidatorUpdates) > 0 { - for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { +func 
(m *ResponseApplySnapshotChunk) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7141,31 +5673,47 @@ func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseCommit) MarshalTo(dAtA []byte) (int, error) { +func (m *ResponseApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.RejectSenders) > 0 { + for iNdEx := len(m.RejectSenders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RejectSenders[iNdEx]) + copy(dAtA[i:], m.RejectSenders[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.RejectSenders[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } - if m.RetainHeight != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) + if len(m.RefetchChunks) > 0 { + dAtA39 := make([]byte, len(m.RefetchChunks)*10) + var j38 int + for _, num := range m.RefetchChunks { + for num >= 1<<7 { + dAtA39[j38] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j38++ + } + dAtA39[j38] = uint8(num) + j38++ + } + i -= j38 + copy(dAtA[i:], dAtA39[:j38]) + i = encodeVarintTypes(dAtA, i, uint64(j38)) i-- - dAtA[i] = 0x18 + dAtA[i] = 0x12 } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + if m.Result != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Result)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x8 } return len(dAtA) - i, nil } @@ -7190,9 +5738,17 @@ func (m *ConsensusParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Version != nil { + { + size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } if m.Validator != nil { { @@ -7253,10 +5809,6 @@ func (m *BlockParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.MaxGas != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.MaxGas)) i-- @@ -7270,82 +5822,6 @@ func (m *BlockParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *EvidenceParams) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EvidenceParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EvidenceParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n34, err34 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration):]) - if err34 != nil { - return 0, err34 - } - i -= n34 - i = encodeVarintTypes(dAtA, i, uint64(n34)) - i-- - dAtA[i] = 0x12 - if m.MaxAgeNumBlocks != 0 { - i = 
encodeVarintTypes(dAtA, i, uint64(m.MaxAgeNumBlocks)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ValidatorParams) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ValidatorParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValidatorParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.PubKeyTypes) > 0 { - for iNdEx := len(m.PubKeyTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PubKeyTypes[iNdEx]) - copy(dAtA[i:], m.PubKeyTypes[iNdEx]) - i = encodeVarintTypes(dAtA, i, uint64(len(m.PubKeyTypes[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *LastCommitInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7366,10 +5842,6 @@ func (m *LastCommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Votes) > 0 { for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { { @@ -7412,10 +5884,6 @@ func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Attributes) > 0 { for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { { @@ -7440,7 +5908,7 @@ func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Header) Marshal() (dAtA []byte, err error) { +func (m *EventAttribute) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7450,164 +5918,44 @@ func (m *Header) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Header) MarshalTo(dAtA []byte) (int, error) { +func (m *EventAttribute) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *EventAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ProposerAddress) > 0 { - i -= len(m.ProposerAddress) - copy(dAtA[i:], m.ProposerAddress) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) - i-- - dAtA[i] = 0x72 - } - if len(m.EvidenceHash) > 0 { - i -= len(m.EvidenceHash) - copy(dAtA[i:], m.EvidenceHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.EvidenceHash))) - i-- - dAtA[i] = 0x6a - } - if len(m.LastResultsHash) > 0 { - i -= len(m.LastResultsHash) - copy(dAtA[i:], m.LastResultsHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) - i-- - dAtA[i] = 0x62 - } - if len(m.AppHash) > 0 { - i -= len(m.AppHash) - copy(dAtA[i:], m.AppHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) - i-- - dAtA[i] = 0x5a - } - if len(m.ConsensusHash) > 0 { - i -= len(m.ConsensusHash) - copy(dAtA[i:], m.ConsensusHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ConsensusHash))) - i-- - dAtA[i] = 0x52 - } - if len(m.NextValidatorsHash) > 0 { - i -= 
len(m.NextValidatorsHash) - copy(dAtA[i:], m.NextValidatorsHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) - i-- - dAtA[i] = 0x4a - } - if len(m.ValidatorsHash) > 0 { - i -= len(m.ValidatorsHash) - copy(dAtA[i:], m.ValidatorsHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorsHash))) - i-- - dAtA[i] = 0x42 - } - if len(m.DataHash) > 0 { - i -= len(m.DataHash) - copy(dAtA[i:], m.DataHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.DataHash))) - i-- - dAtA[i] = 0x3a - } - if len(m.LastCommitHash) > 0 { - i -= len(m.LastCommitHash) - copy(dAtA[i:], m.LastCommitHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.LastCommitHash))) + if m.Index { i-- - dAtA[i] = 0x32 - } - { - size, err := m.LastBlockId.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Index { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - n36, err36 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err36 != nil { - return 0, err36 - } - i -= n36 - i = encodeVarintTypes(dAtA, i, uint64(n36)) - i-- - dAtA[i] = 0x22 - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) i-- dAtA[i] = 0x18 } - if len(m.ChainID) > 0 { - i -= len(m.ChainID) - copy(dAtA[i:], m.ChainID) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) i-- dAtA[i] = 0x12 } - { - size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Version) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Version) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Version) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.App != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.App)) - i-- - dAtA[i] = 0x10 - } - if m.Block != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Block)) + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *BlockID) Marshal() (dAtA []byte, err error) { +func (m *TxResult) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -7617,22 +5965,18 @@ func (m *BlockID) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BlockID) MarshalTo(dAtA []byte) (int, error) { +func (m *TxResult) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *BlockID) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *TxResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } { - size, err := m.PartsHeader.MarshalToSizedBuffer(dAtA[:i]) + size, err := 
m.Result.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -7640,50 +5984,21 @@ func (m *BlockID) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PartSetHeader) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PartSetHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + dAtA[i] = 0x22 + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i-- + dAtA[i] = 0x1a } - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 } - if m.Total != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Total)) + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) i-- dAtA[i] = 0x8 } @@ -7710,10 +6025,6 @@ func (m *Validator) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Power != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Power)) i-- @@ -7749,10 +6060,6 @@ func (m *ValidatorUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Power != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Power)) i-- @@ -7791,10 +6098,6 @@ func (m *VoteInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.SignedLastBlock { i-- if m.SignedLastBlock { @@ -7818,47 +6121,6 @@ func (m *VoteInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PubKey) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PubKey) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PubKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *Evidence) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7879,21 +6141,17 @@ func (m *Evidence) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.TotalVotingPower != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.TotalVotingPower)) i-- dAtA[i] = 0x28 } - n41, err41 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err41 != nil { - return 0, err41 + n47, err47 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err47 != nil { + return 0, err47 } - i -= n41 - i = encodeVarintTypes(dAtA, i, uint64(n41)) + i -= n47 + i = encodeVarintTypes(dAtA, i, uint64(n47)) i-- dAtA[i] = 0x22 if m.Height != 0 { @@ -7911,952 +6169,76 @@ func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func NewPopulatedRequest(r randyTypes, easy bool) *Request { - this := &Request{} - oneofNumber_Value := []int32{2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 19}[r.Intn(11)] - switch oneofNumber_Value { - case 2: - this.Value = NewPopulatedRequest_Echo(r, easy) - case 3: - this.Value = NewPopulatedRequest_Flush(r, easy) - case 4: - this.Value = NewPopulatedRequest_Info(r, easy) - case 5: - this.Value = NewPopulatedRequest_SetOption(r, easy) - case 6: - this.Value = NewPopulatedRequest_InitChain(r, easy) - case 7: - this.Value = NewPopulatedRequest_Query(r, easy) - case 8: - this.Value = NewPopulatedRequest_BeginBlock(r, easy) - case 9: - this.Value = NewPopulatedRequest_CheckTx(r, easy) - case 11: - this.Value = NewPopulatedRequest_EndBlock(r, easy) - case 12: - this.Value = NewPopulatedRequest_Commit(r, easy) - case 19: - this.Value = NewPopulatedRequest_DeliverTx(r, easy) - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 20) - } - return this -} - -func NewPopulatedRequest_Echo(r randyTypes, easy bool) *Request_Echo { - this := &Request_Echo{} - this.Echo = NewPopulatedRequestEcho(r, easy) - return this -} -func NewPopulatedRequest_Flush(r randyTypes, easy bool) *Request_Flush { - this := &Request_Flush{} - this.Flush = NewPopulatedRequestFlush(r, easy) - return this -} -func NewPopulatedRequest_Info(r randyTypes, easy bool) *Request_Info { - this := &Request_Info{} - this.Info = NewPopulatedRequestInfo(r, easy) - return this -} -func NewPopulatedRequest_SetOption(r randyTypes, easy bool) *Request_SetOption { - this := &Request_SetOption{} - this.SetOption = NewPopulatedRequestSetOption(r, easy) - return this -} -func NewPopulatedRequest_InitChain(r randyTypes, easy bool) *Request_InitChain { - this := &Request_InitChain{} - this.InitChain = NewPopulatedRequestInitChain(r, easy) - return this -} -func NewPopulatedRequest_Query(r randyTypes, easy bool) *Request_Query { - this := &Request_Query{} - this.Query = NewPopulatedRequestQuery(r, easy) - return this -} -func NewPopulatedRequest_BeginBlock(r randyTypes, easy bool) *Request_BeginBlock { - this := &Request_BeginBlock{} - this.BeginBlock = 
NewPopulatedRequestBeginBlock(r, easy) - return this -} -func NewPopulatedRequest_CheckTx(r randyTypes, easy bool) *Request_CheckTx { - this := &Request_CheckTx{} - this.CheckTx = NewPopulatedRequestCheckTx(r, easy) - return this -} -func NewPopulatedRequest_EndBlock(r randyTypes, easy bool) *Request_EndBlock { - this := &Request_EndBlock{} - this.EndBlock = NewPopulatedRequestEndBlock(r, easy) - return this -} -func NewPopulatedRequest_Commit(r randyTypes, easy bool) *Request_Commit { - this := &Request_Commit{} - this.Commit = NewPopulatedRequestCommit(r, easy) - return this -} -func NewPopulatedRequest_DeliverTx(r randyTypes, easy bool) *Request_DeliverTx { - this := &Request_DeliverTx{} - this.DeliverTx = NewPopulatedRequestDeliverTx(r, easy) - return this -} -func NewPopulatedRequestEcho(r randyTypes, easy bool) *RequestEcho { - this := &RequestEcho{} - this.Message = string(randStringTypes(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 2) - } - return this -} - -func NewPopulatedRequestFlush(r randyTypes, easy bool) *RequestFlush { - this := &RequestFlush{} - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 1) - } - return this -} - -func NewPopulatedRequestInfo(r randyTypes, easy bool) *RequestInfo { - this := &RequestInfo{} - this.Version = string(randStringTypes(r)) - this.BlockVersion = uint64(uint64(r.Uint32())) - this.P2PVersion = uint64(uint64(r.Uint32())) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 4) - } - return this -} - -func NewPopulatedRequestSetOption(r randyTypes, easy bool) *RequestSetOption { - this := &RequestSetOption{} - this.Key = string(randStringTypes(r)) - this.Value = string(randStringTypes(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) - } - return this -} - -func NewPopulatedRequestInitChain(r randyTypes, easy bool) *RequestInitChain { - this := &RequestInitChain{} - v1 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v1 - this.ChainId = string(randStringTypes(r)) - if r.Intn(5) != 0 { - this.ConsensusParams = NewPopulatedConsensusParams(r, easy) - } - if r.Intn(5) != 0 { - v2 := r.Intn(5) - this.Validators = make([]ValidatorUpdate, v2) - for i := 0; i < v2; i++ { - v3 := NewPopulatedValidatorUpdate(r, easy) - this.Validators[i] = *v3 - } - } - v4 := r.Intn(100) - this.AppStateBytes = make([]byte, v4) - for i := 0; i < v4; i++ { - this.AppStateBytes[i] = byte(r.Intn(256)) - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 6) - } - return this -} - -func NewPopulatedRequestQuery(r randyTypes, easy bool) *RequestQuery { - this := &RequestQuery{} - v5 := r.Intn(100) - this.Data = make([]byte, v5) - for i := 0; i < v5; i++ { - this.Data[i] = byte(r.Intn(256)) - } - this.Path = string(randStringTypes(r)) - this.Height = int64(r.Int63()) - if r.Intn(2) == 0 { - this.Height *= -1 - } - this.Prove = bool(bool(r.Intn(2) == 0)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 5) - } - return this -} - -func NewPopulatedRequestBeginBlock(r randyTypes, easy bool) *RequestBeginBlock { - this := &RequestBeginBlock{} - v6 := r.Intn(100) - this.Hash = make([]byte, v6) - for i := 0; i < v6; i++ { - this.Hash[i] = byte(r.Intn(256)) - } - v7 := NewPopulatedHeader(r, easy) - this.Header = *v7 - v8 := NewPopulatedLastCommitInfo(r, easy) - this.LastCommitInfo = *v8 - if r.Intn(5) != 0 { - v9 := r.Intn(5) - 
this.ByzantineValidators = make([]Evidence, v9) - for i := 0; i < v9; i++ { - v10 := NewPopulatedEvidence(r, easy) - this.ByzantineValidators[i] = *v10 - } - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 5) - } - return this -} - -func NewPopulatedRequestCheckTx(r randyTypes, easy bool) *RequestCheckTx { - this := &RequestCheckTx{} - v11 := r.Intn(100) - this.Tx = make([]byte, v11) - for i := 0; i < v11; i++ { - this.Tx[i] = byte(r.Intn(256)) - } - this.Type = CheckTxType([]int32{0, 1}[r.Intn(2)]) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) - } - return this -} - -func NewPopulatedRequestDeliverTx(r randyTypes, easy bool) *RequestDeliverTx { - this := &RequestDeliverTx{} - v12 := r.Intn(100) - this.Tx = make([]byte, v12) - for i := 0; i < v12; i++ { - this.Tx[i] = byte(r.Intn(256)) - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 2) - } - return this -} - -func NewPopulatedRequestEndBlock(r randyTypes, easy bool) *RequestEndBlock { - this := &RequestEndBlock{} - this.Height = int64(r.Int63()) - if r.Intn(2) == 0 { - this.Height *= -1 - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 2) - } - return this -} - -func NewPopulatedRequestCommit(r randyTypes, easy bool) *RequestCommit { - this := &RequestCommit{} - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 1) - } - return this -} - -func NewPopulatedResponse(r randyTypes, easy bool) *Response { - this := &Response{} - oneofNumber_Value := []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}[r.Intn(12)] - switch oneofNumber_Value { - case 1: - this.Value = NewPopulatedResponse_Exception(r, easy) - case 2: - this.Value = NewPopulatedResponse_Echo(r, easy) - case 3: - this.Value = NewPopulatedResponse_Flush(r, easy) - case 4: - this.Value = NewPopulatedResponse_Info(r, easy) - case 5: - this.Value = NewPopulatedResponse_SetOption(r, easy) - case 6: - this.Value = NewPopulatedResponse_InitChain(r, easy) - case 7: - this.Value = NewPopulatedResponse_Query(r, easy) - case 8: - this.Value = NewPopulatedResponse_BeginBlock(r, easy) - case 9: - this.Value = NewPopulatedResponse_CheckTx(r, easy) - case 10: - this.Value = NewPopulatedResponse_DeliverTx(r, easy) - case 11: - this.Value = NewPopulatedResponse_EndBlock(r, easy) - case 12: - this.Value = NewPopulatedResponse_Commit(r, easy) - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 13) - } - return this -} - -func NewPopulatedResponse_Exception(r randyTypes, easy bool) *Response_Exception { - this := &Response_Exception{} - this.Exception = NewPopulatedResponseException(r, easy) - return this -} -func NewPopulatedResponse_Echo(r randyTypes, easy bool) *Response_Echo { - this := &Response_Echo{} - this.Echo = NewPopulatedResponseEcho(r, easy) - return this -} -func NewPopulatedResponse_Flush(r randyTypes, easy bool) *Response_Flush { - this := &Response_Flush{} - this.Flush = NewPopulatedResponseFlush(r, easy) - return this -} -func NewPopulatedResponse_Info(r randyTypes, easy bool) *Response_Info { - this := &Response_Info{} - this.Info = NewPopulatedResponseInfo(r, easy) - return this -} -func NewPopulatedResponse_SetOption(r randyTypes, easy bool) *Response_SetOption { - this := &Response_SetOption{} - this.SetOption = NewPopulatedResponseSetOption(r, easy) - return this -} -func NewPopulatedResponse_InitChain(r randyTypes, easy bool) *Response_InitChain { - this := 
&Response_InitChain{} - this.InitChain = NewPopulatedResponseInitChain(r, easy) - return this -} -func NewPopulatedResponse_Query(r randyTypes, easy bool) *Response_Query { - this := &Response_Query{} - this.Query = NewPopulatedResponseQuery(r, easy) - return this -} -func NewPopulatedResponse_BeginBlock(r randyTypes, easy bool) *Response_BeginBlock { - this := &Response_BeginBlock{} - this.BeginBlock = NewPopulatedResponseBeginBlock(r, easy) - return this -} -func NewPopulatedResponse_CheckTx(r randyTypes, easy bool) *Response_CheckTx { - this := &Response_CheckTx{} - this.CheckTx = NewPopulatedResponseCheckTx(r, easy) - return this -} -func NewPopulatedResponse_DeliverTx(r randyTypes, easy bool) *Response_DeliverTx { - this := &Response_DeliverTx{} - this.DeliverTx = NewPopulatedResponseDeliverTx(r, easy) - return this -} -func NewPopulatedResponse_EndBlock(r randyTypes, easy bool) *Response_EndBlock { - this := &Response_EndBlock{} - this.EndBlock = NewPopulatedResponseEndBlock(r, easy) - return this -} -func NewPopulatedResponse_Commit(r randyTypes, easy bool) *Response_Commit { - this := &Response_Commit{} - this.Commit = NewPopulatedResponseCommit(r, easy) - return this -} -func NewPopulatedResponseException(r randyTypes, easy bool) *ResponseException { - this := &ResponseException{} - this.Error = string(randStringTypes(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 2) - } - return this -} - -func NewPopulatedResponseEcho(r randyTypes, easy bool) *ResponseEcho { - this := &ResponseEcho{} - this.Message = string(randStringTypes(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 2) - } - return this -} - -func NewPopulatedResponseFlush(r randyTypes, easy bool) *ResponseFlush { - this := &ResponseFlush{} - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 1) - } - return this -} - -func NewPopulatedResponseInfo(r randyTypes, easy bool) *ResponseInfo { - this := &ResponseInfo{} - this.Data = string(randStringTypes(r)) - this.Version = string(randStringTypes(r)) - this.AppVersion = uint64(uint64(r.Uint32())) - this.LastBlockHeight = int64(r.Int63()) - if r.Intn(2) == 0 { - this.LastBlockHeight *= -1 - } - v13 := r.Intn(100) - this.LastBlockAppHash = make([]byte, v13) - for i := 0; i < v13; i++ { - this.LastBlockAppHash[i] = byte(r.Intn(256)) - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 6) - } - return this -} - -func NewPopulatedResponseSetOption(r randyTypes, easy bool) *ResponseSetOption { - this := &ResponseSetOption{} - this.Code = uint32(r.Uint32()) - this.Log = string(randStringTypes(r)) - this.Info = string(randStringTypes(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 5) - } - return this -} - -func NewPopulatedResponseInitChain(r randyTypes, easy bool) *ResponseInitChain { - this := &ResponseInitChain{} - if r.Intn(5) != 0 { - this.ConsensusParams = NewPopulatedConsensusParams(r, easy) - } - if r.Intn(5) != 0 { - v14 := r.Intn(5) - this.Validators = make([]ValidatorUpdate, v14) - for i := 0; i < v14; i++ { - v15 := NewPopulatedValidatorUpdate(r, easy) - this.Validators[i] = *v15 - } - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) - } - return this -} - -func NewPopulatedResponseQuery(r randyTypes, easy bool) *ResponseQuery { - this := &ResponseQuery{} - this.Code = uint32(r.Uint32()) - this.Log = string(randStringTypes(r)) - this.Info = 
string(randStringTypes(r)) - this.Index = int64(r.Int63()) - if r.Intn(2) == 0 { - this.Index *= -1 - } - v16 := r.Intn(100) - this.Key = make([]byte, v16) - for i := 0; i < v16; i++ { - this.Key[i] = byte(r.Intn(256)) - } - v17 := r.Intn(100) - this.Value = make([]byte, v17) - for i := 0; i < v17; i++ { - this.Value[i] = byte(r.Intn(256)) - } - if r.Intn(5) != 0 { - this.Proof = merkle.NewPopulatedProof(r, easy) - } - this.Height = int64(r.Int63()) - if r.Intn(2) == 0 { - this.Height *= -1 - } - this.Codespace = string(randStringTypes(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 11) - } - return this -} - -func NewPopulatedResponseBeginBlock(r randyTypes, easy bool) *ResponseBeginBlock { - this := &ResponseBeginBlock{} - if r.Intn(5) != 0 { - v18 := r.Intn(5) - this.Events = make([]Event, v18) - for i := 0; i < v18; i++ { - v19 := NewPopulatedEvent(r, easy) - this.Events[i] = *v19 - } - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 2) - } - return this -} - -func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { - this := &ResponseCheckTx{} - this.Code = uint32(r.Uint32()) - v20 := r.Intn(100) - this.Data = make([]byte, v20) - for i := 0; i < v20; i++ { - this.Data[i] = byte(r.Intn(256)) - } - this.Log = string(randStringTypes(r)) - this.Info = string(randStringTypes(r)) - this.GasWanted = int64(r.Int63()) - if r.Intn(2) == 0 { - this.GasWanted *= -1 - } - this.GasUsed = int64(r.Int63()) - if r.Intn(2) == 0 { - this.GasUsed *= -1 - } - if r.Intn(5) != 0 { - v21 := r.Intn(5) - this.Events = make([]Event, v21) - for i := 0; i < v21; i++ { - v22 := NewPopulatedEvent(r, easy) - this.Events[i] = *v22 - } - } - this.Codespace = string(randStringTypes(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 9) - } - return this -} - -func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { - this := &ResponseDeliverTx{} - this.Code = uint32(r.Uint32()) - v23 := r.Intn(100) - this.Data = make([]byte, v23) - for i := 0; i < v23; i++ { - this.Data[i] = byte(r.Intn(256)) - } - this.Log = string(randStringTypes(r)) - this.Info = string(randStringTypes(r)) - this.GasWanted = int64(r.Int63()) - if r.Intn(2) == 0 { - this.GasWanted *= -1 - } - this.GasUsed = int64(r.Int63()) - if r.Intn(2) == 0 { - this.GasUsed *= -1 - } - if r.Intn(5) != 0 { - v24 := r.Intn(5) - this.Events = make([]Event, v24) - for i := 0; i < v24; i++ { - v25 := NewPopulatedEvent(r, easy) - this.Events[i] = *v25 - } - } - this.Codespace = string(randStringTypes(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 9) - } - return this -} - -func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { - this := &ResponseEndBlock{} - if r.Intn(5) != 0 { - v26 := r.Intn(5) - this.ValidatorUpdates = make([]ValidatorUpdate, v26) - for i := 0; i < v26; i++ { - v27 := NewPopulatedValidatorUpdate(r, easy) - this.ValidatorUpdates[i] = *v27 - } - } - if r.Intn(5) != 0 { - this.ConsensusParamUpdates = NewPopulatedConsensusParams(r, easy) - } - if r.Intn(5) != 0 { - v28 := r.Intn(5) - this.Events = make([]Event, v28) - for i := 0; i < v28; i++ { - v29 := NewPopulatedEvent(r, easy) - this.Events[i] = *v29 - } - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 4) - } - return this -} - -func NewPopulatedResponseCommit(r randyTypes, easy bool) *ResponseCommit { - this := &ResponseCommit{} - v30 := 
r.Intn(100) - this.Data = make([]byte, v30) - for i := 0; i < v30; i++ { - this.Data[i] = byte(r.Intn(256)) - } - this.RetainHeight = int64(r.Int63()) - if r.Intn(2) == 0 { - this.RetainHeight *= -1 - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 4) - } - return this -} - -func NewPopulatedConsensusParams(r randyTypes, easy bool) *ConsensusParams { - this := &ConsensusParams{} - if r.Intn(5) != 0 { - this.Block = NewPopulatedBlockParams(r, easy) - } - if r.Intn(5) != 0 { - this.Evidence = NewPopulatedEvidenceParams(r, easy) - } - if r.Intn(5) != 0 { - this.Validator = NewPopulatedValidatorParams(r, easy) - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 4) - } - return this -} - -func NewPopulatedBlockParams(r randyTypes, easy bool) *BlockParams { - this := &BlockParams{} - this.MaxBytes = int64(r.Int63()) - if r.Intn(2) == 0 { - this.MaxBytes *= -1 - } - this.MaxGas = int64(r.Int63()) - if r.Intn(2) == 0 { - this.MaxGas *= -1 - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) - } - return this -} - -func NewPopulatedEvidenceParams(r randyTypes, easy bool) *EvidenceParams { - this := &EvidenceParams{} - this.MaxAgeNumBlocks = int64(r.Int63()) - if r.Intn(2) == 0 { - this.MaxAgeNumBlocks *= -1 - } - v31 := github_com_gogo_protobuf_types.NewPopulatedStdDuration(r, easy) - this.MaxAgeDuration = *v31 - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) - } - return this -} - -func NewPopulatedValidatorParams(r randyTypes, easy bool) *ValidatorParams { - this := &ValidatorParams{} - v32 := r.Intn(10) - this.PubKeyTypes = make([]string, v32) - for i := 0; i < v32; i++ { - this.PubKeyTypes[i] = string(randStringTypes(r)) - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 2) - } - return this -} - -func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { - this := &LastCommitInfo{} - this.Round = int32(r.Int31()) - if r.Intn(2) == 0 { - this.Round *= -1 - } - if r.Intn(5) != 0 { - v33 := r.Intn(5) - this.Votes = make([]VoteInfo, v33) - for i := 0; i < v33; i++ { - v34 := NewPopulatedVoteInfo(r, easy) - this.Votes[i] = *v34 - } - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) - } - return this -} - -func NewPopulatedEvent(r randyTypes, easy bool) *Event { - this := &Event{} - this.Type = string(randStringTypes(r)) - if r.Intn(5) != 0 { - v35 := r.Intn(5) - this.Attributes = make([]kv.Pair, v35) - for i := 0; i < v35; i++ { - v36 := kv.NewPopulatedPair(r, easy) - this.Attributes[i] = *v36 - } - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) - } - return this -} - -func NewPopulatedHeader(r randyTypes, easy bool) *Header { - this := &Header{} - v37 := NewPopulatedVersion(r, easy) - this.Version = *v37 - this.ChainID = string(randStringTypes(r)) - this.Height = int64(r.Int63()) - if r.Intn(2) == 0 { - this.Height *= -1 - } - v38 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v38 - v39 := NewPopulatedBlockID(r, easy) - this.LastBlockId = *v39 - v40 := r.Intn(100) - this.LastCommitHash = make([]byte, v40) - for i := 0; i < v40; i++ { - this.LastCommitHash[i] = byte(r.Intn(256)) - } - v41 := r.Intn(100) - this.DataHash = make([]byte, v41) - for i := 0; i < v41; i++ { - this.DataHash[i] = byte(r.Intn(256)) - } - v42 := r.Intn(100) - this.ValidatorsHash = make([]byte, v42) - for i := 0; i < v42; 
i++ { - this.ValidatorsHash[i] = byte(r.Intn(256)) - } - v43 := r.Intn(100) - this.NextValidatorsHash = make([]byte, v43) - for i := 0; i < v43; i++ { - this.NextValidatorsHash[i] = byte(r.Intn(256)) - } - v44 := r.Intn(100) - this.ConsensusHash = make([]byte, v44) - for i := 0; i < v44; i++ { - this.ConsensusHash[i] = byte(r.Intn(256)) - } - v45 := r.Intn(100) - this.AppHash = make([]byte, v45) - for i := 0; i < v45; i++ { - this.AppHash[i] = byte(r.Intn(256)) - } - v46 := r.Intn(100) - this.LastResultsHash = make([]byte, v46) - for i := 0; i < v46; i++ { - this.LastResultsHash[i] = byte(r.Intn(256)) - } - v47 := r.Intn(100) - this.EvidenceHash = make([]byte, v47) - for i := 0; i < v47; i++ { - this.EvidenceHash[i] = byte(r.Intn(256)) - } - v48 := r.Intn(100) - this.ProposerAddress = make([]byte, v48) - for i := 0; i < v48; i++ { - this.ProposerAddress[i] = byte(r.Intn(256)) - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 15) - } - return this -} - -func NewPopulatedVersion(r randyTypes, easy bool) *Version { - this := &Version{} - this.Block = uint64(uint64(r.Uint32())) - this.App = uint64(uint64(r.Uint32())) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) - } - return this -} - -func NewPopulatedBlockID(r randyTypes, easy bool) *BlockID { - this := &BlockID{} - v49 := r.Intn(100) - this.Hash = make([]byte, v49) - for i := 0; i < v49; i++ { - this.Hash[i] = byte(r.Intn(256)) - } - v50 := NewPopulatedPartSetHeader(r, easy) - this.PartsHeader = *v50 - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) - } - return this -} - -func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { - this := &PartSetHeader{} - this.Total = int32(r.Int31()) - if r.Intn(2) == 0 { - this.Total *= -1 - } - v51 := r.Intn(100) - this.Hash = make([]byte, v51) - for i := 0; i < v51; i++ { - this.Hash[i] = byte(r.Intn(256)) - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) - } - return this -} - -func NewPopulatedValidator(r randyTypes, easy bool) *Validator { - this := &Validator{} - v52 := r.Intn(100) - this.Address = make([]byte, v52) - for i := 0; i < v52; i++ { - this.Address[i] = byte(r.Intn(256)) - } - this.Power = int64(r.Int63()) - if r.Intn(2) == 0 { - this.Power *= -1 - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 4) - } - return this -} - -func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { - this := &ValidatorUpdate{} - v53 := NewPopulatedPubKey(r, easy) - this.PubKey = *v53 - this.Power = int64(r.Int63()) - if r.Intn(2) == 0 { - this.Power *= -1 - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return this + return dAtA[:n], nil } -func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { - this := &VoteInfo{} - v54 := NewPopulatedValidator(r, easy) - this.Validator = *v54 - this.SignedLastBlock = bool(bool(r.Intn(2) == 0)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) - } - return this +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { - this := &PubKey{} - this.Type = 
string(randStringTypes(r)) - v55 := r.Intn(100) - this.Data = make([]byte, v55) - for i := 0; i < v55; i++ { - this.Data[i] = byte(r.Intn(256)) +func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Metadata) > 0 { + i -= len(m.Metadata) + copy(dAtA[i:], m.Metadata) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Metadata))) + i-- + dAtA[i] = 0x2a } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x22 } - return this -} - -func NewPopulatedEvidence(r randyTypes, easy bool) *Evidence { - this := &Evidence{} - this.Type = string(randStringTypes(r)) - v56 := NewPopulatedValidator(r, easy) - this.Validator = *v56 - this.Height = int64(r.Int63()) - if r.Intn(2) == 0 { - this.Height *= -1 + if m.Chunks != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Chunks)) + i-- + dAtA[i] = 0x18 } - v57 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v57 - this.TotalVotingPower = int64(r.Int63()) - if r.Intn(2) == 0 { - this.TotalVotingPower *= -1 + if m.Format != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x10 } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 6) + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 } - return this -} - -type randyTypes interface { - Float32() float32 - Float64() float64 - Int63() int64 - Int31() int32 - Uint32() uint32 - Intn(n int) int + return len(dAtA) - i, nil } -func randUTF8RuneTypes(r randyTypes) rune { - ru := r.Intn(62) - if ru < 10 { - return rune(ru + 48) - } else if ru < 36 { - return rune(ru + 55) - } - return rune(ru + 61) -} -func randStringTypes(r randyTypes) string { - v58 := r.Intn(100) - tmps := make([]rune, v58) - for i := 0; i < v58; i++ { - tmps[i] = randUTF8RuneTypes(r) - } - return string(tmps) -} -func randUnrecognizedTypes(r randyTypes, maxFieldNumber int) (dAtA []byte) { - l := r.Intn(5) - for i := 0; i < l; i++ { - wire := r.Intn(4) - if wire == 3 { - wire = 5 - } - fieldNumber := maxFieldNumber + r.Intn(100) - dAtA = randFieldTypes(dAtA, r, fieldNumber, wire) - } - return dAtA -} -func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte { - key := uint32(fieldNumber)<<3 | uint32(wire) - switch wire { - case 0: - dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - v59 := r.Int63() - if r.Intn(2) == 0 { - v59 *= -1 - } - dAtA = encodeVarintPopulateTypes(dAtA, uint64(v59)) - case 1: - dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) - case 2: - dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - ll := r.Intn(100) - dAtA = encodeVarintPopulateTypes(dAtA, uint64(ll)) - for j := 0; j < ll; j++ { - dAtA = append(dAtA, byte(r.Intn(256))) - } - default: - dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) - } - return dAtA -} -func encodeVarintPopulateTypes(dAtA []byte, v uint64) []byte { +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset for v >= 1<<7 { - dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + dAtA[offset] = 
uint8(v&0x7f | 0x80) v >>= 7 + offset++ } - dAtA = append(dAtA, uint8(v)) - return dAtA + dAtA[offset] = uint8(v) + return base } func (m *Request) Size() (n int) { if m == nil { @@ -8867,9 +6249,6 @@ func (m *Request) Size() (n int) { if m.Value != nil { n += m.Value.Size() } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -8909,18 +6288,6 @@ func (m *Request_Info) Size() (n int) { } return n } -func (m *Request_SetOption) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.SetOption != nil { - l = m.SetOption.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Request_InitChain) Size() (n int) { if m == nil { return 0 @@ -8969,6 +6336,18 @@ func (m *Request_CheckTx) Size() (n int) { } return n } +func (m *Request_DeliverTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DeliverTx != nil { + l = m.DeliverTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} func (m *Request_EndBlock) Size() (n int) { if m == nil { return 0 @@ -8993,84 +6372,91 @@ func (m *Request_Commit) Size() (n int) { } return n } -func (m *Request_DeliverTx) Size() (n int) { +func (m *Request_ListSnapshots) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.DeliverTx != nil { - l = m.DeliverTx.Size() - n += 2 + l + sovTypes(uint64(l)) + if m.ListSnapshots != nil { + l = m.ListSnapshots.Size() + n += 1 + l + sovTypes(uint64(l)) } return n } -func (m *RequestEcho) Size() (n int) { +func (m *Request_OfferSnapshot) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Message) - if l > 0 { + if m.OfferSnapshot != nil { + l = m.OfferSnapshot.Size() n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } - -func (m *RequestFlush) Size() (n int) { +func (m *Request_LoadSnapshotChunk) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.LoadSnapshotChunk != nil { + l = m.LoadSnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) } return n } - -func (m *RequestInfo) Size() (n int) { +func (m *Request_ApplySnapshotChunk) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Version) - if l > 0 { + if m.ApplySnapshotChunk != nil { + l = m.ApplySnapshotChunk.Size() n += 1 + l + sovTypes(uint64(l)) } - if m.BlockVersion != 0 { - n += 1 + sovTypes(uint64(m.BlockVersion)) + return n +} +func (m *RequestEcho) Size() (n int) { + if m == nil { + return 0 } - if m.P2PVersion != 0 { - n += 1 + sovTypes(uint64(m.P2PVersion)) + var l int + _ = l + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + return n +} + +func (m *RequestFlush) Size() (n int) { + if m == nil { + return 0 } + var l int + _ = l return n } -func (m *RequestSetOption) Size() (n int) { +func (m *RequestInfo) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Key) + l = len(m.Version) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.BlockVersion != 0 { + n += 1 + sovTypes(uint64(m.BlockVersion)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.P2PVersion != 0 { + n += 1 + sovTypes(uint64(m.P2PVersion)) } return n } @@ -9101,8 +6487,8 @@ func (m *RequestInitChain) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + 
if m.InitialHeight != 0 { + n += 1 + sovTypes(uint64(m.InitialHeight)) } return n } @@ -9127,9 +6513,6 @@ func (m *RequestQuery) Size() (n int) { if m.Prove { n += 2 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9153,9 +6536,6 @@ func (m *RequestBeginBlock) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9172,9 +6552,6 @@ func (m *RequestCheckTx) Size() (n int) { if m.Type != 0 { n += 1 + sovTypes(uint64(m.Type)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9188,9 +6565,6 @@ func (m *RequestDeliverTx) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9203,9 +6577,6 @@ func (m *RequestEndBlock) Size() (n int) { if m.Height != 0 { n += 1 + sovTypes(uint64(m.Height)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9215,8 +6586,69 @@ func (m *RequestCommit) Size() (n int) { } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + return n +} + +func (m *RequestListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *RequestOfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Snapshot != nil { + l = m.Snapshot.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestLoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Format != 0 { + n += 1 + sovTypes(uint64(m.Format)) + } + if m.Chunk != 0 { + n += 1 + sovTypes(uint64(m.Chunk)) + } + return n +} + +func (m *RequestApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + l = len(m.Chunk) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) } return n } @@ -9230,9 +6662,6 @@ func (m *Response) Size() (n int) { if m.Value != nil { n += m.Value.Size() } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9284,18 +6713,6 @@ func (m *Response_Info) Size() (n int) { } return n } -func (m *Response_SetOption) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.SetOption != nil { - l = m.SetOption.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} func (m *Response_InitChain) Size() (n int) { if m == nil { return 0 @@ -9380,6 +6797,54 @@ func (m *Response_Commit) Size() (n int) { } return n } +func (m *Response_ListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListSnapshots != nil { + l = m.ListSnapshots.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_OfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OfferSnapshot != nil { + l = m.OfferSnapshot.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_LoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadSnapshotChunk != nil { + l = m.LoadSnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + if m.ApplySnapshotChunk != nil { + l = m.ApplySnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} func (m *ResponseException) Size() (n int) { if m == nil { return 0 @@ -9390,9 +6855,6 @@ func (m *ResponseException) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9406,9 +6868,6 @@ func (m *ResponseEcho) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9418,9 +6877,6 @@ func (m *ResponseFlush) Size() (n int) { } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9448,32 +6904,6 @@ func (m *ResponseInfo) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResponseSetOption) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Code != 0 { - n += 1 + sovTypes(uint64(m.Code)) - } - l = len(m.Log) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Info) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9493,8 +6923,9 @@ func (m *ResponseInitChain) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) } return n } @@ -9527,8 +6958,8 @@ func (m *ResponseQuery) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.Proof != nil { - l = m.Proof.Size() + if m.ProofOps != nil { + l = m.ProofOps.Size() n += 1 + l + sovTypes(uint64(l)) } if m.Height != 0 { @@ -9538,9 +6969,6 @@ func (m *ResponseQuery) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9556,9 +6984,6 @@ func (m *ResponseBeginBlock) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9599,9 +7024,6 @@ func (m *ResponseCheckTx) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9642,9 +7064,6 @@ func (m *ResponseDeliverTx) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9670,9 +7089,6 @@ func (m *ResponseEndBlock) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9689,85 +7105,110 @@ func (m *ResponseCommit) Size() (n int) { if m.RetainHeight != 0 { n += 1 + sovTypes(uint64(m.RetainHeight)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + return n +} + +func (m *ResponseListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Snapshots) > 0 { + for _, e := range m.Snapshots { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } } return n } -func (m *ConsensusParams) Size() (n int) { +func (m *ResponseOfferSnapshot) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Block != nil { - l = m.Block.Size() - n += 1 + l + sovTypes(uint64(l)) + if m.Result != 0 { + n += 1 + sovTypes(uint64(m.Result)) } - if m.Evidence != nil { - l = m.Evidence.Size() - n += 1 + l + sovTypes(uint64(l)) + 
return n +} + +func (m *ResponseLoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 } - if m.Validator != nil { - l = m.Validator.Size() + var l int + _ = l + l = len(m.Chunk) + if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } -func (m *BlockParams) Size() (n int) { +func (m *ResponseApplySnapshotChunk) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.MaxBytes != 0 { - n += 1 + sovTypes(uint64(m.MaxBytes)) + if m.Result != 0 { + n += 1 + sovTypes(uint64(m.Result)) } - if m.MaxGas != 0 { - n += 1 + sovTypes(uint64(m.MaxGas)) + if len(m.RefetchChunks) > 0 { + l = 0 + for _, e := range m.RefetchChunks { + l += sovTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if len(m.RejectSenders) > 0 { + for _, s := range m.RejectSenders { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } } return n } -func (m *EvidenceParams) Size() (n int) { +func (m *ConsensusParams) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.MaxAgeNumBlocks != 0 { - n += 1 + sovTypes(uint64(m.MaxAgeNumBlocks)) + if m.Block != nil { + l = m.Block.Size() + n += 1 + l + sovTypes(uint64(l)) } - l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration) - n += 1 + l + sovTypes(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Evidence != nil { + l = m.Evidence.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Validator != nil { + l = m.Validator.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Version != nil { + l = m.Version.Size() + n += 1 + l + sovTypes(uint64(l)) } return n } -func (m *ValidatorParams) Size() (n int) { +func (m *BlockParams) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.PubKeyTypes) > 0 { - for _, s := range m.PubKeyTypes { - l = len(s) - n += 1 + l + sovTypes(uint64(l)) - } + if m.MaxBytes != 0 { + n += 1 + sovTypes(uint64(m.MaxBytes)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.MaxGas != 0 { + n += 1 + sovTypes(uint64(m.MaxGas)) } return n } @@ -9787,9 +7228,6 @@ func (m *LastCommitInfo) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9809,125 +7247,47 @@ func (m *Event) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } -func (m *Header) Size() (n int) { +func (m *EventAttribute) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = m.Version.Size() - n += 1 + l + sovTypes(uint64(l)) - l = len(m.ChainID) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) - n += 1 + l + sovTypes(uint64(l)) - l = m.LastBlockId.Size() - n += 1 + l + sovTypes(uint64(l)) - l = len(m.LastCommitHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.DataHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.ValidatorsHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.NextValidatorsHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.ConsensusHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.AppHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.LastResultsHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.EvidenceHash) + 
l = len(m.Key) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = len(m.ProposerAddress) + l = len(m.Value) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Index { + n += 2 } return n } -func (m *Version) Size() (n int) { +func (m *TxResult) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Block != 0 { - n += 1 + sovTypes(uint64(m.Block)) - } - if m.App != 0 { - n += 1 + sovTypes(uint64(m.App)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) } - return n -} - -func (m *BlockID) Size() (n int) { - if m == nil { - return 0 + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) } - var l int - _ = l - l = len(m.Hash) + l = len(m.Tx) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = m.PartsHeader.Size() + l = m.Result.Size() n += 1 + l + sovTypes(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PartSetHeader) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Total != 0 { - n += 1 + sovTypes(uint64(m.Total)) - } - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9944,9 +7304,6 @@ func (m *Validator) Size() (n int) { if m.Power != 0 { n += 1 + sovTypes(uint64(m.Power)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9961,9 +7318,6 @@ func (m *ValidatorUpdate) Size() (n int) { if m.Power != 0 { n += 1 + sovTypes(uint64(m.Power)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -9978,54 +7332,53 @@ func (m *VoteInfo) Size() (n int) { if m.SignedLastBlock { n += 2 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } -func (m *PubKey) Size() (n int) { +func (m *Evidence) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + l = m.Validator.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + if m.TotalVotingPower != 0 { + n += 1 + sovTypes(uint64(m.TotalVotingPower)) } return n } -func (m *Evidence) Size() (n int) { +func (m *Snapshot) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = m.Validator.Size() - n += 1 + l + sovTypes(uint64(l)) if m.Height != 0 { n += 1 + sovTypes(uint64(m.Height)) } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) - n += 1 + l + sovTypes(uint64(l)) - if m.TotalVotingPower != 0 { - n += 1 + sovTypes(uint64(m.TotalVotingPower)) + if m.Format != 0 { + n += 1 + sovTypes(uint64(m.Format)) + } + if m.Chunks != 0 { + n += 1 + sovTypes(uint64(m.Chunks)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + l = len(m.Metadata) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) } return n } @@ -10065,7 +7418,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 
2: + case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) } @@ -10100,7 +7453,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } m.Value = &Request_Echo{v} iNdEx = postIndex - case 3: + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) } @@ -10135,7 +7488,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } m.Value = &Request_Flush{v} iNdEx = postIndex - case 4: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } @@ -10170,9 +7523,44 @@ func (m *Request) Unmarshal(dAtA []byte) error { } m.Value = &Request_Info{v} iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestInitChain{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_InitChain{v} + iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SetOption", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10199,15 +7587,15 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestSetOption{} + v := &RequestQuery{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_SetOption{v} + m.Value = &Request_Query{v} iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10234,15 +7622,15 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestInitChain{} + v := &RequestBeginBlock{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_InitChain{v} + m.Value = &Request_BeginBlock{v} iNdEx = postIndex case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10269,15 +7657,15 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestQuery{} + v := &RequestCheckTx{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_Query{v} + m.Value = &Request_CheckTx{v} iNdEx = postIndex case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10304,15 +7692,15 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestBeginBlock{} + v := &RequestDeliverTx{} if err := 
v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_BeginBlock{v} + m.Value = &Request_DeliverTx{v} iNdEx = postIndex case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10339,15 +7727,50 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestCheckTx{} + v := &RequestEndBlock{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_CheckTx{v} + m.Value = &Request_EndBlock{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestCommit{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Commit{v} iNdEx = postIndex case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10374,15 +7797,15 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestEndBlock{} + v := &RequestListSnapshots{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_EndBlock{v} + m.Value = &Request_ListSnapshots{v} iNdEx = postIndex case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10409,15 +7832,50 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestCommit{} + v := &RequestOfferSnapshot{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_Commit{v} + m.Value = &Request_OfferSnapshot{v} + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestLoadSnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_LoadSnapshotChunk{v} iNdEx = postIndex - case 19: + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) } var 
msglen int for shift := uint(0); ; shift += 7 { @@ -10444,11 +7902,11 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestDeliverTx{} + v := &RequestApplySnapshotChunk{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_DeliverTx{v} + m.Value = &Request_ApplySnapshotChunk{v} iNdEx = postIndex default: iNdEx = preIndex @@ -10465,7 +7923,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -10551,7 +8008,6 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -10599,56 +8055,279 @@ func (m *RequestFlush) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockVersion", wireType) + } + m.BlockVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field P2PVersion", wireType) + } + m.P2PVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.P2PVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) 
< 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestInitChain) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestInitChain: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestInitChain: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
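// skipTypes returns the encoded length of an unrecognized field; with the
// XXX_unrecognized append deleted throughout this file, unknown fields are
// now skipped and dropped instead of being preserved for re-serialization.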
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.ConsensusParams == nil { + m.ConsensusParams = &ConsensusParams{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10658,29 +8337,31 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + m.Validators = append(m.Validators, ValidatorUpdate{}) + if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockVersion", wireType) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppStateBytes", wireType) } - m.BlockVersion = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10690,16 +8371,31 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.BlockVersion |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 3: + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppStateBytes = append(m.AppStateBytes[:0], dAtA[iNdEx:postIndex]...) 
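// bytes fields are copied out of the shared wire buffer; the nil check just
// below keeps an empty-but-present field non-nil so it round-trips as written.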
+ if m.AppStateBytes == nil { + m.AppStateBytes = []byte{} + } + iNdEx = postIndex + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field P2PVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) } - m.P2PVersion = 0 + m.InitialHeight = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10709,7 +8405,7 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.P2PVersion |= uint64(b&0x7F) << shift + m.InitialHeight |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -10729,7 +8425,6 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -10739,7 +8434,7 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestSetOption) Unmarshal(dAtA []byte) error { +func (m *RequestQuery) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10762,17 +8457,17 @@ func (m *RequestSetOption) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestSetOption: wiretype end group for non-group") + return fmt.Errorf("proto: RequestQuery: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestSetOption: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestQuery: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10782,27 +8477,29 @@ func (m *RequestSetOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
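// RequestQuery exercises each wire shape in turn: bytes (Data), string (Path),
// varint int64 (Height), and bool (Prove, decoded below as varint != 0).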
+ if m.Data == nil { + m.Data = []byte{} + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10830,8 +8527,47 @@ func (m *RequestSetOption) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Prove = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -10847,7 +8583,6 @@ func (m *RequestSetOption) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -10857,7 +8592,7 @@ func (m *RequestSetOption) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestInitChain) Unmarshal(dAtA []byte) error { +func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10880,17 +8615,17 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestInitChain: wiretype end group for non-group") + return fmt.Errorf("proto: RequestBeginBlock: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestInitChain: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10900,30 +8635,31 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { - return err + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10933,27 +8669,28 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.ChainId = string(dAtA[iNdEx:postIndex]) + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastCommitInfo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10980,16 +8717,13 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConsensusParams == nil { - m.ConsensusParams = &ConsensusParams{} - } - if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastCommitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11016,14 +8750,67 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Validators = append(m.Validators, ValidatorUpdate{}) - if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ByzantineValidators = append(m.ByzantineValidators, Evidence{}) + if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppStateBytes", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -11050,11 +8837,30 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.AppStateBytes = append(m.AppStateBytes[:0], dAtA[iNdEx:postIndex]...) - if m.AppStateBytes == nil { - m.AppStateBytes = []byte{} + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) + if m.Tx == nil { + m.Tx = []byte{} } iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= CheckTxType(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -11070,7 +8876,6 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -11080,7 +8885,7 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestQuery) Unmarshal(dAtA []byte) error { +func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11103,15 +8908,15 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestQuery: wiretype end group for non-group") + return fmt.Errorf("proto: RequestDeliverTx: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestQuery: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -11138,44 +8943,65 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
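// enum fields need no special handling on the wire: CheckTx's Type above
// accumulates the varint directly through a cast on each byte,
// m.Type |= CheckTxType(b&0x7F) << shift.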
+ if m.Tx == nil { + m.Tx = []byte{} } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if skippy < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestEndBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } @@ -11194,26 +9020,6 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Prove = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -11229,7 +9035,6 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
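// the old RequestQuery body is being re-pointed at RequestEndBlock here:
// Height survives as field 1 and the Prove case is deleted.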
iNdEx += skippy } } @@ -11239,7 +9044,7 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { +func (m *RequestCommit) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11262,82 +9067,121 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestBeginBlock: wiretype end group for non-group") + return fmt.Errorf("proto: RequestCommit: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestCommit: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err } - if byteLen < 0 { + if skippy < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen - if postIndex < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) - if m.Hash == nil { - m.Hash = []byte{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - if msglen < 0 { + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestListSnapshots: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestListSnapshots: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } - if postIndex > l { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - if 
err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 3: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestOfferSnapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestOfferSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastCommitInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11364,15 +9208,18 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastCommitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Snapshot == nil { + m.Snapshot = &Snapshot{} + } + if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11382,24 +9229,24 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.ByzantineValidators = append(m.ByzantineValidators, Evidence{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) + if m.AppHash == nil { + m.AppHash = []byte{} } iNdEx = postIndex default: @@ -11417,7 +9264,6 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -11427,7 +9273,7 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { +func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11450,17 +9296,17 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") + return fmt.Errorf("proto: RequestLoadSnapshotChunk: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestLoadSnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - var byteLen int + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11470,31 +9316,35 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.Height |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) } - m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) - if m.Tx == nil { - m.Tx = []byte{} + m.Format = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Format |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 2: + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) } - m.Type = 0 + m.Chunk = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11504,7 +9354,7 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= CheckTxType(b&0x7F) << shift + m.Chunk |= uint32(b&0x7F) << shift if b < 0x80 { break } @@ -11524,7 +9374,6 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -11534,7 +9383,7 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { +func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11557,15 +9406,34 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestDeliverTx: wiretype end group for non-group") + return fmt.Errorf("proto: RequestApplySnapshotChunk: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestApplySnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -11592,10 +9460,42 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) - if m.Tx == nil { - m.Tx = []byte{} + m.Chunk = append(m.Chunk[:0], dAtA[iNdEx:postIndex]...) + if m.Chunk == nil { + m.Chunk = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Sender = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -11612,7 +9512,6 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
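Bytes and string fields such as Chunk and Sender above use wire type 2: a varint length followed by that many payload bytes, guarded by the byteLen < 0 and postIndex > l checks. A self-contained sketch under the same bounds discipline; readLengthDelimited is an illustrative helper, not part of the file:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// readLengthDelimited decodes one wire-type-2 value: a varint length
// followed by that many payload bytes, with the same overflow and
// bounds checks the generated code applies before slicing dAtA.
func readLengthDelimited(b []byte) (payload []byte, n int, err error) {
	l, k := binary.Uvarint(b)
	if k <= 0 {
		return nil, 0, errors.New("bad length varint")
	}
	end := k + int(l)
	if int(l) < 0 || end < k || end > len(b) {
		return nil, 0, errors.New("invalid length")
	}
	return b[k:end], end, nil
}

func main() {
	sender, _, _ := readLengthDelimited([]byte{0x03, 'a', 'b', 'c'})
	fmt.Printf("%s\n", sender) // abc
}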
iNdEx += skippy } } @@ -11622,7 +9521,7 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { +func (m *Response) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11645,17 +9544,17 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestEndBlock: wiretype end group for non-group") + return fmt.Errorf("proto: Response: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exception", wireType) } - m.Height = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11665,122 +9564,100 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestCommit) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + v := &ResponseException{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.Value = &Response_Exception{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestCommit: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestCommit: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseEcho{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if skippy < 0 { + m.Value = &Response_Echo{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Flush", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Response) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + v := &ResponseFlush{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Response: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Value = &Response_Flush{v} + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exception", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11807,15 +9684,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseException{} + v := &ResponseInfo{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Exception{v} + m.Value = &Response_Info{v} iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11842,15 +9719,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseEcho{} + v := &ResponseInitChain{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Echo{v} + m.Value = &Response_InitChain{v} iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11877,15 +9754,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseFlush{} + v := &ResponseQuery{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Flush{v} + m.Value = &Response_Query{v} iNdEx = postIndex - case 4: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11912,15 +9789,15 @@ func (m *Response) 
Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseInfo{} + v := &ResponseBeginBlock{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Info{v} + m.Value = &Response_BeginBlock{v} iNdEx = postIndex - case 5: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SetOption", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11947,15 +9824,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseSetOption{} + v := &ResponseCheckTx{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_SetOption{v} + m.Value = &Response_CheckTx{v} iNdEx = postIndex - case 6: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11982,15 +9859,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseInitChain{} + v := &ResponseDeliverTx{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_InitChain{v} + m.Value = &Response_DeliverTx{v} iNdEx = postIndex - case 7: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12017,15 +9894,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseQuery{} + v := &ResponseEndBlock{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Query{v} + m.Value = &Response_EndBlock{v} iNdEx = postIndex - case 8: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12052,15 +9929,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseBeginBlock{} + v := &ResponseCommit{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_BeginBlock{v} + m.Value = &Response_Commit{v} iNdEx = postIndex - case 9: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12087,15 +9964,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseCheckTx{} + v := &ResponseListSnapshots{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_CheckTx{v} + m.Value = &Response_ListSnapshots{v} iNdEx = postIndex - case 10: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12122,15 +9999,15 @@ func (m *Response) Unmarshal(dAtA []byte) 
error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseDeliverTx{} + v := &ResponseOfferSnapshot{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_DeliverTx{v} + m.Value = &Response_OfferSnapshot{v} iNdEx = postIndex - case 11: + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12157,15 +10034,15 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseEndBlock{} + v := &ResponseLoadSnapshotChunk{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_EndBlock{v} + m.Value = &Response_LoadSnapshotChunk{v} iNdEx = postIndex - case 12: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12192,11 +10069,11 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseCommit{} + v := &ResponseApplySnapshotChunk{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Commit{v} + m.Value = &Response_ApplySnapshotChunk{v} iNdEx = postIndex default: iNdEx = preIndex @@ -12213,7 +10090,6 @@ func (m *Response) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -12299,7 +10175,6 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -12385,7 +10260,6 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -12439,7 +10313,6 @@ func (m *ResponseFlush) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -12580,150 +10453,11 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { break } } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastBlockAppHash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LastBlockAppHash = append(m.LastBlockAppHash[:0], dAtA[iNdEx:postIndex]...) 
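The Response.Unmarshal rewrite above is the oneof dispatch: each field number allocates the matching wrapper (Echo, Info, Commit, and now the four snapshot responses at tags 12 through 15) and stores it behind a single Value interface. A trimmed sketch of how gogoproto models that oneof, using stand-in types:

package main

import "fmt"

// One unexported marker interface plus one wrapper struct per case is
// how gogoproto compiles a oneof; Unmarshal picks the wrapper from the
// field number, exactly as in the cases above.
type isResponse_Value interface{ isResponse_Value() }

type ResponseEcho struct{ Message string }
type ResponseCommit struct{ Data []byte }

type Response_Echo struct{ Echo *ResponseEcho }
type Response_Commit struct{ Commit *ResponseCommit }

func (*Response_Echo) isResponse_Value()   {}
func (*Response_Commit) isResponse_Value() {}

type Response struct{ Value isResponse_Value }

func main() {
	r := Response{Value: &Response_Echo{&ResponseEcho{Message: "pong"}}}
	switch v := r.Value.(type) {
	case *Response_Echo:
		fmt.Println("echo:", v.Echo.Message)
	case *Response_Commit:
		fmt.Println("commit:", len(v.Commit.Data))
	}
}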
- if m.LastBlockAppHash == nil { - m.LastBlockAppHash = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponseSetOption) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseSetOption: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseSetOption: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) - } - m.Code = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Code |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Log = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockAppHash", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12733,23 +10467,25 @@ func (m *ResponseSetOption) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Info = string(dAtA[iNdEx:postIndex]) + m.LastBlockAppHash = append(m.LastBlockAppHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastBlockAppHash == nil { + m.LastBlockAppHash = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex @@ -12766,7 +10502,6 @@ func (m *ResponseSetOption) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -12875,6 +10610,40 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) + if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -12890,7 +10659,6 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -13101,7 +10869,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProofOps", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13128,10 +10896,10 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Proof == nil { - m.Proof = &merkle.Proof{} + if m.ProofOps == nil { + m.ProofOps = &crypto.ProofOps{} } - if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ProofOps.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13201,7 +10969,6 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -13289,7 +11056,6 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -13564,7 +11330,6 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
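Optional nested messages, like the ProofOps field above (crypto.ProofOps replacing merkle.Proof) and the Snapshot field earlier, are decoded by allocating on first sight and recursing into the sub-slice. A minimal sketch of that allocate-if-nil pattern with stand-in types:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// Inner stands in for a nested message; its Unmarshal here just reads
// one varint field instead of a full generated method.
type Inner struct{ X uint64 }

func (m *Inner) Unmarshal(b []byte) error {
	if len(b) < 2 {
		return errors.New("short buffer")
	}
	v, n := binary.Uvarint(b[1:]) // skip the key byte
	if n <= 0 {
		return errors.New("bad varint")
	}
	m.X = v
	return nil
}

type Outer struct{ Child *Inner }

// setChild mirrors the allocate-if-nil step the generated code runs for
// optional message fields before recursing into the sub-slice.
func (m *Outer) setChild(sub []byte) error {
	if m.Child == nil {
		m.Child = &Inner{}
	}
	return m.Child.Unmarshal(sub)
}

func main() {
	var o Outer
	if err := o.setChild([]byte{0x08, 0x2A}); err == nil {
		fmt.Println(o.Child.X) // 42
	}
}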
iNdEx += skippy } } @@ -13724,199 +11489,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) } - m.GasWanted = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GasWanted |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) - } - m.GasUsed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GasUsed |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Codespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseEndBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ValidatorUpdates = append(m.ValidatorUpdates, ValidatorUpdate{}) - if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) - } - var msglen int + m.GasWanted = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -13926,29 +11499,31 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.GasWanted |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConsensusParamUpdates == nil { - m.ConsensusParamUpdates = &ConsensusParams{} + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) } - if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.GasUsed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasUsed |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 3: + case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } @@ -13982,65 +11557,11 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponseCommit) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseCommit: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseCommit: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -14050,45 +11571,24 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } + m.Codespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) - } - m.RetainHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RetainHeight |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -14104,7 +11604,6 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
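Repeated message fields such as Events above (and the Snapshots slice later in this diff) all follow one pattern: append a fresh element, then unmarshal into the slice tail, so no temporary value gets copied. Sketch with stand-in types:

package main

import "fmt"

type Event struct{ Type string }

// Stand-in for the generated per-message Unmarshal.
func (e *Event) Unmarshal(b []byte) error { e.Type = string(b); return nil }

type ResponseDeliverTx struct{ Events []Event }

// appendEvent mirrors the generated pattern for repeated message
// fields: grow the slice by a zero value, then decode into the new
// tail slot in place.
func (m *ResponseDeliverTx) appendEvent(sub []byte) error {
	m.Events = append(m.Events, Event{})
	return m.Events[len(m.Events)-1].Unmarshal(sub)
}

func main() {
	var m ResponseDeliverTx
	_ = m.appendEvent([]byte("transfer"))
	fmt.Println(m.Events[0].Type) // transfer
}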
iNdEx += skippy } } @@ -14114,7 +11613,7 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConsensusParams) Unmarshal(dAtA []byte) error { +func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14137,15 +11636,15 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConsensusParams: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseEndBlock: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConsensusParams: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14172,16 +11671,14 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Block == nil { - m.Block = &BlockParams{} - } - if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ValidatorUpdates = append(m.ValidatorUpdates, ValidatorUpdate{}) + if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14208,16 +11705,16 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Evidence == nil { - m.Evidence = &EvidenceParams{} + if m.ConsensusParamUpdates == nil { + m.ConsensusParamUpdates = &ConsensusParams{} } - if err := m.Evidence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14244,10 +11741,8 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Validator == nil { - m.Validator = &ValidatorParams{} - } - if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14266,7 +11761,6 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -14276,7 +11770,7 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { } return nil } -func (m *BlockParams) Unmarshal(dAtA []byte) error { +func (m *ResponseCommit) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14299,17 +11793,17 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BlockParams: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseCommit: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BlockParams: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseCommit: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxBytes", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } - m.MaxBytes = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -14319,16 +11813,31 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxBytes |= int64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 2: + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxGas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) } - m.MaxGas = 0 + m.RetainHeight = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -14338,7 +11847,7 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxGas |= int64(b&0x7F) << shift + m.RetainHeight |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -14358,7 +11867,6 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -14368,7 +11876,7 @@ func (m *BlockParams) Unmarshal(dAtA []byte) error { } return nil } -func (m *EvidenceParams) Unmarshal(dAtA []byte) error { +func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14391,34 +11899,15 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EvidenceParams: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseListSnapshots: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EvidenceParams: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseListSnapshots: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxAgeNumBlocks", wireType) - } - m.MaxAgeNumBlocks = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxAgeNumBlocks |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxAgeDuration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Snapshots", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14445,7 +11934,8 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.MaxAgeDuration, dAtA[iNdEx:postIndex]); err != nil { + m.Snapshots = append(m.Snapshots, &Snapshot{}) + if err := m.Snapshots[len(m.Snapshots)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14464,7 +11954,6 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -14474,7 +11963,7 @@ func (m *EvidenceParams) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatorParams) Unmarshal(dAtA []byte) error { +func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14497,17 +11986,17 @@ func (m *ValidatorParams) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatorParams: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseOfferSnapshot: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatorParams: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseOfferSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PubKeyTypes", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } - var stringLen uint64 + m.Result = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -14517,24 +12006,11 @@ func (m *ValidatorParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Result |= ResponseOfferSnapshot_Result(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PubKeyTypes = append(m.PubKeyTypes, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -14550,7 +12026,6 @@ func (m *ValidatorParams) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
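Enum fields like Result above are plain wire-type-0 varints cast straight into the generated named type, as in m.Result |= ResponseOfferSnapshot_Result(b&0x7F) << shift. A trimmed stand-in showing the same cast-and-accumulate loop; the type name and constant values here are illustrative, not the generated ones:

package main

import "fmt"

// Result stands in for ResponseOfferSnapshot_Result.
type Result int32

const (
	ResultUnknown Result = 0
	ResultAccept  Result = 1
)

// decodeResult runs the generated case's varint loop, accumulating
// 7 bits per byte directly into the named enum type.
func decodeResult(b []byte) Result {
	var r Result
	for shift := uint(0); len(b) > 0; shift += 7 {
		c := b[0]
		b = b[1:]
		r |= Result(c&0x7F) << shift
		if c < 0x80 {
			break
		}
	}
	return r
}

func main() {
	fmt.Println(decodeResult([]byte{0x01}) == ResultAccept) // true
}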
iNdEx += skippy } } @@ -14560,7 +12035,7 @@ func (m *ValidatorParams) Unmarshal(dAtA []byte) error { } return nil } -func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { +func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14581,38 +12056,19 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { } } fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LastCommitInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LastCommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) - } - m.Round = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Round |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseLoadSnapshotChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseLoadSnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -14622,24 +12078,24 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Votes = append(m.Votes, VoteInfo{}) - if err := m.Votes[len(m.Votes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Chunk = append(m.Chunk[:0], dAtA[iNdEx:postIndex]...) + if m.Chunk == nil { + m.Chunk = []byte{} } iNdEx = postIndex default: @@ -14657,7 +12113,6 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
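The ResponseApplySnapshotChunk hunk just below handles RefetchChunks, a repeated uint32 that can arrive either unpacked (one varint per key) or packed (wire type 2: a single length, then back-to-back varints, pre-counted so the slice can be sized once). A self-contained sketch of the packed path:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// decodePacked reads a packed repeated varint field: one varint length
// followed by consecutive varints, the layout the generated
// RefetchChunks case handles alongside the unpacked form.
func decodePacked(b []byte) ([]uint32, error) {
	l, n := binary.Uvarint(b)
	if n <= 0 || n+int(l) > len(b) {
		return nil, errors.New("bad packed field")
	}
	var out []uint32
	for pos := n; pos < n+int(l); {
		v, m := binary.Uvarint(b[pos:])
		if m <= 0 {
			return nil, errors.New("bad varint")
		}
		out = append(out, uint32(v))
		pos += m
	}
	return out, nil
}

func main() {
	// length 4, then the varints 1, 2, and 300 (0xAC 0x02).
	vals, _ := decodePacked([]byte{0x04, 0x01, 0x02, 0xAC, 0x02})
	fmt.Println(vals) // [1 2 300]
}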
iNdEx += skippy } } @@ -14667,7 +12122,7 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { } return nil } -func (m *Event) Unmarshal(dAtA []byte) error { +func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14690,17 +12145,17 @@ func (m *Event) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseApplySnapshotChunk: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseApplySnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } - var stringLen uint64 + m.Result = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -14710,29 +12165,92 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Result |= ResponseApplySnapshotChunk_Result(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RefetchChunks = append(m.RefetchChunks, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.RefetchChunks) == 0 { + m.RefetchChunks = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RefetchChunks = append(m.RefetchChunks, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field RefetchChunks", wireType) + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RejectSenders", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -14742,25 +12260,23 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b 
:= dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Attributes = append(m.Attributes, kv.Pair{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.RejectSenders = append(m.RejectSenders, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -14777,7 +12293,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -14787,7 +12302,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } return nil } -func (m *Header) Unmarshal(dAtA []byte) error { +func (m *ConsensusParams) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14810,15 +12325,15 @@ func (m *Header) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Header: wiretype end group for non-group") + return fmt.Errorf("proto: ConsensusParams: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConsensusParams: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14845,64 +12360,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Block == nil { + m.Block = &BlockParams{} + } + if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChainID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14929,13 +12396,16 @@ func (m *Header) 
Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + if m.Evidence == nil { + m.Evidence = &types1.EvidenceParams{} + } + if err := m.Evidence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastBlockId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14962,151 +12432,18 @@ func (m *Header) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastBlockId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastCommitHash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LastCommitHash = append(m.LastCommitHash[:0], dAtA[iNdEx:postIndex]...) - if m.LastCommitHash == nil { - m.LastCommitHash = []byte{} - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) - if m.DataHash == nil { - m.DataHash = []byte{} - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorsHash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ValidatorsHash = append(m.ValidatorsHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.ValidatorsHash == nil { - m.ValidatorsHash = []byte{} - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.Validator == nil { + m.Validator = &types1.ValidatorParams{} } - m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) - if m.NextValidatorsHash == nil { - m.NextValidatorsHash = []byte{} + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex - case 10: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -15116,31 +12453,86 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.ConsensusHash = append(m.ConsensusHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.ConsensusHash == nil { - m.ConsensusHash = []byte{} + if m.Version == nil { + m.Version = &types1.VersionParams{} + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err } - var byteLen int + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxBytes", wireType) + } + m.MaxBytes = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -15150,31 +12542,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.MaxBytes |= int64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) - if m.AppHash == nil { - m.AppHash = []byte{} - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxGas", wireType) } - var byteLen int + m.MaxGas = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -15184,31 +12561,69 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.MaxGas |= int64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen - if postIndex < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthTypes } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.LastResultsHash = append(m.LastResultsHash[:0], dAtA[iNdEx:postIndex]...) 
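
The `wire` varint decoded at the top of each loop iteration packs the field number and wire type together, which is what `fieldNum := int32(wire >> 3)` and `wireType := int(wire & 0x7)` unpack. A quick check against the two BlockParams fields (field numbers 1 and 2, both varints):

    package main

    import "fmt"

    func main() {
        // On the wire a field's key is field_number<<3 | wire_type.
        // BlockParams: max_bytes = 1, max_gas = 2, both wire type 0 (varint).
        fmt.Printf("max_bytes key: 0x%02X\n", 1<<3|0) // 0x08
        fmt.Printf("max_gas key:   0x%02X\n", 2<<3|0) // 0x10
        // Decoding it back, exactly as the generated loop does:
        wire := uint64(0x10)
        fmt.Println("field:", wire>>3, "wire type:", wire&0x7) // field: 2 wire type: 0
    }
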
- if m.LastResultsHash == nil { - m.LastResultsHash = []byte{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvidenceHash", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - var byteLen int + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LastCommitInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LastCommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -15218,31 +12633,16 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.Round |= int32(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EvidenceHash = append(m.EvidenceHash[:0], dAtA[iNdEx:postIndex]...) - if m.EvidenceHash == nil { - m.EvidenceHash = []byte{} - } - iNdEx = postIndex - case 14: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -15252,24 +12652,24 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) - if m.ProposerAddress == nil { - m.ProposerAddress = []byte{} + m.Votes = append(m.Votes, VoteInfo{}) + if err := m.Votes[len(m.Votes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: @@ -15287,7 +12687,6 @@ func (m *Header) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
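
One change repeated in every trailer hunk of this diff: the old default case preserved unknown fields by appending their bytes to m.XXX_unrecognized, while the regenerated code only steps over them, so unknown fields no longer survive a decode/re-encode round trip:

    // before: retain the unknown field's bytes
    m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
    // after: skip them
    iNdEx += skippy
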
iNdEx += skippy } } @@ -15297,7 +12696,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } return nil } -func (m *Version) Unmarshal(dAtA []byte) error { +func (m *Event) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15320,17 +12719,17 @@ func (m *Version) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Version: wiretype end group for non-group") + return fmt.Errorf("proto: Event: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Block = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -15340,16 +12739,29 @@ func (m *Version) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Block |= uint64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field App", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) } - m.App = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -15359,11 +12771,26 @@ func (m *Version) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.App |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, EventAttribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -15379,7 +12806,6 @@ func (m *Version) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -15389,7 +12815,7 @@ func (m *Version) Unmarshal(dAtA []byte) error { } return nil } -func (m *BlockID) Unmarshal(dAtA []byte) error { +func (m *EventAttribute) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15412,15 +12838,15 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BlockID: wiretype end group for non-group") + return fmt.Errorf("proto: EventAttribute: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BlockID: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EventAttribute: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -15447,16 +12873,50 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) - if m.Hash == nil { - m.Hash = []byte{} + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PartsHeader", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -15466,25 +12926,12 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PartsHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.Index = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -15500,7 +12947,6 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
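
EventAttribute.Index above also shows the bool wire format: it is an ordinary varint, decoded into `v` and mapped through `bool(v != 0)`, so any nonzero value decodes as true:

    package main

    import "fmt"

    func main() {
        for _, v := range []int{0, 1, 42} { // decoded varint values
            fmt.Println(v, "->", v != 0) // how bool(v != 0) maps them
        }
    }
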
iNdEx += skippy } } @@ -15510,7 +12956,7 @@ func (m *BlockID) Unmarshal(dAtA []byte) error { } return nil } -func (m *PartSetHeader) Unmarshal(dAtA []byte) error { +func (m *TxResult) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15533,17 +12979,17 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PartSetHeader: wiretype end group for non-group") + return fmt.Errorf("proto: TxResult: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PartSetHeader: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: TxResult: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - m.Total = 0 + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -15553,14 +12999,33 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Total |= int32(b&0x7F) << shift + m.Height |= int64(b&0x7F) << shift if b < 0x80 { break } } case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -15587,9 +13052,42 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) - if m.Hash == nil { - m.Hash = []byte{} + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) + if m.Tx == nil { + m.Tx = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: @@ -15607,7 +13105,6 @@ func (m *PartSetHeader) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -15714,7 +13211,6 @@ func (m *Validator) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -15820,7 +13316,6 @@ func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -15927,7 +13422,6 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -15937,7 +13431,7 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { } return nil } -func (m *PubKey) Unmarshal(dAtA []byte) error { +func (m *Evidence) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15960,17 +13454,17 @@ func (m *PubKey) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PubKey: wiretype end group for non-group") + return fmt.Errorf("proto: Evidence: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PubKey: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Evidence: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { + if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var stringLen uint64 + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -15980,29 +13474,68 @@ func (m *PubKey) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Type |= EvidenceType(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -16012,26 +13545,44 @@ func (m *PubKey) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Data 
= append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) + } + m.TotalVotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalVotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -16047,7 +13598,6 @@ func (m *PubKey) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -16057,7 +13607,7 @@ func (m *PubKey) Unmarshal(dAtA []byte) error { } return nil } -func (m *Evidence) Unmarshal(dAtA []byte) error { +func (m *Snapshot) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16080,17 +13630,17 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Evidence: wiretype end group for non-group") + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Evidence: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - var stringLen uint64 + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -16100,29 +13650,16 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Height |= uint64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) } - var msglen int + m.Format = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -16132,30 +13669,16 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Format |= uint32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) } - m.Height = 0 + m.Chunks = 0 for shift := uint(0); ; shift += 7 { 
if shift >= 64 { return ErrIntOverflowTypes @@ -16165,16 +13688,16 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + m.Chunks |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -16184,30 +13707,31 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { - return err + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} } iNdEx = postIndex case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } - m.TotalVotingPower = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -16217,11 +13741,26 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TotalVotingPower |= int64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metadata = append(m.Metadata[:0], dAtA[iNdEx:postIndex]...) + if m.Metadata == nil { + m.Metadata = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -16237,7 +13776,6 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } diff --git a/abci/types/types.proto b/abci/types/types.proto deleted file mode 100644 index 351329de1..000000000 --- a/abci/types/types.proto +++ /dev/null @@ -1,347 +0,0 @@ -syntax = "proto3"; -package tendermint.abci.types; -option go_package = "github.com/tendermint/tendermint/abci/types"; - -// For more information on gogo.proto, see: -// https://github.com/gogo/protobuf/blob/master/extensions.md -import "third_party/proto/gogoproto/gogo.proto"; -import "crypto/merkle/merkle.proto"; -import "libs/kv/types.proto"; -import "google/protobuf/timestamp.proto"; -import "google/protobuf/duration.proto"; - -// This file is copied from http://github.com/tendermint/abci -// NOTE: When using custom types, mind the warnings. 
-// https://github.com/gogo/protobuf/blob/master/custom_types.md#warnings-and-issues - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.goproto_registration) = true; -// Generate tests -option (gogoproto.populate_all) = true; -option (gogoproto.equal_all) = true; -option (gogoproto.testgen_all) = true; - -//---------------------------------------- -// Request types - -message Request { - oneof value { - RequestEcho echo = 2; - RequestFlush flush = 3; - RequestInfo info = 4; - RequestSetOption set_option = 5; - RequestInitChain init_chain = 6; - RequestQuery query = 7; - RequestBeginBlock begin_block = 8; - RequestCheckTx check_tx = 9; - RequestDeliverTx deliver_tx = 19; - RequestEndBlock end_block = 11; - RequestCommit commit = 12; - } -} - -message RequestEcho { - string message = 1; -} - -message RequestFlush {} - -message RequestInfo { - string version = 1; - uint64 block_version = 2; - uint64 p2p_version = 3; -} - -// nondeterministic -message RequestSetOption { - string key = 1; - string value = 2; -} - -message RequestInitChain { - google.protobuf.Timestamp time = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - string chain_id = 2; - ConsensusParams consensus_params = 3; - repeated ValidatorUpdate validators = 4 [(gogoproto.nullable) = false]; - bytes app_state_bytes = 5; -} - -message RequestQuery { - bytes data = 1; - string path = 2; - int64 height = 3; - bool prove = 4; -} - -message RequestBeginBlock { - bytes hash = 1; - Header header = 2 [(gogoproto.nullable) = false]; - LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false]; - repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false]; -} - -enum CheckTxType { - New = 0; - Recheck = 1; -} - -message RequestCheckTx { - bytes tx = 1; - CheckTxType type = 2; -} - -message RequestDeliverTx { - bytes tx = 1; -} - -message RequestEndBlock { - int64 height = 1; -} - -message RequestCommit {} - -//---------------------------------------- -// Response types - -message Response { - oneof value { - ResponseException exception = 1; - ResponseEcho echo = 2; - ResponseFlush flush = 3; - ResponseInfo info = 4; - ResponseSetOption set_option = 5; - ResponseInitChain init_chain = 6; - ResponseQuery query = 7; - ResponseBeginBlock begin_block = 8; - ResponseCheckTx check_tx = 9; - ResponseDeliverTx deliver_tx = 10; - ResponseEndBlock end_block = 11; - ResponseCommit commit = 12; - } -} - -// nondeterministic -message ResponseException { - string error = 1; -} - -message ResponseEcho { - string message = 1; -} - -message ResponseFlush {} - -message ResponseInfo { - string data = 1; - - string version = 2; - uint64 app_version = 3; - - int64 last_block_height = 4; - bytes last_block_app_hash = 5; -} - -// nondeterministic -message ResponseSetOption { - uint32 code = 1; - // bytes data = 2; - string log = 3; - string info = 4; -} - -message ResponseInitChain { - ConsensusParams consensus_params = 1; - repeated ValidatorUpdate validators = 2 [(gogoproto.nullable) = false]; -} - -message ResponseQuery { - uint32 code = 1; - // bytes data = 2; // use "value" instead. 
- string log = 3; // nondeterministic - string info = 4; // nondeterministic - int64 index = 5; - bytes key = 6; - bytes value = 7; - tendermint.crypto.merkle.Proof proof = 8; - int64 height = 9; - string codespace = 10; -} - -message ResponseBeginBlock { - repeated Event events = 1 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; -} - -message ResponseCheckTx { - uint32 code = 1; - bytes data = 2; - string log = 3; // nondeterministic - string info = 4; // nondeterministic - int64 gas_wanted = 5; - int64 gas_used = 6; - repeated Event events = 7 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; - string codespace = 8; -} - -message ResponseDeliverTx { - uint32 code = 1; - bytes data = 2; - string log = 3; // nondeterministic - string info = 4; // nondeterministic - int64 gas_wanted = 5; - int64 gas_used = 6; - repeated Event events = 7 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; - string codespace = 8; -} - -message ResponseEndBlock { - repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false]; - ConsensusParams consensus_param_updates = 2; - repeated Event events = 3 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; -} - -message ResponseCommit { - // reserve 1 - bytes data = 2; - int64 retain_height = 3; -} - -//---------------------------------------- -// Misc. - -// ConsensusParams contains all consensus-relevant parameters -// that can be adjusted by the abci app -message ConsensusParams { - BlockParams block = 1; - EvidenceParams evidence = 2; - ValidatorParams validator = 3; -} - -// BlockParams contains limits on the block size. -message BlockParams { - // Note: must be greater than 0 - int64 max_bytes = 1; - // Note: must be greater or equal to -1 - int64 max_gas = 2; -} - -message EvidenceParams { - // Note: must be greater than 0 - int64 max_age_num_blocks = 1; - google.protobuf.Duration max_age_duration = 2 - [(gogoproto.nullable) = false, (gogoproto.stdduration) = true]; -} - -// ValidatorParams contains limits on validators. 
-message ValidatorParams { - repeated string pub_key_types = 1; -} - -message LastCommitInfo { - int32 round = 1; - repeated VoteInfo votes = 2 [(gogoproto.nullable) = false]; -} - -message Event { - string type = 1; - repeated tendermint.libs.kv.Pair attributes = 2 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "attributes,omitempty"]; -} - -//---------------------------------------- -// Blockchain Types - -message Header { - // basic block info - Version version = 1 [(gogoproto.nullable) = false]; - string chain_id = 2 [(gogoproto.customname) = "ChainID"]; - int64 height = 3; - google.protobuf.Timestamp time = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - - // prev block info - BlockID last_block_id = 5 [(gogoproto.nullable) = false]; - - // hashes of block data - bytes last_commit_hash = 6; // commit from validators from the last block - bytes data_hash = 7; // transactions - - // hashes from the app output from the prev block - bytes validators_hash = 8; // validators for the current block - bytes next_validators_hash = 9; // validators for the next block - bytes consensus_hash = 10; // consensus params for current block - bytes app_hash = 11; // state after txs from the previous block - bytes last_results_hash = 12; // root hash of all results from the txs from the previous block - - // consensus info - bytes evidence_hash = 13; // evidence included in the block - bytes proposer_address = 14; // original proposer of the block -} - -message Version { - uint64 Block = 1; - uint64 App = 2; -} - -message BlockID { - bytes hash = 1; - PartSetHeader parts_header = 2 [(gogoproto.nullable) = false]; -} - -message PartSetHeader { - int32 total = 1; - bytes hash = 2; -} - -// Validator -message Validator { - bytes address = 1; - // PubKey pub_key = 2 [(gogoproto.nullable)=false]; - int64 power = 3; -} - -// ValidatorUpdate -message ValidatorUpdate { - PubKey pub_key = 1 [(gogoproto.nullable) = false]; - int64 power = 2; -} - -// VoteInfo -message VoteInfo { - Validator validator = 1 [(gogoproto.nullable) = false]; - bool signed_last_block = 2; -} - -message PubKey { - string type = 1; - bytes data = 2; -} - -message Evidence { - string type = 1; - Validator validator = 2 [(gogoproto.nullable) = false]; - int64 height = 3; - google.protobuf.Timestamp time = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; - int64 total_voting_power = 5; -} - -//---------------------------------------- -// Service Definition - -service ABCIApplication { - rpc Echo(RequestEcho) returns (ResponseEcho); - rpc Flush(RequestFlush) returns (ResponseFlush); - rpc Info(RequestInfo) returns (ResponseInfo); - rpc SetOption(RequestSetOption) returns (ResponseSetOption); - rpc DeliverTx(RequestDeliverTx) returns (ResponseDeliverTx); - rpc CheckTx(RequestCheckTx) returns (ResponseCheckTx); - rpc Query(RequestQuery) returns (ResponseQuery); - rpc Commit(RequestCommit) returns (ResponseCommit); - rpc InitChain(RequestInitChain) returns (ResponseInitChain); - rpc BeginBlock(RequestBeginBlock) returns (ResponseBeginBlock); - rpc EndBlock(RequestEndBlock) returns (ResponseEndBlock); -} diff --git a/abci/types/typespb_test.go b/abci/types/typespb_test.go deleted file mode 100644 index eaa550054..000000000 --- a/abci/types/typespb_test.go +++ /dev/null @@ -1,4989 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
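
The deleted typespb_test.go is generated too: for every message it asserts the same round-trip property (populate randomly, marshal, unmarshal, compare) plus a fuzz pass that must not panic. The shape of each test, reduced to a runnable toy (hypothetical type M; the real tests use the generated NewPopulatedX helpers and gogoproto's proto.Marshal):

    package main

    import (
        "bytes"
        "fmt"
        "math/rand"
    )

    // M is a hypothetical message; Marshal/Unmarshal/Equal stand in for
    // the gogoproto-generated methods exercised in the real tests below.
    type M struct{ Data []byte }

    func (m *M) Marshal() []byte    { return append([]byte(nil), m.Data...) }
    func (m *M) Unmarshal(b []byte) { m.Data = append([]byte(nil), b...) }
    func (m *M) Equal(o *M) bool    { return bytes.Equal(m.Data, o.Data) }

    func main() {
        popr := rand.New(rand.NewSource(1)) // seeded RNG, like the tests
        p := &M{Data: []byte{byte(popr.Intn(256)), byte(popr.Intn(256))}} // "NewPopulatedM"
        msg := &M{}
        msg.Unmarshal(p.Marshal())
        fmt.Println("round trip lossless:", p.Equal(msg)) // true
    }
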
-// source: abci/types/types.proto - -package types - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" - github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" - proto "github.com/gogo/protobuf/proto" - golang_proto "github.com/golang/protobuf/proto" - _ "github.com/golang/protobuf/ptypes/duration" - _ "github.com/golang/protobuf/ptypes/timestamp" - _ "github.com/tendermint/tendermint/crypto/merkle" - _ "github.com/tendermint/tendermint/libs/kv" - math "math" - math_rand "math/rand" - testing "testing" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = golang_proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func TestRequestProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequest(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Request{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestRequestMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequest(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Request{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestEchoProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestEcho(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestEcho{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestRequestEchoMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestEcho(popr, false) - size := p.Size() - dAtA := make([]byte, 
size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestEcho{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestFlushProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestFlush(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestFlush{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestRequestFlushMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestFlush(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestFlush{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestInfoProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestInfo(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestInfo{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestRequestInfoMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestInfo(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestInfo{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = 
%v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestSetOptionProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestSetOption(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestSetOption{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestRequestSetOptionMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestSetOption(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestSetOption{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestInitChainProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestInitChain(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestInitChain{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestRequestInitChainMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestInitChain(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestInitChain{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestQueryProto(t *testing.T) { - seed := time.Now().UnixNano() 
- popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestQuery(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestQuery{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestRequestQueryMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestQuery(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestQuery{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestBeginBlockProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestBeginBlock(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestBeginBlock{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestRequestBeginBlockMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestBeginBlock(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestBeginBlock{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestCheckTxProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestCheckTx(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestCheckTx{} - if 
err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestRequestCheckTxMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestCheckTx(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestCheckTx{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestDeliverTxProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestDeliverTx(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestDeliverTx{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestRequestDeliverTxMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestDeliverTx(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestDeliverTx{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestEndBlockProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestEndBlock(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestEndBlock{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - 
if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestRequestEndBlockMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestEndBlock(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestEndBlock{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestCommitProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestCommit(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestCommit{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestRequestCommitMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestCommit(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestCommit{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponseProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponse(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Response{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - 
// shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestResponseMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponse(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Response{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponseExceptionProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseException(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseException{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestResponseExceptionMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseException(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseException{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponseEchoProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseEcho(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseEcho{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestResponseEchoMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseEcho(popr, false) - 
size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseEcho{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponseFlushProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseFlush(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseFlush{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestResponseFlushMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseFlush(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseFlush{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponseInfoProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseInfo(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseInfo{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestResponseInfoMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseInfo(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseInfo{} - if err := 
github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponseSetOptionProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseSetOption(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseSetOption{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestResponseSetOptionMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseSetOption(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseSetOption{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponseInitChainProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseInitChain(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseInitChain{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestResponseInitChainMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseInitChain(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseInitChain{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v 
!Proto %#v", seed, msg, p) - } -} - -func TestResponseQueryProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseQuery(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseQuery{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestResponseQueryMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseQuery(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseQuery{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponseBeginBlockProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseBeginBlock(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseBeginBlock{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestResponseBeginBlockMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseBeginBlock(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseBeginBlock{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponseCheckTxProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseCheckTx(popr, false) - dAtA, err := 
github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseCheckTx{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestResponseCheckTxMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseCheckTx(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseCheckTx{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponseDeliverTxProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseDeliverTx(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseDeliverTx{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestResponseDeliverTxMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseDeliverTx(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseDeliverTx{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponseEndBlockProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseEndBlock(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseEndBlock{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, 
err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestResponseEndBlockMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseEndBlock(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseEndBlock{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponseCommitProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseCommit(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseCommit{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestResponseCommitMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseCommit(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseCommit{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestConsensusParamsProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedConsensusParams(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ConsensusParams{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 
{ - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestConsensusParamsMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedConsensusParams(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ConsensusParams{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestBlockParamsProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockParams(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &BlockParams{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestBlockParamsMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockParams(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &BlockParams{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestEvidenceParamsProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedEvidenceParams(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &EvidenceParams{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - 
-func TestEvidenceParamsMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedEvidenceParams(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &EvidenceParams{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestValidatorParamsProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedValidatorParams(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ValidatorParams{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestValidatorParamsMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedValidatorParams(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ValidatorParams{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestLastCommitInfoProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedLastCommitInfo(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &LastCommitInfo{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestLastCommitInfoMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedLastCommitInfo(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - 
dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &LastCommitInfo{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestEventProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedEvent(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Event{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestEventMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedEvent(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Event{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestHeaderProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedHeader(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Header{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestHeaderMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedHeader(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Header{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) 
{ - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestVersionProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedVersion(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Version{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestVersionMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedVersion(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Version{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestBlockIDProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockID(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &BlockID{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestBlockIDMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockID(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &BlockID{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestPartSetHeaderProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedPartSetHeader(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", 
seed, err) - } - msg := &PartSetHeader{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestPartSetHeaderMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedPartSetHeader(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &PartSetHeader{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestValidatorProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedValidator(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Validator{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestValidatorMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedValidator(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Validator{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestValidatorUpdateProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedValidatorUpdate(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ValidatorUpdate{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } 
- if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestValidatorUpdateMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedValidatorUpdate(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ValidatorUpdate{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestVoteInfoProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedVoteInfo(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &VoteInfo{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestVoteInfoMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedVoteInfo(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &VoteInfo{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestPubKeyProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedPubKey(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &PubKey{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = 
github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestPubKeyMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedPubKey(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &PubKey{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestEvidenceProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedEvidence(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Evidence{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestEvidenceMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedEvidence(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Evidence{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequest(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Request{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestRequestEchoJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestEcho(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestEcho{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestRequestFlushJSON(t *testing.T) { - 
seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestFlush(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestFlush{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestRequestInfoJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestInfo(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestInfo{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestRequestSetOptionJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestSetOption(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestSetOption{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestRequestInitChainJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestInitChain(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestInitChain{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestRequestQueryJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestQuery(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestQuery{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestRequestBeginBlockJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestBeginBlock(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestBeginBlock{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} 
-func TestRequestCheckTxJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestCheckTx(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestCheckTx{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestRequestDeliverTxJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestDeliverTx(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestDeliverTx{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestRequestEndBlockJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestEndBlock(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestEndBlock{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestRequestCommitJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestCommit(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestCommit{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestResponseJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponse(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Response{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestResponseExceptionJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseException(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseException{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, 
%#v !Json Equal %#v", seed, msg, p) - } -} -func TestResponseEchoJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseEcho(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseEcho{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestResponseFlushJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseFlush(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseFlush{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestResponseInfoJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseInfo(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseInfo{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestResponseSetOptionJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseSetOption(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseSetOption{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestResponseInitChainJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseInitChain(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseInitChain{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestResponseQueryJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseQuery(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseQuery{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - 
-	}
-	if !p.Equal(msg) {
-		t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
-	}
-}
-
-// roundTripMessage is the subset of the generated API exercised by the
-// round-trip tests below; every message type in this file implements it.
-type roundTripMessage interface {
-	github_com_gogo_protobuf_proto.Message
-	Equal(that interface{}) bool
-	Size() int
-}
-
-// roundTripCases pairs each generated message type with a constructor for a
-// randomly populated instance and a constructor for an empty instance, so a
-// single table-driven test can cover every type.
-var roundTripCases = []struct {
-	name      string
-	populated func(r *math_rand.Rand) roundTripMessage
-	empty     func() roundTripMessage
-}{
-	{"Request", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedRequest(r, true) }, func() roundTripMessage { return &Request{} }},
-	{"RequestEcho", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedRequestEcho(r, true) }, func() roundTripMessage { return &RequestEcho{} }},
-	{"RequestFlush", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedRequestFlush(r, true) }, func() roundTripMessage { return &RequestFlush{} }},
-	{"RequestInfo", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedRequestInfo(r, true) }, func() roundTripMessage { return &RequestInfo{} }},
-	{"RequestSetOption", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedRequestSetOption(r, true) }, func() roundTripMessage { return &RequestSetOption{} }},
-	{"RequestInitChain", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedRequestInitChain(r, true) }, func() roundTripMessage { return &RequestInitChain{} }},
-	{"RequestQuery", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedRequestQuery(r, true) }, func() roundTripMessage { return &RequestQuery{} }},
-	{"RequestBeginBlock", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedRequestBeginBlock(r, true) }, func() roundTripMessage { return &RequestBeginBlock{} }},
-	{"RequestCheckTx", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedRequestCheckTx(r, true) }, func() roundTripMessage { return &RequestCheckTx{} }},
-	{"RequestDeliverTx", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedRequestDeliverTx(r, true) }, func() roundTripMessage { return &RequestDeliverTx{} }},
-	{"RequestEndBlock", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedRequestEndBlock(r, true) }, func() roundTripMessage { return &RequestEndBlock{} }},
-	{"RequestCommit", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedRequestCommit(r, true) }, func() roundTripMessage { return &RequestCommit{} }},
-	{"Response", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedResponse(r, true) }, func() roundTripMessage { return &Response{} }},
-	{"ResponseException", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedResponseException(r, true) }, func() roundTripMessage { return &ResponseException{} }},
-	{"ResponseEcho", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedResponseEcho(r, true) }, func() roundTripMessage { return &ResponseEcho{} }},
-	{"ResponseFlush", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedResponseFlush(r, true) }, func() roundTripMessage { return &ResponseFlush{} }},
-	{"ResponseInfo", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedResponseInfo(r, true) }, func() roundTripMessage { return &ResponseInfo{} }},
-	{"ResponseSetOption", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedResponseSetOption(r, true) }, func() roundTripMessage { return &ResponseSetOption{} }},
-	{"ResponseInitChain", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedResponseInitChain(r, true) }, func() roundTripMessage { return &ResponseInitChain{} }},
-	{"ResponseQuery", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedResponseQuery(r, true) }, func() roundTripMessage { return &ResponseQuery{} }},
-	{"ResponseBeginBlock", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedResponseBeginBlock(r, true) }, func() roundTripMessage { return &ResponseBeginBlock{} }},
-	{"ResponseCheckTx", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedResponseCheckTx(r, true) }, func() roundTripMessage { return &ResponseCheckTx{} }},
-	{"ResponseDeliverTx", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedResponseDeliverTx(r, true) }, func() roundTripMessage { return &ResponseDeliverTx{} }},
-	{"ResponseEndBlock", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedResponseEndBlock(r, true) }, func() roundTripMessage { return &ResponseEndBlock{} }},
-	{"ResponseCommit", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedResponseCommit(r, true) }, func() roundTripMessage { return &ResponseCommit{} }},
-	{"ConsensusParams", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedConsensusParams(r, true) }, func() roundTripMessage { return &ConsensusParams{} }},
-	{"BlockParams", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedBlockParams(r, true) }, func() roundTripMessage { return &BlockParams{} }},
-	{"EvidenceParams", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedEvidenceParams(r, true) }, func() roundTripMessage { return &EvidenceParams{} }},
-	{"ValidatorParams", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedValidatorParams(r, true) }, func() roundTripMessage { return &ValidatorParams{} }},
-	{"LastCommitInfo", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedLastCommitInfo(r, true) }, func() roundTripMessage { return &LastCommitInfo{} }},
-	{"Event", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedEvent(r, true) }, func() roundTripMessage { return &Event{} }},
-	{"Header", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedHeader(r, true) }, func() roundTripMessage { return &Header{} }},
-	{"Version", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedVersion(r, true) }, func() roundTripMessage { return &Version{} }},
-	{"BlockID", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedBlockID(r, true) }, func() roundTripMessage { return &BlockID{} }},
-	{"PartSetHeader", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedPartSetHeader(r, true) }, func() roundTripMessage { return &PartSetHeader{} }},
-	{"Validator", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedValidator(r, true) }, func() roundTripMessage { return &Validator{} }},
-	{"ValidatorUpdate", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedValidatorUpdate(r, true) }, func() roundTripMessage { return &ValidatorUpdate{} }},
-	{"VoteInfo", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedVoteInfo(r, true) }, func() roundTripMessage { return &VoteInfo{} }},
-	{"PubKey", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedPubKey(r, true) }, func() roundTripMessage { return &PubKey{} }},
-	{"Evidence", func(r *math_rand.Rand) roundTripMessage { return NewPopulatedEvidence(r, true) }, func() roundTripMessage { return &Evidence{} }},
-}
-
-// TestMessagesJSONRoundTrip checks that a randomly populated instance of
-// every message type survives a jsonpb marshal/unmarshal round trip.
-func TestMessagesJSONRoundTrip(t *testing.T) {
-	seed := time.Now().UnixNano()
-	popr := math_rand.New(math_rand.NewSource(seed))
-	marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
-	for _, tc := range roundTripCases {
-		p := tc.populated(popr)
-		jsondata, err := marshaler.MarshalToString(p)
-		if err != nil {
-			t.Fatalf("seed = %d, type = %s, err = %v", seed, tc.name, err)
-		}
-		msg := tc.empty()
-		if err := github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg); err != nil {
-			t.Fatalf("seed = %d, type = %s, err = %v", seed, tc.name, err)
-		}
-		if !p.Equal(msg) {
-			t.Fatalf("seed = %d, type = %s, %#v !Json Equal %#v", seed, tc.name, msg, p)
-		}
-	}
-}
-
-// TestMessagesProtoTextRoundTrip checks the same round trip through both the
-// standard and the compact proto text formats for every message type.
-func TestMessagesProtoTextRoundTrip(t *testing.T) {
-	seed := time.Now().UnixNano()
-	popr := math_rand.New(math_rand.NewSource(seed))
-	for _, tc := range roundTripCases {
-		p := tc.populated(popr)
-		for _, dAtA := range []string{
-			github_com_gogo_protobuf_proto.MarshalTextString(p),
-			github_com_gogo_protobuf_proto.CompactTextString(p),
-		} {
-			msg := tc.empty()
-			if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
-				t.Fatalf("seed = %d, type = %s, err = %v", seed, tc.name, err)
-			}
-			if !p.Equal(msg) {
-				t.Fatalf("seed = %d, type = %s, %#v !Proto %#v", seed, tc.name, msg, p)
-			}
-		}
-	}
-}
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestEvidenceProtoText(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedEvidence(popr, true) - dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) - msg := &Evidence{} - if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestEvidenceProtoCompactText(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedEvidence(popr, true) - dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) - msg := &Evidence{} - if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequest(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestRequestEchoSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestEcho(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestRequestFlushSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestFlush(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestRequestInfoSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestInfo(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } 
- size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestRequestSetOptionSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestSetOption(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestRequestInitChainSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestInitChain(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestRequestQuerySize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestQuery(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestRequestBeginBlockSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestBeginBlock(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestRequestCheckTxSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := 
NewPopulatedRequestCheckTx(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestRequestDeliverTxSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestDeliverTx(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestRequestEndBlockSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestEndBlock(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestRequestCommitSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestCommit(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestResponseSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponse(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size 
%v", seed, size, size3) - } -} - -func TestResponseExceptionSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseException(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestResponseEchoSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseEcho(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestResponseFlushSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseFlush(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestResponseInfoSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseInfo(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestResponseSetOptionSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseSetOption(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before 
marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestResponseInitChainSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseInitChain(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestResponseQuerySize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseQuery(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestResponseBeginBlockSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseBeginBlock(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestResponseCheckTxSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseCheckTx(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestResponseDeliverTxSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseDeliverTx(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, 
err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestResponseEndBlockSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseEndBlock(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestResponseCommitSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseCommit(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestConsensusParamsSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedConsensusParams(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestBlockParamsSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockParams(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestEvidenceParamsSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := 
NewPopulatedEvidenceParams(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestValidatorParamsSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedValidatorParams(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestLastCommitInfoSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedLastCommitInfo(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestEventSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedEvent(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestHeaderSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedHeader(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) 
- } -} - -func TestVersionSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedVersion(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestBlockIDSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockID(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestPartSetHeaderSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedPartSetHeader(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestValidatorSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedValidator(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestValidatorUpdateSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedValidatorUpdate(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := 
github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestVoteInfoSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedVoteInfo(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestPubKeySize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedPubKey(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestEvidenceSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedEvidence(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -//These tests are generated by github.com/gogo/protobuf/plugin/testgen diff --git a/abci/types/util.go b/abci/types/util.go index 3cde88232..8205fef7e 100644 --- a/abci/types/util.go +++ b/abci/types/util.go @@ -1,7 +1,6 @@ package types import ( - "bytes" "sort" ) @@ -24,11 +23,9 @@ func (v ValidatorUpdates) Len() int { // XXX: doesn't distinguish same validator with different power func (v ValidatorUpdates) Less(i, j int) bool { - return bytes.Compare(v[i].PubKey.Data, v[j].PubKey.Data) <= 0 + return v[i].PubKey.Compare(v[j].PubKey) <= 0 } func (v ValidatorUpdates) Swap(i, j int) { - v1 := v[i] - v[i] = v[j] - v[j] = v1 + v[i], v[j] = v[j], v[i] } diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 4aa8c2abb..000000000 --- a/appveyor.yml +++ /dev/null @@ -1,12 +0,0 @@ -version: 1.0.{build} -configuration: Release -platform: -- x64 -- x86 -clone_folder: c:\go\path\src\github.com\tendermint\tendermint -before_build: -- cmd: set GOPATH=%GOROOT%\path -- cmd: set PATH=%GOPATH%\bin;%PATH% -build_script: -- cmd: make test -test: off diff --git a/behaviour/doc.go b/behaviour/doc.go new file mode 100644 index 000000000..40061e095 --- /dev/null +++ b/behaviour/doc.go @@ 
-0,0 +1,42 @@ +/* +Package behaviour provides a mechanism for reactors to report behaviour of peers. + +Instead of a reactor calling the switch directly, it calls the behaviour module, which +handles stopping the peer or marking it as good on behalf of the reactor. + +There are four different behaviours a reactor can report. + +1. bad message + +type badMessage struct { + explanation string +} + +This message will request the peer be stopped for an error + +2. message out of order + +type messageOutOfOrder struct { + explanation string +} + +This message will request the peer be stopped for an error + +3. consensus vote + +type consensusVote struct { + explanation string +} + +This message will request the peer be marked as good + +4. block part + +type blockPart struct { + explanation string +} + +This message will request the peer be marked as good + +*/ +package behaviour diff --git a/behaviour/reporter.go b/behaviour/reporter.go index 1f16b9bb3..8dc10389e 100644 --- a/behaviour/reporter.go +++ b/behaviour/reporter.go @@ -2,8 +2,8 @@ package behaviour import ( "errors" - "sync" + tmsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/p2p" ) @@ -50,7 +50,7 @@ func (spbr *SwitchReporter) Report(behaviour PeerBehaviour) error { // interface used in reactor tests to ensure reactors report the correct // behaviour in manufactured scenarios. type MockReporter struct { - mtx sync.RWMutex + mtx tmsync.RWMutex pb map[p2p.ID][]PeerBehaviour } diff --git a/behaviour/reporter_test.go b/behaviour/reporter_test.go index af61339aa..330505bd2 100644 --- a/behaviour/reporter_test.go +++ b/behaviour/reporter_test.go @@ -20,7 +20,9 @@ func TestMockReporter(t *testing.T) { } badMessage := bh.BadMessage(peerID, "bad message") - pr.Report(badMessage) + if err := pr.Report(badMessage); err != nil { + t.Error(err) + } behaviours = pr.GetBehaviours(peerID) if len(behaviours) != 1 { t.Error("Expected the peer have one reported behaviour") @@ -164,7 +166,9 @@ func TestMockPeerBehaviourReporterConcurrency(t *testing.T) { for { select { case pb := <-scriptItems: - pr.Report(pb.behaviour) + if err := pr.Report(pb.behaviour); err != nil { + t.Error(err) + } case <-done: return } diff --git a/blockchain/msgs.go b/blockchain/msgs.go new file mode 100644 index 000000000..cd5ef977f --- /dev/null +++ b/blockchain/msgs.go @@ -0,0 +1,108 @@ +package blockchain + +import ( + "errors" + "fmt" + + "github.com/gogo/protobuf/proto" + + bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" + "github.com/tendermint/tendermint/types" +) + +const ( + // NOTE: keep up to date with bcproto.BlockResponse + BlockResponseMessagePrefixSize = 4 + BlockResponseMessageFieldKeySize = 1 + MaxMsgSize = types.MaxBlockSizeBytes + + BlockResponseMessagePrefixSize + + BlockResponseMessageFieldKeySize +) + +// EncodeMsg encodes a Protobuf message. +func EncodeMsg(pb proto.Message) ([]byte, error) { + msg := bcproto.Message{} + + switch pb := pb.(type) { + case *bcproto.BlockRequest: + msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb} + case *bcproto.BlockResponse: + msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb} + case *bcproto.NoBlockResponse: + msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb} + case *bcproto.StatusRequest: + msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb} + case *bcproto.StatusResponse: + msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb} + default: + return nil, fmt.Errorf("unknown message type %T", pb) + } + + bz, err := 
proto.Marshal(&msg) + if err != nil { + return nil, fmt.Errorf("unable to marshal %T: %w", pb, err) + } + + return bz, nil +} + +// DecodeMsg decodes a Protobuf message. +func DecodeMsg(bz []byte) (proto.Message, error) { + pb := &bcproto.Message{} + + err := proto.Unmarshal(bz, pb) + if err != nil { + return nil, err + } + + switch msg := pb.Sum.(type) { + case *bcproto.Message_BlockRequest: + return msg.BlockRequest, nil + case *bcproto.Message_BlockResponse: + return msg.BlockResponse, nil + case *bcproto.Message_NoBlockResponse: + return msg.NoBlockResponse, nil + case *bcproto.Message_StatusRequest: + return msg.StatusRequest, nil + case *bcproto.Message_StatusResponse: + return msg.StatusResponse, nil + default: + return nil, fmt.Errorf("unknown message type %T", msg) + } +} + +// ValidateMsg validates a message. +func ValidateMsg(pb proto.Message) error { + if pb == nil { + return errors.New("message cannot be nil") + } + + switch msg := pb.(type) { + case *bcproto.BlockRequest: + if msg.Height < 0 { + return errors.New("negative Height") + } + case *bcproto.BlockResponse: + // validate basic is called later when converting from proto + return nil + case *bcproto.NoBlockResponse: + if msg.Height < 0 { + return errors.New("negative Height") + } + case *bcproto.StatusResponse: + if msg.Base < 0 { + return errors.New("negative Base") + } + if msg.Height < 0 { + return errors.New("negative Height") + } + if msg.Base > msg.Height { + return fmt.Errorf("base %v cannot be greater than height %v", msg.Base, msg.Height) + } + case *bcproto.StatusRequest: + return nil + default: + return fmt.Errorf("unknown message type %T", msg) + } + return nil +} diff --git a/blockchain/msgs_test.go b/blockchain/msgs_test.go new file mode 100644 index 000000000..df8efca14 --- /dev/null +++ b/blockchain/msgs_test.go @@ -0,0 +1,125 @@ +package blockchain + +import ( + "encoding/hex" + "math" + "testing" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" + "github.com/tendermint/tendermint/types" +) + +func TestBcBlockRequestMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + requestHeight int64 + expectErr bool + }{ + {"Valid Request Message", 0, false}, + {"Valid Request Message", 1, false}, + {"Invalid Request Message", -1, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + request := bcproto.BlockRequest{Height: tc.requestHeight} + assert.Equal(t, tc.expectErr, ValidateMsg(&request) != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + nonResponseHeight int64 + expectErr bool + }{ + {"Valid Non-Response Message", 0, false}, + {"Valid Non-Response Message", 1, false}, + {"Invalid Non-Response Message", -1, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + nonResponse := bcproto.NoBlockResponse{Height: tc.nonResponseHeight} + assert.Equal(t, tc.expectErr, ValidateMsg(&nonResponse) != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBcStatusRequestMessageValidateBasic(t *testing.T) { + request := bcproto.StatusRequest{} + assert.NoError(t, ValidateMsg(&request)) +} + +func TestBcStatusResponseMessageValidateBasic(t *testing.T) { + testCases := []struct { + testName string + responseHeight int64 + 
expectErr bool + }{ + {"Valid Response Message", 0, false}, + {"Valid Response Message", 1, false}, + {"Invalid Response Message", -1, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + response := bcproto.StatusResponse{Height: tc.responseHeight} + assert.Equal(t, tc.expectErr, ValidateMsg(&response) != nil, "Validate Basic had an unexpected result") + }) + } +} + +// nolint:lll // ignore line length in tests +func TestBlockchainMessageVectors(t *testing.T) { + block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil) + block.Version.Block = 11 // overwrite updated protocol version + + bpb, err := block.ToProto() + require.NoError(t, err) + + testCases := []struct { + testName string + bmsg proto.Message + expBytes string + }{ + {"BlockRequestMessage", &bcproto.Message{Sum: &bcproto.Message_BlockRequest{ + BlockRequest: &bcproto.BlockRequest{Height: 1}}}, "0a020801"}, + {"BlockRequestMessage", &bcproto.Message{Sum: &bcproto.Message_BlockRequest{ + BlockRequest: &bcproto.BlockRequest{Height: math.MaxInt64}}}, + "0a0a08ffffffffffffffff7f"}, + {"BlockResponseMessage", &bcproto.Message{Sum: &bcproto.Message_BlockResponse{ + BlockResponse: &bcproto.BlockResponse{Block: bpb}}}, "1a700a6e0a5b0a02080b1803220b088092b8c398feffffff012a0212003a20c4da88e876062aa1543400d50d0eaa0dac88096057949cfb7bca7f3a48c04bf96a20e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855120d0a0b48656c6c6f20576f726c641a00"}, + {"NoBlockResponseMessage", &bcproto.Message{Sum: &bcproto.Message_NoBlockResponse{ + NoBlockResponse: &bcproto.NoBlockResponse{Height: 1}}}, "12020801"}, + {"NoBlockResponseMessage", &bcproto.Message{Sum: &bcproto.Message_NoBlockResponse{ + NoBlockResponse: &bcproto.NoBlockResponse{Height: math.MaxInt64}}}, + "120a08ffffffffffffffff7f"}, + {"StatusRequestMessage", &bcproto.Message{Sum: &bcproto.Message_StatusRequest{ + StatusRequest: &bcproto.StatusRequest{}}}, + "2200"}, + {"StatusResponseMessage", &bcproto.Message{Sum: &bcproto.Message_StatusResponse{ + StatusResponse: &bcproto.StatusResponse{Height: 1, Base: 2}}}, + "2a0408011002"}, + {"StatusResponseMessage", &bcproto.Message{Sum: &bcproto.Message_StatusResponse{ + StatusResponse: &bcproto.StatusResponse{Height: math.MaxInt64, Base: math.MaxInt64}}}, + "2a1408ffffffffffffffff7f10ffffffffffffffff7f"}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + bz, _ := proto.Marshal(tc.bmsg) + + require.Equal(t, tc.expBytes, hex.EncodeToString(bz)) + }) + } +} diff --git a/blockchain/v0/codec.go b/blockchain/v0/codec.go deleted file mode 100644 index f023bbfa1..000000000 --- a/blockchain/v0/codec.go +++ /dev/null @@ -1,14 +0,0 @@ -package v0 - -import ( - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/types" -) - -var cdc = amino.NewCodec() - -func init() { - RegisterBlockchainMessages(cdc) - types.RegisterBlockAmino(cdc) -} diff --git a/blockchain/v0/pool.go b/blockchain/v0/pool.go index bd8165752..69e0b55c4 100644 --- a/blockchain/v0/pool.go +++ b/blockchain/v0/pool.go @@ -4,14 +4,13 @@ import ( "errors" "fmt" "math" - "sync" "sync/atomic" "time" flow "github.com/tendermint/tendermint/libs/flowrate" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - + tmsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" ) @@ -64,7 +63,7 @@ type BlockPool struct { service.BaseService startTime 
time.Time - mtx sync.Mutex + mtx tmsync.Mutex // block requests requesters map[int64]*bpRequester height int64 // the lowest key in requesters. @@ -215,7 +214,9 @@ func (pool *BlockPool) PopRequest() { PanicSanity("PopRequest() requires a valid block") } */ - r.Stop() + if err := r.Stop(); err != nil { + pool.Logger.Error("Error stopping requester", "err", err) + } delete(pool.requesters, pool.height) pool.height++ } else { @@ -508,9 +509,9 @@ type bpRequester struct { pool *BlockPool height int64 gotBlockCh chan struct{} - redoCh chan p2p.ID //redo may send multitime, add peerId to identify repeat + redoCh chan p2p.ID // redo may send multitime, add peerId to identify repeat - mtx sync.Mutex + mtx tmsync.Mutex peerID p2p.ID block *types.Block } @@ -600,7 +601,7 @@ OUTER_LOOP: } peer = bpr.pool.pickIncrAvailablePeer(bpr.height) if peer == nil { - //log.Info("No peers available", "height", height) + // log.Info("No peers available", "height", height) time.Sleep(requestIntervalMS * time.Millisecond) continue PICK_PEER_LOOP } @@ -616,7 +617,9 @@ OUTER_LOOP: for { select { case <-bpr.pool.Quit(): - bpr.Stop() + if err := bpr.Stop(); err != nil { + bpr.Logger.Error("Error stopping requester", "err", err) + } return case <-bpr.Quit(): return diff --git a/blockchain/v0/pool_test.go b/blockchain/v0/pool_test.go index 9a3dd299c..1653fe74a 100644 --- a/blockchain/v0/pool_test.go +++ b/blockchain/v0/pool_test.go @@ -22,7 +22,7 @@ type testPeer struct { id p2p.ID base int64 height int64 - inputChan chan inputData //make sure each peer's data is sequential + inputChan chan inputData // make sure each peer's data is sequential } type inputData struct { @@ -90,7 +90,11 @@ func TestBlockPoolBasic(t *testing.T) { t.Error(err) } - defer pool.Stop() + t.Cleanup(func() { + if err := pool.Stop(); err != nil { + t.Error(err) + } + }) peers.start() defer peers.stop() @@ -144,7 +148,11 @@ func TestBlockPoolTimeout(t *testing.T) { if err != nil { t.Error(err) } - defer pool.Stop() + t.Cleanup(func() { + if err := pool.Stop(); err != nil { + t.Error(err) + } + }) for _, peer := range peers { t.Logf("Peer %v", peer.id) @@ -206,7 +214,11 @@ func TestBlockPoolRemovePeer(t *testing.T) { pool.SetLogger(log.TestingLogger()) err := pool.Start() require.NoError(t, err) - defer pool.Stop() + t.Cleanup(func() { + if err := pool.Stop(); err != nil { + t.Error(err) + } + }) // add peers for peerID, peer := range peers { diff --git a/blockchain/v0/reactor.go b/blockchain/v0/reactor.go index 247222160..dd3878669 100644 --- a/blockchain/v0/reactor.go +++ b/blockchain/v0/reactor.go @@ -1,15 +1,14 @@ package v0 import ( - "errors" "fmt" "reflect" "time" - amino "github.com/tendermint/go-amino" - + bc "github.com/tendermint/tendermint/blockchain" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" + bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" @@ -29,19 +28,12 @@ const ( statusUpdateIntervalSeconds = 10 // check if we should switch to consensus reactor switchToConsensusIntervalSeconds = 1 - - // NOTE: keep up to date with bcBlockResponseMessage - bcBlockResponseMessagePrefixSize = 4 - bcBlockResponseMessageFieldKeySize = 1 - maxMsgSize = types.MaxBlockSizeBytes + - bcBlockResponseMessagePrefixSize + - bcBlockResponseMessageFieldKeySize ) type consensusReactor interface { // for when we switch from blockchain reactor and fast sync to // the consensus 
machine - SwitchToConsensus(sm.State, uint64) + SwitchToConsensus(state sm.State, skipWAL bool) } type peerError struct { @@ -83,11 +75,11 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *st const capacity = 1000 // must be bigger than peers count errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock - pool := NewBlockPool( - store.Height()+1, - requestsCh, - errorsCh, - ) + startHeight := store.Height() + 1 + if startHeight == 1 { + startHeight = state.InitialHeight + } + pool := NewBlockPool(startHeight, requestsCh, errorsCh) bcR := &BlockchainReactor{ initialState: state, @@ -115,14 +107,32 @@ func (bcR *BlockchainReactor) OnStart() error { if err != nil { return err } - go bcR.poolRoutine() + go bcR.poolRoutine(false) } return nil } +// SwitchToFastSync is called by the state sync reactor when switching to fast sync. +func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error { + bcR.fastSync = true + bcR.initialState = state + + bcR.pool.height = state.LastBlockHeight + 1 + err := bcR.pool.Start() + if err != nil { + return err + } + go bcR.poolRoutine(true) + return nil +} + // OnStop implements service.Service. func (bcR *BlockchainReactor) OnStop() { - bcR.pool.Stop() + if bcR.fastSync { + if err := bcR.pool.Stop(); err != nil { + bcR.Logger.Error("Error stopping pool", "err", err) + } + } } // GetChannels implements Reactor @@ -133,17 +143,21 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { Priority: 10, SendQueueCapacity: 1000, RecvBufferCapacity: 50 * 4096, - RecvMessageCapacity: maxMsgSize, + RecvMessageCapacity: bc.MaxMsgSize, }, } } // AddPeer implements Reactor by sending our state to peer. func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{ - Height: bcR.store.Height(), + msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{ Base: bcR.store.Base(), - }) + Height: bcR.store.Height()}) + if err != nil { + bcR.Logger.Error("could not convert msg to protobuf", "err", err) + return + } + peer.Send(BlockchainChannel, msgBytes) // it's OK if send fails. will try later in poolRoutine @@ -158,31 +172,49 @@ func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) { // respondToPeer loads a block and sends it to the requesting peer, // if we have it. Otherwise, we'll respond saying we don't have it. 
-func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage, +func (bcR *BlockchainReactor) respondToPeer(msg *bcproto.BlockRequest, src p2p.Peer) (queued bool) { block := bcR.store.LoadBlock(msg.Height) if block != nil { - msgBytes := cdc.MustMarshalBinaryBare(&bcBlockResponseMessage{Block: block}) + bl, err := block.ToProto() + if err != nil { + bcR.Logger.Error("could not convert msg to protobuf", "err", err) + return false + } + + msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bl}) + if err != nil { + bcR.Logger.Error("could not marshal msg", "err", err) + return false + } + + return src.TrySend(BlockchainChannel, msgBytes) } bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height) - msgBytes := cdc.MustMarshalBinaryBare(&bcNoBlockResponseMessage{Height: msg.Height}) + msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height}) + if err != nil { + bcR.Logger.Error("could not convert msg to protobuf", "err", err) + return false + } + return src.TrySend(BlockchainChannel, msgBytes) } // Receive implements Reactor by handling 4 types of messages (look below). +// XXX: do not call any methods that can block or incur heavy processing. +// https://github.com/tendermint/tendermint/issues/2888 func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - msg, err := decodeMsg(msgBytes) + msg, err := bc.DecodeMsg(msgBytes) if err != nil { bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) bcR.Switch.StopPeerForError(src, err) return } - if err = msg.ValidateBasic(); err != nil { + if err = bc.ValidateMsg(msg); err != nil { bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) bcR.Switch.StopPeerForError(src, err) return @@ -191,20 +223,31 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg) switch msg := msg.(type) { - case *bcBlockRequestMessage: + case *bcproto.BlockRequest: bcR.respondToPeer(msg, src) - case *bcBlockResponseMessage: - bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes)) - case *bcStatusRequestMessage: + case *bcproto.BlockResponse: + bi, err := types.BlockFromProto(msg.Block) + if err != nil { + bcR.Logger.Error("Block content is invalid", "err", err) + bcR.Switch.StopPeerForError(src, err) + return + } + bcR.pool.AddBlock(src.ID(), bi, len(msgBytes)) + case *bcproto.StatusRequest: // Send peer our state. - src.TrySend(BlockchainChannel, cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{ + msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{ Height: bcR.store.Height(), Base: bcR.store.Base(), - })) - case *bcStatusResponseMessage: + }) + if err != nil { + bcR.Logger.Error("could not convert msg to protobuf", "err", err) + return + } + src.TrySend(BlockchainChannel, msgBytes) + case *bcproto.StatusResponse: // Got a peer status. Unverified. bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height) - case *bcNoBlockResponseMessage: + case *bcproto.NoBlockResponse: bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height) default: bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) @@ -213,7 +256,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) // Handle messages from the poolReactor telling the reactor what to do. // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down! 
-func (bcR *BlockchainReactor) poolRoutine() { +func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) { trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond) statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second) @@ -241,7 +284,12 @@ func (bcR *BlockchainReactor) poolRoutine() { if peer == nil { continue } - msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{request.Height}) + msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: request.Height}) + if err != nil { + bcR.Logger.Error("could not convert msg to proto", "err", err) + continue + } + queued := peer.TrySend(BlockchainChannel, msgBytes) if !queued { bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height) @@ -270,10 +318,12 @@ FOR_LOOP: "outbound", outbound, "inbound", inbound) if bcR.pool.IsCaughtUp() { bcR.Logger.Info("Time to switch to consensus reactor!", "height", height) - bcR.pool.Stop() + if err := bcR.pool.Stop(); err != nil { + bcR.Logger.Error("Error stopping pool", "err", err) + } conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor) if ok { - conR.SwitchToConsensus(state, blocksSynced) + conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced) } // else { // should only happen during testing @@ -299,7 +349,7 @@ FOR_LOOP: // See if there are any blocks to sync. first, second := bcR.pool.PeekTwoBlocks() - //bcR.Logger.Info("TrySync peeked", "first", first, "second", second) + // bcR.Logger.Info("TrySync peeked", "first", first, "second", second) if first == nil || second == nil { // We need both to sync the first block. continue FOR_LOOP @@ -309,13 +359,13 @@ FOR_LOOP: } firstParts := first.MakePartSet(types.BlockPartSizeBytes) - firstPartsHeader := firstParts.Header() - firstID := types.BlockID{Hash: first.Hash(), PartsHeader: firstPartsHeader} + firstPartSetHeader := firstParts.Header() + firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader} // Finally, verify the first block using the second's commit // NOTE: we can probably make this more efficient, but note that calling // first.Hash() doesn't verify the tx contents, so MakePartSet() is // currently necessary. - err := state.Validators.VerifyCommit( + err := state.Validators.VerifyCommitLight( chainID, firstID, first.Height, second.LastCommit) if err != nil { bcR.Logger.Error("Error in validation", "err", err) @@ -367,135 +417,13 @@ FOR_LOOP: // BroadcastStatusRequest broadcasts `BlockStore` base and height. func (bcR *BlockchainReactor) BroadcastStatusRequest() error { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{ - Base: bcR.store.Base(), - Height: bcR.store.Height(), - }) - bcR.Switch.Broadcast(BlockchainChannel, msgBytes) - return nil -} - -//----------------------------------------------------------------------------- -// Messages - -// BlockchainMessage is a generic message for this reactor. -type BlockchainMessage interface { - ValidateBasic() error -} - -// RegisterBlockchainMessages registers the fast sync messages for amino encoding. 
-func RegisterBlockchainMessages(cdc *amino.Codec) { - cdc.RegisterInterface((*BlockchainMessage)(nil), nil) - cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil) - cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/blockchain/BlockResponse", nil) - cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/blockchain/NoBlockResponse", nil) - cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/blockchain/StatusResponse", nil) - cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/blockchain/StatusRequest", nil) -} - -func decodeMsg(bz []byte) (msg BlockchainMessage, err error) { - if len(bz) > maxMsgSize { - return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize) - } - err = cdc.UnmarshalBinaryBare(bz, &msg) - return -} - -//------------------------------------- - -type bcBlockRequestMessage struct { - Height int64 -} - -// ValidateBasic performs basic validation. -func (m *bcBlockRequestMessage) ValidateBasic() error { - if m.Height < 0 { - return errors.New("negative Height") - } - return nil -} - -func (m *bcBlockRequestMessage) String() string { - return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height) -} - -type bcNoBlockResponseMessage struct { - Height int64 -} - -// ValidateBasic performs basic validation. -func (m *bcNoBlockResponseMessage) ValidateBasic() error { - if m.Height < 0 { - return errors.New("negative Height") - } - return nil -} - -func (m *bcNoBlockResponseMessage) String() string { - return fmt.Sprintf("[bcNoBlockResponseMessage %d]", m.Height) -} - -//------------------------------------- - -type bcBlockResponseMessage struct { - Block *types.Block -} - -// ValidateBasic performs basic validation. -func (m *bcBlockResponseMessage) ValidateBasic() error { - return m.Block.ValidateBasic() -} - -func (m *bcBlockResponseMessage) String() string { - return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height) -} - -//------------------------------------- - -type bcStatusRequestMessage struct { - Height int64 - Base int64 -} - -// ValidateBasic performs basic validation. -func (m *bcStatusRequestMessage) ValidateBasic() error { - if m.Base < 0 { - return errors.New("negative Base") - } - if m.Height < 0 { - return errors.New("negative Height") - } - if m.Base > m.Height { - return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) + bm, err := bc.EncodeMsg(&bcproto.StatusRequest{}) + if err != nil { + bcR.Logger.Error("could not convert msg to proto", "err", err) + return fmt.Errorf("could not convert msg to proto: %w", err) } - return nil -} - -func (m *bcStatusRequestMessage) String() string { - return fmt.Sprintf("[bcStatusRequestMessage %v:%v]", m.Base, m.Height) -} -//------------------------------------- - -type bcStatusResponseMessage struct { - Height int64 - Base int64 -} + bcR.Switch.Broadcast(BlockchainChannel, bm) -// ValidateBasic performs basic validation. 
-func (m *bcStatusResponseMessage) ValidateBasic() error { - if m.Base < 0 { - return errors.New("negative Base") - } - if m.Height < 0 { - return errors.New("negative Height") - } - if m.Base > m.Height { - return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) - } return nil } - -func (m *bcStatusResponseMessage) String() string { - return fmt.Sprintf("[bcStatusResponseMessage %v:%v]", m.Base, m.Height) -} diff --git a/blockchain/v0/reactor_test.go b/blockchain/v0/reactor_test.go index a31c9a141..a88b499f4 100644 --- a/blockchain/v0/reactor_test.go +++ b/blockchain/v0/reactor_test.go @@ -1,20 +1,21 @@ package v0 import ( + "fmt" "os" "sort" "testing" "time" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/mock" + "github.com/tendermint/tendermint/mempool/mock" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" @@ -64,16 +65,17 @@ func newBlockchainReactor( proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() if err != nil { - panic(errors.Wrap(err, "error start app")) + panic(fmt.Errorf("error start app: %w", err)) } blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(blockDB) - state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) if err != nil { - panic(errors.Wrap(err, "error constructing state from genesis file")) + panic(fmt.Errorf("error constructing state from genesis file: %w", err)) } // Make the BlockchainReactor itself. @@ -81,9 +83,12 @@ func newBlockchainReactor( // pool.height is determined from the store. 
fastSync := true db := dbm.NewMemDB() - blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(), - mock.Mempool{}, sm.MockEvidencePool{}) - sm.SaveState(db, state) + stateStore = sm.NewStore(db) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), + mock.Mempool{}, sm.EmptyEvidencePool{}) + if err = stateStore.Save(state); err != nil { + panic(err) + } // let's add some blocks in for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { @@ -110,11 +115,11 @@ func newBlockchainReactor( thisBlock := makeBlock(blockHeight, state, lastCommit) thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) - blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()} + blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock) if err != nil { - panic(errors.Wrap(err, "error apply block")) + panic(fmt.Errorf("error apply block: %w", err)) } blockStore.SaveBlock(thisBlock, thisParts, lastCommit) @@ -146,8 +151,10 @@ func TestNoBlockResponse(t *testing.T) { defer func() { for _, r := range reactorPairs { - r.reactor.Stop() - r.app.Stop() + err := r.reactor.Stop() + require.NoError(t, err) + err = r.app.Stop() + require.NoError(t, err) } }() @@ -193,10 +200,15 @@ func TestBadBlockStopsPeer(t *testing.T) { maxBlockHeight := int64(148) - otherChain := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight) + // Other chain needs a different validator set + otherGenDoc, otherPrivVals := randGenesisDoc(1, false, 30) + otherChain := newBlockchainReactor(log.TestingLogger(), otherGenDoc, otherPrivVals, maxBlockHeight) + defer func() { - otherChain.reactor.Stop() - otherChain.app.Stop() + err := otherChain.reactor.Stop() + require.Error(t, err) + err = otherChain.app.Stop() + require.NoError(t, err) }() reactorPairs := make([]BlockchainReactorPair, 4) @@ -214,23 +226,32 @@ func TestBadBlockStopsPeer(t *testing.T) { defer func() { for _, r := range reactorPairs { - r.reactor.Stop() - r.app.Stop() + err := r.reactor.Stop() + require.NoError(t, err) + + err = r.app.Stop() + require.NoError(t, err) } }() for { - if reactorPairs[3].reactor.pool.IsCaughtUp() { + time.Sleep(1 * time.Second) + caughtUp := true + for _, r := range reactorPairs { + if !r.reactor.pool.IsCaughtUp() { + caughtUp = false + } + } + if caughtUp { break } - - time.Sleep(1 * time.Second) } - //at this time, reactors[0-3] is the newest + // at this time, reactors[0-3] is the newest assert.Equal(t, 3, reactorPairs[1].reactor.Switch.Peers().Size()) - //mark reactorPairs[3] is an invalid peer + // Mark reactorPairs[3] as an invalid peer. Fiddling with .store without a mutex is a data + // race, but can't be easily avoided. 
reactorPairs[3].reactor.store = otherChain.reactor.store lastReactorPair := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0) @@ -257,86 +278,6 @@ func TestBadBlockStopsPeer(t *testing.T) { assert.True(t, lastReactorPair.reactor.Switch.Peers().Size() < len(reactorPairs)-1) } -func TestBcBlockRequestMessageValidateBasic(t *testing.T) { - testCases := []struct { - testName string - requestHeight int64 - expectErr bool - }{ - {"Valid Request Message", 0, false}, - {"Valid Request Message", 1, false}, - {"Invalid Request Message", -1, true}, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.testName, func(t *testing.T) { - request := bcBlockRequestMessage{Height: tc.requestHeight} - assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result") - }) - } -} - -func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) { - testCases := []struct { - testName string - nonResponseHeight int64 - expectErr bool - }{ - {"Valid Non-Response Message", 0, false}, - {"Valid Non-Response Message", 1, false}, - {"Invalid Non-Response Message", -1, true}, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.testName, func(t *testing.T) { - nonResponse := bcNoBlockResponseMessage{Height: tc.nonResponseHeight} - assert.Equal(t, tc.expectErr, nonResponse.ValidateBasic() != nil, "Validate Basic had an unexpected result") - }) - } -} - -func TestBcStatusRequestMessageValidateBasic(t *testing.T) { - testCases := []struct { - testName string - requestHeight int64 - expectErr bool - }{ - {"Valid Request Message", 0, false}, - {"Valid Request Message", 1, false}, - {"Invalid Request Message", -1, true}, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.testName, func(t *testing.T) { - request := bcStatusRequestMessage{Height: tc.requestHeight} - assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result") - }) - } -} - -func TestBcStatusResponseMessageValidateBasic(t *testing.T) { - testCases := []struct { - testName string - responseHeight int64 - expectErr bool - }{ - {"Valid Response Message", 0, false}, - {"Valid Response Message", 1, false}, - {"Invalid Response Message", -1, true}, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.testName, func(t *testing.T) { - response := bcStatusResponseMessage{Height: tc.responseHeight} - assert.Equal(t, tc.expectErr, response.ValidateBasic() != nil, "Validate Basic had an unexpected result") - }) - } -} - //---------------------------------------------- // utility funcs diff --git a/blockchain/v1/codec.go b/blockchain/v1/codec.go deleted file mode 100644 index ce4f7dfab..000000000 --- a/blockchain/v1/codec.go +++ /dev/null @@ -1,14 +0,0 @@ -package v1 - -import ( - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/types" -) - -var cdc = amino.NewCodec() - -func init() { - RegisterBlockchainMessages(cdc) - types.RegisterBlockAmino(cdc) -} diff --git a/blockchain/v1/peer.go b/blockchain/v1/peer.go index ad26585b3..a9b5e0379 100644 --- a/blockchain/v1/peer.go +++ b/blockchain/v1/peer.go @@ -31,6 +31,7 @@ type BpPeer struct { Height int64 // the peer reported height NumPendingBlockRequests int // number of requests still waiting for block responses blocks map[int64]*types.Block // blocks received or expected to be received from this peer + noBlocks map[int64]struct{} // heights for which the peer does not have blocks blockResponseTimer *time.Timer recvMonitor *flow.Monitor params *BpPeerParams // parameters 
for timer and monitor @@ -46,13 +47,14 @@ func NewBpPeer(peerID p2p.ID, base int64, height int64, params = BpPeerDefaultParams() } return &BpPeer{ - ID: peerID, - Base: base, - Height: height, - blocks: make(map[int64]*types.Block, maxRequestsPerPeer), - logger: log.NewNopLogger(), - onErr: onErr, - params: params, + ID: peerID, + Base: base, + Height: height, + blocks: make(map[int64]*types.Block, maxRequestsPerPeer), + noBlocks: make(map[int64]struct{}), + logger: log.NewNopLogger(), + onErr: onErr, + params: params, } } @@ -131,6 +133,19 @@ func (peer *BpPeer) RemoveBlock(height int64) { delete(peer.blocks, height) } +// SetNoBlock records that the peer does not have a block for height. +func (peer *BpPeer) SetNoBlock(height int64) { + peer.noBlocks[height] = struct{}{} +} + +// NoBlock returns true if the peer does not have a block for height. +func (peer *BpPeer) NoBlock(height int64) bool { + if _, ok := peer.noBlocks[height]; ok { + return true + } + return false +} + // RequestSent records that a request was sent, and starts the peer timer and monitor if needed. func (peer *BpPeer) RequestSent(height int64) { peer.blocks[height] = nil diff --git a/blockchain/v1/peer_test.go b/blockchain/v1/peer_test.go index 0e7a73473..fd9e9f14b 100644 --- a/blockchain/v1/peer_test.go +++ b/blockchain/v1/peer_test.go @@ -32,7 +32,7 @@ func TestPeerResetBlockResponseTimer(t *testing.T) { lastErr error // last generated error peerTestMtx sync.Mutex // modifications of ^^ variables are also done from timer handler goroutine ) - params := &BpPeerParams{timeout: 2 * time.Millisecond} + params := &BpPeerParams{timeout: 20 * time.Millisecond} peer := NewBpPeer( p2p.ID(tmrand.Str(12)), 0, 10, @@ -55,12 +55,12 @@ func TestPeerResetBlockResponseTimer(t *testing.T) { // reset with running timer peer.resetBlockResponseTimer() - time.Sleep(time.Millisecond) + time.Sleep(5 * time.Millisecond) peer.resetBlockResponseTimer() assert.NotNil(t, peer.blockResponseTimer) // let the timer expire and ... - time.Sleep(3 * time.Millisecond) + time.Sleep(50 * time.Millisecond) // ... check timer is not running checkByStoppingPeerTimer(t, peer, false) @@ -181,7 +181,7 @@ func TestPeerAddBlock(t *testing.T) { func TestPeerOnErrFuncCalledDueToExpiration(t *testing.T) { - params := &BpPeerParams{timeout: 2 * time.Millisecond} + params := &BpPeerParams{timeout: 10 * time.Millisecond} var ( numErrFuncCalls int // number of calls to the onErr function lastErr error // last generated error @@ -201,7 +201,7 @@ func TestPeerOnErrFuncCalledDueToExpiration(t *testing.T) { peer.SetLogger(log.TestingLogger()) peer.RequestSent(1) - time.Sleep(4 * time.Millisecond) + time.Sleep(50 * time.Millisecond) // timer should have expired by now, check that the on error function was called peerTestMtx.Lock() assert.Equal(t, 1, numErrFuncCalls) diff --git a/blockchain/v1/pool.go b/blockchain/v1/pool.go index 27e0f3a04..9c037183d 100644 --- a/blockchain/v1/pool.go +++ b/blockchain/v1/pool.go @@ -100,6 +100,18 @@ func (pool *BlockPool) UpdatePeer(peerID p2p.ID, base int64, height int64) error return nil } +// SetNoBlock records that the peer does not have a block for height and +// schedules a new request for that height from another peer. +func (pool *BlockPool) SetNoBlock(peerID p2p.ID, height int64) { + peer := pool.peers[peerID] + if peer == nil { + return + } + peer.SetNoBlock(height) + + pool.rescheduleRequest(peerID, height) +} + // Cleans and deletes the peer. Recomputes the max peer height. 
func (pool *BlockPool) deletePeer(peer *BpPeer) { if peer == nil { @@ -214,7 +226,7 @@ func (pool *BlockPool) sendRequest(height int64) bool { if peer.NumPendingBlockRequests >= maxRequestsPerPeer { continue } - if peer.Base > height || peer.Height < height { + if peer.Base > height || peer.Height < height || peer.NoBlock(height) { continue } diff --git a/blockchain/v1/reactor.go b/blockchain/v1/reactor.go index 28a314b8a..78ce71e18 100644 --- a/blockchain/v1/reactor.go +++ b/blockchain/v1/reactor.go @@ -1,16 +1,15 @@ package v1 import ( - "errors" "fmt" "reflect" "time" - amino "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/behaviour" + bc "github.com/tendermint/tendermint/blockchain" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" + bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" @@ -24,13 +23,6 @@ const ( // ask for best height every 10s statusUpdateIntervalSeconds = 10 - - // NOTE: keep up to date with bcBlockResponseMessage - bcBlockResponseMessagePrefixSize = 4 - bcBlockResponseMessageFieldKeySize = 1 - maxMsgSize = types.MaxBlockSizeBytes + - bcBlockResponseMessagePrefixSize + - bcBlockResponseMessageFieldKeySize ) var ( @@ -44,7 +36,7 @@ var ( type consensusReactor interface { // for when we switch from blockchain reactor and fast sync to // the consensus machine - SwitchToConsensus(sm.State, uint64) + SwitchToConsensus(state sm.State, skipWAL bool) } // BlockchainReactor handles long-term catchup syncing. @@ -57,7 +49,8 @@ type BlockchainReactor struct { blockExec *sm.BlockExecutor store *store.BlockStore - fastSync bool + fastSync bool + stateSynced bool fsm *BcReactorFSM blocksSynced uint64 @@ -91,6 +84,9 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *st errorsForFSMCh := make(chan bcReactorMessage, capacity) startHeight := store.Height() + 1 + if startHeight == 1 { + startHeight = state.InitialHeight + } bcR := &BlockchainReactor{ initialState: state, state: state, @@ -104,7 +100,7 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *st fsm := NewFSM(startHeight, bcR) bcR.fsm = fsm bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR) - //bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch) + // bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch) return bcR } @@ -154,6 +150,19 @@ func (bcR *BlockchainReactor) OnStop() { _ = bcR.Stop() } +// SwitchToFastSync is called by the state sync reactor when switching to fast sync. +func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error { + bcR.fastSync = true + bcR.initialState = state + bcR.state = state + bcR.stateSynced = true + + bcR.fsm = NewFSM(state.LastBlockHeight+1, bcR) + bcR.fsm.SetLogger(bcR.Logger) + go bcR.poolRoutine() + return nil +} + // GetChannels implements Reactor func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { return []*p2p.ChannelDescriptor{ @@ -162,17 +171,21 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { Priority: 10, SendQueueCapacity: 2000, RecvBufferCapacity: 50 * 4096, - RecvMessageCapacity: maxMsgSize, + RecvMessageCapacity: bc.MaxMsgSize, }, } } // AddPeer implements Reactor by sending our state to peer. 
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{ + msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{ Base: bcR.store.Base(), Height: bcR.store.Height(), }) + if err != nil { + bcR.Logger.Error("could not convert msg to protobuf", "err", err) + return + } peer.Send(BlockchainChannel, msgBytes) // it's OK if send fails. will try later in poolRoutine @@ -183,26 +196,44 @@ func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) { // sendBlockToPeer loads a block and sends it to the requesting peer. // If the block doesn't exist a bcNoBlockResponseMessage is sent. // If all nodes are honest, no node should be requesting for a block that doesn't exist. -func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcBlockRequestMessage, +func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest, src p2p.Peer) (queued bool) { block := bcR.store.LoadBlock(msg.Height) if block != nil { - msgBytes := cdc.MustMarshalBinaryBare(&bcBlockResponseMessage{Block: block}) + pbbi, err := block.ToProto() + if err != nil { + bcR.Logger.Error("Could not send block message to peer", "err", err) + return false + } + msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: pbbi}) + if err != nil { + bcR.Logger.Error("unable to marshal msg", "err", err) + return false + } return src.TrySend(BlockchainChannel, msgBytes) } bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height) - msgBytes := cdc.MustMarshalBinaryBare(&bcNoBlockResponseMessage{Height: msg.Height}) + msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height}) + if err != nil { + bcR.Logger.Error("unable to marshal msg", "err", err) + return false + } return src.TrySend(BlockchainChannel, msgBytes) } -func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcStatusRequestMessage, src p2p.Peer) (queued bool) { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{ +func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) { + msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{ Base: bcR.store.Base(), Height: bcR.store.Height(), }) + if err != nil { + bcR.Logger.Error("unable to marshal msg", "err", err) + return false + } + return src.TrySend(BlockchainChannel, msgBytes) } @@ -219,8 +250,10 @@ func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) { } // Receive implements Reactor by handling 4 types of messages (look below). +// XXX: do not call any methods that can block or incur heavy processing. 
+// https://github.com/tendermint/tendermint/issues/2888 func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - msg, err := decodeMsg(msgBytes) + msg, err := bc.DecodeMsg(msgBytes) if err != nil { bcR.Logger.Error("error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) @@ -228,7 +261,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) return } - if err = msg.ValidateBasic(); err != nil { + if err = bc.ValidateMsg(msg); err != nil { bcR.Logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err) _ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error())) return @@ -237,33 +270,49 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg) switch msg := msg.(type) { - case *bcBlockRequestMessage: + case *bcproto.BlockRequest: if queued := bcR.sendBlockToPeer(msg, src); !queued { // Unfortunately not queued since the queue is full. bcR.Logger.Error("Could not send block message to peer", "src", src, "height", msg.Height) } - case *bcStatusRequestMessage: + case *bcproto.StatusRequest: // Send peer our state. if queued := bcR.sendStatusResponseToPeer(msg, src); !queued { // Unfortunately not queued since the queue is full. bcR.Logger.Error("Could not send status message to peer", "src", src) } - case *bcBlockResponseMessage: + case *bcproto.BlockResponse: + bi, err := types.BlockFromProto(msg.Block) + if err != nil { + bcR.Logger.Error("error transitioning block from protobuf", "err", err) + _ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error())) + return + } msgForFSM := bcReactorMessage{ event: blockResponseEv, data: bReactorEventData{ peerID: src.ID(), - height: msg.Block.Height, - block: msg.Block, + height: bi.Height, + block: bi, length: len(msgBytes), }, } - bcR.Logger.Info("Received", "src", src, "height", msg.Block.Height) + bcR.Logger.Info("Received", "src", src, "height", bi.Height) + bcR.messagesForFSMCh <- msgForFSM + case *bcproto.NoBlockResponse: + msgForFSM := bcReactorMessage{ + event: noBlockResponseEv, + data: bReactorEventData{ + peerID: src.ID(), + height: msg.Height, + }, + } + bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height) bcR.messagesForFSMCh <- msgForFSM - case *bcStatusResponseMessage: + case *bcproto.StatusResponse: // Got a peer status. Unverified. msgForFSM := bcReactorMessage{ event: statusResponseEv, @@ -421,13 +470,13 @@ func (bcR *BlockchainReactor) processBlock() error { chainID := bcR.initialState.ChainID firstParts := first.MakePartSet(types.BlockPartSizeBytes) - firstPartsHeader := firstParts.Header() - firstID := types.BlockID{Hash: first.Hash(), PartsHeader: firstPartsHeader} + firstPartSetHeader := firstParts.Header() + firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader} // Finally, verify the first block using the second's commit // NOTE: we can probably make this more efficient, but note that calling // first.Hash() doesn't verify the tx contents, so MakePartSet() is // currently necessary.
- err = bcR.state.Validators.VerifyCommit(chainID, firstID, first.Height, second.LastCommit) + err = bcR.state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit) if err != nil { bcR.Logger.Error("error during commit verification", "err", err, "first", first.Height, "second", second.Height) @@ -447,10 +496,10 @@ func (bcR *BlockchainReactor) processBlock() error { // Implements bcRNotifier // sendStatusRequest broadcasts `BlockStore` height. func (bcR *BlockchainReactor) sendStatusRequest() { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{ - Base: bcR.store.Base(), - Height: bcR.store.Height(), - }) + msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{}) + if err != nil { + panic(err) + } bcR.Switch.Broadcast(BlockchainChannel, msgBytes) } @@ -462,7 +511,10 @@ func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) erro return errNilPeerForBlockRequest } - msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{height}) + msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height}) + if err != nil { + return err + } queued := peer.TrySend(BlockchainChannel, msgBytes) if !queued { return errSendQueueFull @@ -474,7 +526,7 @@ func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) erro func (bcR *BlockchainReactor) switchToConsensus() { conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor) if ok { - conR.SwitchToConsensus(bcR.state, bcR.blocksSynced) + conR.SwitchToConsensus(bcR.state, bcR.blocksSynced > 0 || bcR.stateSynced) bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv} } // else { @@ -519,128 +571,3 @@ func (bcR *BlockchainReactor) resetStateTimer(name string, timer **time.Timer, t (*timer).Reset(timeout) } } - -//----------------------------------------------------------------------------- -// Messages - -// BlockchainMessage is a generic message for this reactor. -type BlockchainMessage interface { - ValidateBasic() error -} - -// RegisterBlockchainMessages registers the fast sync messages for amino encoding. -func RegisterBlockchainMessages(cdc *amino.Codec) { - cdc.RegisterInterface((*BlockchainMessage)(nil), nil) - cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil) - cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/blockchain/BlockResponse", nil) - cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/blockchain/NoBlockResponse", nil) - cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/blockchain/StatusResponse", nil) - cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/blockchain/StatusRequest", nil) -} - -func decodeMsg(bz []byte) (msg BlockchainMessage, err error) { - if len(bz) > maxMsgSize { - return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize) - } - err = cdc.UnmarshalBinaryBare(bz, &msg) - return -} - -//------------------------------------- - -type bcBlockRequestMessage struct { - Height int64 -} - -// ValidateBasic performs basic validation. -func (m *bcBlockRequestMessage) ValidateBasic() error { - if m.Height < 0 { - return errors.New("negative Height") - } - return nil -} - -func (m *bcBlockRequestMessage) String() string { - return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height) -} - -type bcNoBlockResponseMessage struct { - Height int64 -} - -// ValidateBasic performs basic validation. 
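Throughout these hunks the amino codec calls are replaced by bc.EncodeMsg and bc.DecodeMsg from the shared blockchain package, which this diff does not show. The sketch below captures the envelope-plus-size-limit shape of those helpers; it uses JSON and hand-rolled names purely as stand-ins, since the real helpers wrap the concrete request/response types in the generated bcproto.Message oneof and marshal with protobuf:

package main

import (
    "encoding/json"
    "errors"
    "fmt"
)

// maxMsgSize stands in for bc.MaxMsgSize; the real bound is derived from
// types.MaxBlockSizeBytes plus a small envelope overhead.
const maxMsgSize = 1024

// wire is a stand-in for the generated bcproto.Message envelope: one field
// identifying the concrete message, one carrying its payload.
type wire struct {
    Kind    string          `json:"kind"`
    Payload json.RawMessage `json:"payload"`
}

// encodeMsg wraps a concrete message in the envelope before marshaling.
func encodeMsg(kind string, msg interface{}) ([]byte, error) {
    payload, err := json.Marshal(msg)
    if err != nil {
        return nil, err
    }
    return json.Marshal(wire{Kind: kind, Payload: payload})
}

// decodeMsg enforces the size bound before unmarshaling, mirroring the
// check that used to live in each reactor's decodeMsg.
func decodeMsg(bz []byte) (*wire, error) {
    if len(bz) > maxMsgSize {
        return nil, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
    }
    var w wire
    if err := json.Unmarshal(bz, &w); err != nil {
        return nil, err
    }
    if w.Kind == "" {
        return nil, errors.New("unknown message kind")
    }
    return &w, nil
}

func main() {
    bz, _ := encodeMsg("status_request", struct{}{})
    w, err := decodeMsg(bz)
    fmt.Println(w.Kind, err) // status_request <nil>
}

Centralizing the helpers is what lets both v1 and v2 delete their per-package codecs and maxMsgSize constants in the hunks that follow.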
-func (m *bcNoBlockResponseMessage) ValidateBasic() error { - if m.Height < 0 { - return errors.New("negative Height") - } - return nil -} - -func (m *bcNoBlockResponseMessage) String() string { - return fmt.Sprintf("[bcNoBlockResponseMessage %d]", m.Height) -} - -//------------------------------------- - -type bcBlockResponseMessage struct { - Block *types.Block -} - -// ValidateBasic performs basic validation. -func (m *bcBlockResponseMessage) ValidateBasic() error { - return m.Block.ValidateBasic() -} - -func (m *bcBlockResponseMessage) String() string { - return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height) -} - -//------------------------------------- - -type bcStatusRequestMessage struct { - Height int64 - Base int64 -} - -// ValidateBasic performs basic validation. -func (m *bcStatusRequestMessage) ValidateBasic() error { - if m.Height < 0 { - return errors.New("negative Height") - } - if m.Base < 0 { - return errors.New("negative Base") - } - if m.Base > m.Height { - return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) - } - return nil -} - -func (m *bcStatusRequestMessage) String() string { - return fmt.Sprintf("[bcStatusRequestMessage %v:%v]", m.Base, m.Height) -} - -//------------------------------------- - -type bcStatusResponseMessage struct { - Height int64 - Base int64 -} - -// ValidateBasic performs basic validation. -func (m *bcStatusResponseMessage) ValidateBasic() error { - if m.Height < 0 { - return errors.New("negative Height") - } - if m.Base < 0 { - return errors.New("negative Base") - } - if m.Base > m.Height { - return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) - } - return nil -} - -func (m *bcStatusResponseMessage) String() string { - return fmt.Sprintf("[bcStatusResponseMessage %v:%v]", m.Base, m.Height) -} diff --git a/blockchain/v1/reactor_fsm.go b/blockchain/v1/reactor_fsm.go index 0f65f9d66..384ea5c28 100644 --- a/blockchain/v1/reactor_fsm.go +++ b/blockchain/v1/reactor_fsm.go @@ -74,6 +74,7 @@ const ( startFSMEv = iota + 1 statusResponseEv blockResponseEv + noBlockResponseEv processedBlockEv makeRequestsEv stopFSMEv @@ -94,6 +95,9 @@ func (msg *bcReactorMessage) String() string { case blockResponseEv: dataStr = fmt.Sprintf("peer=%v block.height=%v length=%v", msg.data.peerID, msg.data.block.Height, msg.data.length) + case noBlockResponseEv: + dataStr = fmt.Sprintf("peer=%v requested height=%v", + msg.data.peerID, msg.data.height) case processedBlockEv: dataStr = fmt.Sprintf("error=%v", msg.data.err) case makeRequestsEv: @@ -105,7 +109,7 @@ func (msg *bcReactorMessage) String() string { case stateTimeoutEv: dataStr = fmt.Sprintf("state=%v", msg.data.stateName) default: - dataStr = fmt.Sprintf("cannot interpret message data") + dataStr = "cannot interpret message data" } return fmt.Sprintf("%v: %v", msg.event, dataStr) @@ -119,6 +123,8 @@ func (ev bReactorEvent) String() string { return "statusResponseEv" case blockResponseEv: return "blockResponseEv" + case noBlockResponseEv: + return "noBlockResponseEv" case processedBlockEv: return "processedBlockEv" case makeRequestsEv: @@ -269,7 +275,11 @@ func init() { return waitForPeer, err } return waitForBlock, err + case noBlockResponseEv: + fsm.logger.Error("peer does not have requested block", "peer", data.peerID) + fsm.pool.SetNoBlock(data.peerID, data.height) + return waitForBlock, nil case processedBlockEv: if data.err != nil { first, second, _ := fsm.pool.FirstTwoBlocksAndPeers() diff --git a/blockchain/v1/reactor_fsm_test.go 
b/blockchain/v1/reactor_fsm_test.go index 5980ceb08..9fdfe9c9b 100644 --- a/blockchain/v1/reactor_fsm_test.go +++ b/blockchain/v1/reactor_fsm_test.go @@ -102,6 +102,19 @@ func sProcessedBlockEv(current, expected string, reactorError error) fsmStepTest } } +func sNoBlockResponseEv(current, expected string, peerID p2p.ID, height int64, err error) fsmStepTestValues { + return fsmStepTestValues{ + currentState: current, + event: noBlockResponseEv, + data: bReactorEventData{ + peerID: peerID, + height: height, + }, + wantState: expected, + wantErr: err, + } +} + func sStatusEv(current, expected string, peerID p2p.ID, height int64, err error) fsmStepTestValues { return fsmStepTestValues{ currentState: current, @@ -354,6 +367,46 @@ func TestFSMBlockVerificationFailure(t *testing.T) { executeFSMTests(t, tests, false) } +func TestFSMNoBlockResponse(t *testing.T) { + tests := []testFields{ + { + name: "no block response", + startingHeight: 1, + maxRequestsPerPeer: 3, + steps: []fsmStepTestValues{ + sStartFSMEv(), + + // add P1 and get blocks 1-3 from it + sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil), + sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests), + sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}), + sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}), + sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}), + + // add P2 + sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil), + + // P1 reports it does not have blocks 1-3, invalidating them + sNoBlockResponseEv("waitForBlock", "waitForBlock", "P1", 1, nil), + sNoBlockResponseEv("waitForBlock", "waitForBlock", "P1", 2, nil), + sNoBlockResponseEv("waitForBlock", "waitForBlock", "P1", 3, nil), + + // get blocks 1-3 from P2 + sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests), + sBlockRespEv("waitForBlock", "waitForBlock", "P2", 1, []int64{}), + sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}), + sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}), + + // finish after processing blocks 1 and 2 + sProcessedBlockEv("waitForBlock", "waitForBlock", nil), + sProcessedBlockEv("waitForBlock", "finished", nil), + }, + }, + } + + executeFSMTests(t, tests, false) +} + func TestFSMBadBlockFromPeer(t *testing.T) { tests := []testFields{ { @@ -822,7 +875,7 @@ const ( maxRequestsPerPeerTest = 20 maxTotalPendingRequestsTest = 600 maxNumPeersTest = 1000 - maxNumBlocksInChainTest = 10000 //should be smaller than 9999999 + maxNumBlocksInChainTest = 10000 // should be smaller than 9999999 ) func makeCorrectTransitionSequenceWithRandomParameters() testFields { diff --git a/blockchain/v1/reactor_test.go b/blockchain/v1/reactor_test.go index e0b3472bf..c0f371905 100644 --- a/blockchain/v1/reactor_test.go +++ b/blockchain/v1/reactor_test.go @@ -8,7 +8,6 @@ import ( "testing" "time" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,8 +16,9 @@ import ( abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/mock" + "github.com/tendermint/tendermint/mempool/mock" "github.com/tendermint/tendermint/p2p" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" @@ -65,11 +65,14 @@ func makeVote( Height: header.Height, Round: 1, Timestamp: tmtime.Now(), -
Type: types.PrecommitType, + Type: tmproto.PrecommitType, BlockID: blockID, } - _ = privVal.SignVote(header.ChainID, vote) + vpb := vote.ToProto() + + _ = privVal.SignVote(header.ChainID, vpb) + vote.Signature = vpb.Signature return vote } @@ -94,16 +97,17 @@ func newBlockchainReactor( proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() if err != nil { - panic(errors.Wrap(err, "error start app")) + panic(fmt.Errorf("error start app: %w", err)) } blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(blockDB) - state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) if err != nil { - panic(errors.Wrap(err, "error constructing state from genesis file")) + panic(fmt.Errorf("error constructing state from genesis file: %w", err)) } // Make the BlockchainReactor itself. @@ -111,9 +115,12 @@ func newBlockchainReactor( // pool.height is determined from the store. fastSync := true db := dbm.NewMemDB() - blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(), - mock.Mempool{}, sm.MockEvidencePool{}) - sm.SaveState(db, state) + stateStore = sm.NewStore(db) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), + mock.Mempool{}, sm.EmptyEvidencePool{}) + if err = stateStore.Save(state); err != nil { + panic(err) + } // let's add some blocks in for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { @@ -129,11 +136,11 @@ func newBlockchainReactor( thisBlock := makeBlock(blockHeight, state, lastCommit) thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) - blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()} + blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock) if err != nil { - panic(errors.Wrap(err, "error apply block")) + panic(fmt.Errorf("error apply block: %w", err)) } blockStore.SaveBlock(thisBlock, thisParts, lastCommit) @@ -166,7 +173,7 @@ type consensusReactorTest struct { mtx sync.Mutex } -func (conR *consensusReactorTest) SwitchToConsensus(state sm.State, blocksSynced uint64) { +func (conR *consensusReactorTest) SwitchToConsensus(state sm.State, blocksSynced bool) { conR.mtx.Lock() defer conR.mtx.Unlock() conR.switchedToConsensus = true @@ -298,10 +305,10 @@ outerFor: break } - //at this time, reactors[0-3] is the newest + // at this time, reactors[0-3] is the newest assert.Equal(t, numNodes-1, reactorPairs[1].bcR.Switch.Peers().Size()) - //mark last reactorPair as an invalid peer + // mark last reactorPair as an invalid peer reactorPairs[numNodes-1].bcR.store = otherChain.bcR.store lastLogger := log.TestingLogger() @@ -338,86 +345,6 @@ outerFor: assert.True(t, lastReactorPair.bcR.Switch.Peers().Size() < len(reactorPairs)-1) } -func TestBcBlockRequestMessageValidateBasic(t *testing.T) { - testCases := []struct { - testName string - requestHeight int64 - expectErr bool - }{ - {"Valid Request Message", 0, false}, - {"Valid Request Message", 1, false}, - {"Invalid Request Message", -1, true}, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.testName, func(t *testing.T) { - request := bcBlockRequestMessage{Height: tc.requestHeight} - assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result") - }) - } -} - -func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) { - testCases := []struct { - 
testName string - nonResponseHeight int64 - expectErr bool - }{ - {"Valid Non-Response Message", 0, false}, - {"Valid Non-Response Message", 1, false}, - {"Invalid Non-Response Message", -1, true}, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.testName, func(t *testing.T) { - nonResponse := bcNoBlockResponseMessage{Height: tc.nonResponseHeight} - assert.Equal(t, tc.expectErr, nonResponse.ValidateBasic() != nil, "Validate Basic had an unexpected result") - }) - } -} - -func TestBcStatusRequestMessageValidateBasic(t *testing.T) { - testCases := []struct { - testName string - requestHeight int64 - expectErr bool - }{ - {"Valid Request Message", 0, false}, - {"Valid Request Message", 1, false}, - {"Invalid Request Message", -1, true}, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.testName, func(t *testing.T) { - request := bcStatusRequestMessage{Height: tc.requestHeight} - assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result") - }) - } -} - -func TestBcStatusResponseMessageValidateBasic(t *testing.T) { - testCases := []struct { - testName string - responseHeight int64 - expectErr bool - }{ - {"Valid Response Message", 0, false}, - {"Valid Response Message", 1, false}, - {"Invalid Response Message", -1, true}, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.testName, func(t *testing.T) { - response := bcStatusResponseMessage{Height: tc.responseHeight} - assert.Equal(t, tc.expectErr, response.ValidateBasic() != nil, "Validate Basic had an unexpected result") - }) - } -} - //---------------------------------------------- // utility funcs diff --git a/blockchain/v2/codec.go b/blockchain/v2/codec.go deleted file mode 100644 index 4e92846c4..000000000 --- a/blockchain/v2/codec.go +++ /dev/null @@ -1,14 +0,0 @@ -package v2 - -import ( - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/types" -) - -var cdc = amino.NewCodec() - -func init() { - RegisterBlockchainMessages(cdc) - types.RegisterBlockAmino(cdc) -} diff --git a/blockchain/v2/io.go b/blockchain/v2/io.go index 32cf3aeaf..4951573ce 100644 --- a/blockchain/v2/io.go +++ b/blockchain/v2/io.go @@ -3,7 +3,9 @@ package v2 import ( "fmt" + bc "github.com/tendermint/tendermint/blockchain" "github.com/tendermint/tendermint/p2p" + bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -12,11 +14,11 @@ type iIO interface { sendBlockRequest(peerID p2p.ID, height int64) error sendBlockToPeer(block *types.Block, peerID p2p.ID) error sendBlockNotFound(height int64, peerID p2p.ID) error - sendStatusResponse(height int64, peerID p2p.ID) error + sendStatusResponse(base, height int64, peerID p2p.ID) error - broadcastStatusRequest(base int64, height int64) + broadcastStatusRequest() error - trySwitchToConsensus(state state.State, blocksSynced int) + trySwitchToConsensus(state state.State, skipWAL bool) bool } type switchIO struct { @@ -37,7 +39,7 @@ const ( type consensusReactor interface { // for when we switch from blockchain reactor and fast sync to // the consensus machine - SwitchToConsensus(state.State, int) + SwitchToConsensus(state state.State, skipWAL bool) } func (sio *switchIO) sendBlockRequest(peerID p2p.ID, height int64) error { @@ -45,8 +47,11 @@ func (sio *switchIO) sendBlockRequest(peerID p2p.ID, height int64) error { if peer == nil { return fmt.Errorf("peer not found") } + msgBytes, err := 
bc.EncodeMsg(&bcproto.BlockRequest{Height: height}) + if err != nil { + return err + } - msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{Height: height}) queued := peer.TrySend(BlockchainChannel, msgBytes) if !queued { return fmt.Errorf("send queue full") @@ -54,12 +59,16 @@ func (sio *switchIO) sendBlockRequest(peerID p2p.ID, height int64) error { return nil } -func (sio *switchIO) sendStatusResponse(height int64, peerID p2p.ID) error { +func (sio *switchIO) sendStatusResponse(base int64, height int64, peerID p2p.ID) error { peer := sio.sw.Peers().Get(peerID) if peer == nil { return fmt.Errorf("peer not found") } - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{Height: height}) + + msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{Height: height, Base: base}) + if err != nil { + return err + } if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { return fmt.Errorf("peer queue full") @@ -76,7 +85,16 @@ func (sio *switchIO) sendBlockToPeer(block *types.Block, peerID p2p.ID) error { if block == nil { panic("trying to send nil block") } - msgBytes := cdc.MustMarshalBinaryBare(&bcBlockResponseMessage{Block: block}) + + bpb, err := block.ToProto() + if err != nil { + return err + } + + msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bpb}) + if err != nil { + return err + } if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { return fmt.Errorf("peer queue full") } @@ -89,7 +107,11 @@ func (sio *switchIO) sendBlockNotFound(height int64, peerID p2p.ID) error { if peer == nil { return fmt.Errorf("peer not found") } - msgBytes := cdc.MustMarshalBinaryBare(&bcNoBlockResponseMessage{Height: height}) + msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: height}) + if err != nil { + return err + } + if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { return fmt.Errorf("peer queue full") } @@ -97,21 +119,22 @@ func (sio *switchIO) sendBlockNotFound(height int64, peerID p2p.ID) error { return nil } -func (sio *switchIO) trySwitchToConsensus(state state.State, blocksSynced int) { +func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool { conR, ok := sio.sw.Reactor("CONSENSUS").(consensusReactor) if ok { - conR.SwitchToConsensus(state, blocksSynced) + conR.SwitchToConsensus(state, skipWAL) } + return ok } -func (sio *switchIO) broadcastStatusRequest(base int64, height int64) { - if height == 0 && base > 0 { - base = 0 +func (sio *switchIO) broadcastStatusRequest() error { + msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{}) + if err != nil { + return err } - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{ - Base: base, - Height: height, - }) + // XXX: maybe we should use an io specific peer list here sio.sw.Broadcast(BlockchainChannel, msgBytes) + + return nil } diff --git a/blockchain/v2/processor.go b/blockchain/v2/processor.go index d6a2fe1e8..f9036f3b9 100644 --- a/blockchain/v2/processor.go +++ b/blockchain/v2/processor.go @@ -17,6 +17,11 @@ type pcBlockVerificationFailure struct { secondPeerID p2p.ID } +func (e pcBlockVerificationFailure) String() string { + return fmt.Sprintf("pcBlockVerificationFailure{%d 1st peer: %v, 2nd peer: %v}", + e.height, e.firstPeerID, e.secondPeerID) +} + // successful block execution type pcBlockProcessed struct { priorityNormal @@ -24,6 +29,10 @@ type pcBlockProcessed struct { peerID p2p.ID } +func (e pcBlockProcessed) String() string { + return fmt.Sprintf("pcBlockProcessed{%d peer: %v}", e.height, e.peerID) +} + // processor has 
finished type pcFinished struct { priorityNormal @@ -87,9 +96,12 @@ func (state *pcState) synced() bool { } func (state *pcState) enqueue(peerID p2p.ID, block *types.Block, height int64) { - if _, ok := state.queue[height]; ok { - panic("duplicate block enqueued by processor") + if item, ok := state.queue[height]; ok { + panic(fmt.Sprintf( + "duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)", + height, block.Hash(), peerID, item.block.Hash(), item.peerID)) } + state.queue[height] = queueItem{block: block, peerID: peerID} } @@ -110,6 +122,10 @@ func (state *pcState) purgePeer(peerID p2p.ID) { // handle processes FSM events func (state *pcState) handle(event Event) (Event, error) { switch event := event.(type) { + case bcResetState: + state.context.setState(event.state) + return noOp, nil + case scFinishedEv: if state.synced() { return pcFinished{tmState: state.context.tmState(), blocksSynced: state.blocksSynced}, nil @@ -141,16 +157,20 @@ func (state *pcState) handle(event Event) (Event, error) { } return noOp, nil } - first, second := firstItem.block, secondItem.block - firstParts := first.MakePartSet(types.BlockPartSizeBytes) - firstPartsHeader := firstParts.Header() - firstID := types.BlockID{Hash: first.Hash(), PartsHeader: firstPartsHeader} + var ( + first, second = firstItem.block, secondItem.block + firstParts = first.MakePartSet(types.BlockPartSizeBytes) + firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()} + ) + // verify if +second+ last commit "confirms" +first+ block err = state.context.verifyCommit(tmState.ChainID, firstID, first.Height, second.LastCommit) if err != nil { state.purgePeer(firstItem.peerID) - state.purgePeer(secondItem.peerID) + if firstItem.peerID != secondItem.peerID { + state.purgePeer(secondItem.peerID) + } return pcBlockVerificationFailure{ height: first.Height, firstPeerID: firstItem.peerID, secondPeerID: secondItem.peerID}, nil @@ -166,7 +186,6 @@ func (state *pcState) handle(event Event) (Event, error) { state.blocksSynced++ return pcBlockProcessed{height: first.Height, peerID: firstItem.peerID}, nil - } return noOp, nil diff --git a/blockchain/v2/processor_context.go b/blockchain/v2/processor_context.go index 2e8142adc..6a0466550 100644 --- a/blockchain/v2/processor_context.go +++ b/blockchain/v2/processor_context.go @@ -12,6 +12,7 @@ type processorContext interface { verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) tmState() state.State + setState(state.State) } type pContext struct { @@ -38,8 +39,12 @@ func (pc pContext) tmState() state.State { return pc.state } +func (pc *pContext) setState(state state.State) { + pc.state = state +} + func (pc pContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error { - return pc.state.Validators.VerifyCommit(chainID, blockID, height, commit) + return pc.state.Validators.VerifyCommitLight(chainID, blockID, height, commit) } func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { @@ -86,6 +91,10 @@ func (mpc *mockPContext) saveBlock(block *types.Block, blockParts *types.PartSet } +func (mpc *mockPContext) setState(state state.State) { + mpc.state = state +} + func (mpc *mockPContext) tmState() state.State { return mpc.state } diff --git a/blockchain/v2/reactor.go b/blockchain/v2/reactor.go index ff89ee94c..5da117c94 100644 --- 
a/blockchain/v2/reactor.go +++ b/blockchain/v2/reactor.go @@ -3,120 +3,22 @@ package v2 import ( "errors" "fmt" - "sync" "time" - "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/behaviour" + bc "github.com/tendermint/tendermint/blockchain" "github.com/tendermint/tendermint/libs/log" + tmsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/p2p" + bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) -//------------------------------------- - -type bcBlockRequestMessage struct { - Height int64 -} - -// ValidateBasic performs basic validation. -func (m *bcBlockRequestMessage) ValidateBasic() error { - if m.Height < 0 { - return errors.New("negative Height") - } - return nil -} - -func (m *bcBlockRequestMessage) String() string { - return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height) -} - -type bcNoBlockResponseMessage struct { - Height int64 -} - -// ValidateBasic performs basic validation. -func (m *bcNoBlockResponseMessage) ValidateBasic() error { - if m.Height < 0 { - return errors.New("negative Height") - } - return nil -} - -func (m *bcNoBlockResponseMessage) String() string { - return fmt.Sprintf("[bcNoBlockResponseMessage %d]", m.Height) -} - -//------------------------------------- - -type bcBlockResponseMessage struct { - Block *types.Block -} - -// ValidateBasic performs basic validation. -func (m *bcBlockResponseMessage) ValidateBasic() error { - if m.Block == nil { - return errors.New("block response message has nil block") - } - - return m.Block.ValidateBasic() -} - -func (m *bcBlockResponseMessage) String() string { - return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height) -} - -//------------------------------------- - -type bcStatusRequestMessage struct { - Height int64 - Base int64 -} - -// ValidateBasic performs basic validation. -func (m *bcStatusRequestMessage) ValidateBasic() error { - if m.Base < 0 { - return errors.New("negative Base") - } - if m.Height < 0 { - return errors.New("negative Height") - } - if m.Base > m.Height { - return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) - } - return nil -} - -func (m *bcStatusRequestMessage) String() string { - return fmt.Sprintf("[bcStatusRequestMessage %v:%v]", m.Base, m.Height) -} - -//------------------------------------- - -type bcStatusResponseMessage struct { - Height int64 - Base int64 -} - -// ValidateBasic performs basic validation. -func (m *bcStatusResponseMessage) ValidateBasic() error { - if m.Base < 0 { - return errors.New("negative Base") - } - if m.Height < 0 { - return errors.New("negative Height") - } - if m.Base > m.Height { - return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) - } - return nil -} - -func (m *bcStatusResponseMessage) String() string { - return fmt.Sprintf("[bcStatusResponseMessage %v:%v]", m.Base, m.Height) -} +const ( + // chBufferSize is the buffer size of all event channels. 
+ chBufferSize int = 1000 +) type blockStore interface { LoadBlock(height int64) *types.Block @@ -129,15 +31,16 @@ type blockStore interface { type BlockchainReactor struct { p2p.BaseReactor - events chan Event // XXX: Rename eventsFromPeers - stopDemux chan struct{} - scheduler *Routine - processor *Routine - logger log.Logger + fastSync bool // if true, enable fast sync on start + stateSynced bool // set to true when SwitchToFastSync is called by state sync + scheduler *Routine + processor *Routine + logger log.Logger - mtx sync.RWMutex + mtx tmsync.RWMutex maxPeerHeight int64 syncHeight int64 + events chan Event // non-nil during a fast sync reporter behaviour.Reporter io iIO @@ -149,29 +52,30 @@ type blockVerifier interface { VerifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error } -//nolint:deadcode type blockApplier interface { ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error) } // XXX: unify naming in this package around tmState -// XXX: V1 stores a copy of state as initialState, which is never mutated. Is that nessesary? func newReactor(state state.State, store blockStore, reporter behaviour.Reporter, - blockApplier blockApplier, bufferSize int) *BlockchainReactor { - scheduler := newScheduler(state.LastBlockHeight, time.Now()) + blockApplier blockApplier, fastSync bool) *BlockchainReactor { + initHeight := state.LastBlockHeight + 1 + if initHeight == 1 { + initHeight = state.InitialHeight + } + scheduler := newScheduler(initHeight, time.Now()) pContext := newProcessorContext(store, blockApplier, state) // TODO: Fix naming to just newProcesssor // newPcState requires a processorContext processor := newPcState(pContext) return &BlockchainReactor{ - events: make(chan Event, bufferSize), - stopDemux: make(chan struct{}), - scheduler: newRoutine("scheduler", scheduler.handle, bufferSize), - processor: newRoutine("processor", processor.handle, bufferSize), + scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize), + processor: newRoutine("processor", processor.handle, chBufferSize), store: store, reporter: reporter, logger: log.NewNopLogger(), + fastSync: fastSync, } } @@ -182,7 +86,7 @@ func NewBlockchainReactor( store blockStore, fastSync bool) *BlockchainReactor { reporter := behaviour.NewMockReporter() - return newReactor(state, store, reporter, blockApplier, 1000) + return newReactor(state, store, reporter, blockApplier, fastSync) } // SetSwitch implements Reactor interface. @@ -226,12 +130,55 @@ func (r *BlockchainReactor) SetLogger(logger log.Logger) { // Start implements cmn.Service interface func (r *BlockchainReactor) Start() error { r.reporter = behaviour.NewSwitchReporter(r.BaseReactor.Switch) + if r.fastSync { + err := r.startSync(nil) + if err != nil { + return fmt.Errorf("failed to start fast sync: %w", err) + } + } + return nil +} + +// startSync begins a fast sync, signalled by r.events being non-nil. If state is non-nil, +// the scheduler and processor are updated with this state on startup.
+func (r *BlockchainReactor) startSync(state *state.State) error { + r.mtx.Lock() + defer r.mtx.Unlock() + if r.events != nil { + return errors.New("fast sync already in progress") + } + r.events = make(chan Event, chBufferSize) go r.scheduler.start() go r.processor.start() - go r.demux() + if state != nil { + <-r.scheduler.ready() + <-r.processor.ready() + r.scheduler.send(bcResetState{state: *state}) + r.processor.send(bcResetState{state: *state}) + } + go r.demux(r.events) return nil } +// endSync ends a fast sync +func (r *BlockchainReactor) endSync() { + r.mtx.Lock() + defer r.mtx.Unlock() + if r.events != nil { + close(r.events) + } + r.events = nil + r.scheduler.stop() + r.processor.stop() +} + +// SwitchToFastSync is called by the state sync reactor when switching to fast sync. +func (r *BlockchainReactor) SwitchToFastSync(state state.State) error { + r.stateSynced = true + state = state.Copy() + return r.startSync(&state) +} + // reactor generated ticker events: // ticker for cleaning peers type rTryPrunePeer struct { @@ -240,7 +187,7 @@ type rTryPrunePeer struct { } func (e rTryPrunePeer) String() string { - return fmt.Sprintf(": %v", e.time) + return fmt.Sprintf("rTryPrunePeer{%v}", e.time) } // ticker event for scheduling block requests @@ -250,7 +197,7 @@ type rTrySchedule struct { } func (e rTrySchedule) String() string { - return fmt.Sprintf(": %v", e.time) + return fmt.Sprintf("rTrySchedule{%v}", e.time) } // ticker for block processing @@ -258,6 +205,10 @@ type rProcessBlock struct { priorityNormal } +func (e rProcessBlock) String() string { + return "rProcessBlock" +} + // reactor generated events based on blockchain related messages from peers: // blockResponse message received from a peer type bcBlockResponse struct { @@ -268,6 +219,11 @@ type bcBlockResponse struct { block *types.Block } +func (resp bcBlockResponse) String() string { + return fmt.Sprintf("bcBlockResponse{%d#%X (size: %d bytes) from %v at %v}", + resp.block.Height, resp.block.Hash(), resp.size, resp.peerID, resp.time) +} + // blockNoResponse message received from a peer type bcNoBlockResponse struct { priorityNormal @@ -276,6 +232,11 @@ type bcNoBlockResponse struct { height int64 } +func (resp bcNoBlockResponse) String() string { + return fmt.Sprintf("bcNoBlockResponse{%v has no block at height %d at %v}", + resp.peerID, resp.height, resp.time) +} + // statusResponse message received from a peer type bcStatusResponse struct { priorityNormal @@ -285,12 +246,21 @@ type bcStatusResponse struct { height int64 } +func (resp bcStatusResponse) String() string { + return fmt.Sprintf("bcStatusResponse{%v is at height %d (base: %d) at %v}", + resp.peerID, resp.height, resp.base, resp.time) +} + // new peer is connected type bcAddNewPeer struct { priorityNormal peerID p2p.ID } +func (resp bcAddNewPeer) String() string { + return fmt.Sprintf("bcAddNewPeer{%v}", resp.peerID) +} + // existing peer is removed type bcRemovePeer struct { priorityHigh @@ -298,7 +268,22 @@ type bcRemovePeer struct { reason interface{} } -func (r *BlockchainReactor) demux() { +func (resp bcRemovePeer) String() string { + return fmt.Sprintf("bcRemovePeer{%v due to %v}", resp.peerID, resp.reason) +} + +// resets the scheduler and processor state, e.g. following a switch from state syncing +type bcResetState struct { + priorityHigh + state state.State +} + +func (e bcResetState) String() string { + return fmt.Sprintf("bcResetState{%v}", e.state) +} + +// Takes the channel as a parameter to avoid race conditions on r.events. 
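The comment above explains why demux receives its channel as an argument: r.events is only non-nil while a sync is running, and every producer (Receive, AddPeer, RemovePeer) re-checks it under the mutex before sending. A stripped-down, runnable sketch of that guard pattern follows — all names here are illustrative, not the reactor's API:

package main

import (
    "fmt"
    "sync"
)

// guardedEvents mirrors the reactor's pattern: the channel is non-nil only
// while a sync runs, and senders check it under a read lock so they never
// send on a nil or closed channel.
type guardedEvents struct {
    mtx    sync.RWMutex
    events chan string // nil when no sync is in progress
}

// start creates the channel and hands the consumer its own reference, the
// way startSync passes r.events into demux(events).
func (g *guardedEvents) start() <-chan string {
    g.mtx.Lock()
    defer g.mtx.Unlock()
    g.events = make(chan string, 16)
    return g.events
}

// stop closes and nils the channel so later sends become no-ops.
func (g *guardedEvents) stop() {
    g.mtx.Lock()
    defer g.mtx.Unlock()
    if g.events != nil {
        close(g.events)
        g.events = nil
    }
}

// trySend drops the event when no sync is running, mirroring how Receive
// ignores peer messages outside of a fast sync.
func (g *guardedEvents) trySend(ev string) {
    g.mtx.RLock()
    defer g.mtx.RUnlock()
    if g.events != nil {
        g.events <- ev
    }
}

func main() {
    g := &guardedEvents{}
    ch := g.start()
    g.trySend("bcStatusResponse")
    fmt.Println(<-ch)
    g.stop()
    g.trySend("dropped") // safe no-op after stop
}

Handing the consumer its own channel value means the demux loop never reads the mutable field at all, which is what removes the race that the old stopDemux/close(r.events) pair had.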
+func (r *BlockchainReactor) demux(events <-chan Event) { var lastRate = 0.0 var lastHundred = time.Now() @@ -306,19 +291,30 @@ func (r *BlockchainReactor) demux() { processBlockFreq = 20 * time.Millisecond doProcessBlockCh = make(chan struct{}, 1) doProcessBlockTk = time.NewTicker(processBlockFreq) + ) + defer doProcessBlockTk.Stop() + var ( prunePeerFreq = 1 * time.Second doPrunePeerCh = make(chan struct{}, 1) doPrunePeerTk = time.NewTicker(prunePeerFreq) + ) + defer doPrunePeerTk.Stop() + var ( scheduleFreq = 20 * time.Millisecond doScheduleCh = make(chan struct{}, 1) doScheduleTk = time.NewTicker(scheduleFreq) + ) + defer doScheduleTk.Stop() + var ( statusFreq = 10 * time.Second doStatusCh = make(chan struct{}, 1) doStatusTk = time.NewTicker(statusFreq) ) + defer doStatusTk.Stop() + doStatusCh <- struct{}{} // immediately broadcast to get status of existing peers // XXX: Extract timers to make testing atemporal for { @@ -353,31 +349,54 @@ func (r *BlockchainReactor) demux() { case <-doProcessBlockCh: r.processor.send(rProcessBlock{}) case <-doStatusCh: - r.io.broadcastStatusRequest(r.store.Base(), r.SyncHeight()) + if err := r.io.broadcastStatusRequest(); err != nil { + r.logger.Error("Error broadcasting status request", "err", err) + } - // Events from peers - case event := <-r.events: + // Events from peers. Closing the channel signals event loop termination. + case event, ok := <-events: + if !ok { + r.logger.Info("Stopping event processing") + return + } switch event := event.(type) { case bcStatusResponse: r.setMaxPeerHeight(event.height) r.scheduler.send(event) case bcAddNewPeer, bcRemovePeer, bcBlockResponse, bcNoBlockResponse: r.scheduler.send(event) + default: + r.logger.Error("Received unexpected event", "event", fmt.Sprintf("%T", event)) } - // Incremental events form scheduler + // Incremental events from scheduler case event := <-r.scheduler.next(): switch event := event.(type) { case scBlockReceived: r.processor.send(event) case scPeerError: r.processor.send(event) - r.reporter.Report(behaviour.BadMessage(event.peerID, "scPeerError")) + if err := r.reporter.Report(behaviour.BadMessage(event.peerID, "scPeerError")); err != nil { + r.logger.Error("Error reporting peer", "err", err) + } case scBlockRequest: - r.io.sendBlockRequest(event.peerID, event.height) + if err := r.io.sendBlockRequest(event.peerID, event.height); err != nil { + r.logger.Error("Error sending block request", "err", err) + } case scFinishedEv: r.processor.send(event) r.scheduler.stop() + case scSchedulerFail: + r.logger.Error("Scheduler failure", "err", event.reason.Error()) + case scPeersPruned: + // Remove peers from the processor. 
+ for _, peerID := range event.peers { + r.processor.send(scPeerError{peerID: peerID, reason: errors.New("peer was pruned")}) + } + r.logger.Debug("Pruned peers", "count", len(event.peers)) + case noOpEvent: + default: + r.logger.Error("Received unexpected scheduler event", "event", fmt.Sprintf("%T", event)) } // Incremental events from processor @@ -387,7 +406,7 @@ func (r *BlockchainReactor) demux() { r.setSyncHeight(event.height) if r.syncHeight%100 == 0 { lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) - r.logger.Info("Fast Syncc Rate", "height", r.syncHeight, + r.logger.Info("Fast Sync Rate", "height", r.syncHeight, "max_peer_height", r.maxPeerHeight, "blocks/s", lastRate) lastHundred = time.Now() } @@ -395,22 +414,34 @@ func (r *BlockchainReactor) demux() { case pcBlockVerificationFailure: r.scheduler.send(event) case pcFinished: - r.io.trySwitchToConsensus(event.tmState, event.blocksSynced) - r.processor.stop() + r.logger.Info("Fast sync complete, switching to consensus") + if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) { + r.logger.Error("Failed to switch to consensus reactor") + } + r.endSync() + return + case noOpEvent: + default: + r.logger.Error("Received unexpected processor event", "event", fmt.Sprintf("%T", event)) } - // Terminal events from scheduler + // Terminal event from scheduler case err := <-r.scheduler.final(): - r.logger.Info(fmt.Sprintf("scheduler final %s", err)) - // send the processor stop? + switch err { + case nil: + r.logger.Info("Scheduler stopped") + default: + r.logger.Error("Scheduler aborted with error", "err", err) + } // Terminal event from processor - case event := <-r.processor.final(): - r.logger.Info(fmt.Sprintf("processor final %s", event)) - - case <-r.stopDemux: - r.logger.Info("demuxing stopped") - return + case err := <-r.processor.final(): + switch err { + case nil: + r.logger.Info("Processor stopped") + default: + r.logger.Error("Processor aborted with error", "err", err) + } } } } @@ -418,51 +449,16 @@ func (r *BlockchainReactor) demux() { // Stop implements cmn.Service interface. func (r *BlockchainReactor) Stop() error { r.logger.Info("reactor stopping") - - r.scheduler.stop() - r.processor.stop() - close(r.stopDemux) - close(r.events) - + r.endSync() r.logger.Info("reactor stopped") return nil } -const ( - // NOTE: keep up to date with bcBlockResponseMessage - bcBlockResponseMessagePrefixSize = 4 - bcBlockResponseMessageFieldKeySize = 1 - maxMsgSize = types.MaxBlockSizeBytes + - bcBlockResponseMessagePrefixSize + - bcBlockResponseMessageFieldKeySize -) - -// BlockchainMessage is a generic message for this reactor. -type BlockchainMessage interface { - ValidateBasic() error -} - -// RegisterBlockchainMessages registers the fast sync messages for amino encoding. 
-func RegisterBlockchainMessages(cdc *amino.Codec) { - cdc.RegisterInterface((*BlockchainMessage)(nil), nil) - cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil) - cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/blockchain/BlockResponse", nil) - cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/blockchain/NoBlockResponse", nil) - cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/blockchain/StatusResponse", nil) - cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/blockchain/StatusRequest", nil) -} - -func decodeMsg(bz []byte) (msg BlockchainMessage, err error) { - if len(bz) > maxMsgSize { - return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize) - } - err = cdc.UnmarshalBinaryBare(bz, &msg) - return -} - // Receive implements Reactor by handling different message types. +// XXX: do not call any methods that can block or incur heavy processing. +// https://github.com/tendermint/tendermint/issues/2888 func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - msg, err := decodeMsg(msgBytes) + msg, err := bc.DecodeMsg(msgBytes) if err != nil { r.logger.Error("error decoding message", "src", src.ID(), "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) @@ -470,7 +466,7 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { return } - if err = msg.ValidateBasic(); err != nil { + if err = bc.ValidateMsg(msg); err != nil { r.logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err) _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error())) return @@ -479,12 +475,12 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { r.logger.Debug("Receive", "src", src.ID(), "chID", chID, "msg", msg) switch msg := msg.(type) { - case *bcStatusRequestMessage: - if err := r.io.sendStatusResponse(r.store.Height(), src.ID()); err != nil { + case *bcproto.StatusRequest: + if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src.ID()); err != nil { r.logger.Error("Could not send status message to peer", "src", src) } - case *bcBlockRequestMessage: + case *bcproto.BlockRequest: block := r.store.LoadBlock(msg.Height) if block != nil { if err = r.io.sendBlockToPeer(block, src.ID()); err != nil { @@ -498,38 +494,63 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { } } - case *bcStatusResponseMessage: - r.events <- bcStatusResponse{peerID: src.ID(), base: msg.Base, height: msg.Height} - - case *bcBlockResponseMessage: - r.events <- bcBlockResponse{ - peerID: src.ID(), - block: msg.Block, - size: int64(len(msgBytes)), - time: time.Now(), + case *bcproto.StatusResponse: + r.mtx.RLock() + if r.events != nil { + r.events <- bcStatusResponse{peerID: src.ID(), base: msg.Base, height: msg.Height} + } + r.mtx.RUnlock() + + case *bcproto.BlockResponse: + r.mtx.RLock() + bi, err := types.BlockFromProto(msg.Block) + if err != nil { + r.logger.Error("error transitioning block from protobuf", "err", err) + _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error())) + return + } + if r.events != nil { + r.events <- bcBlockResponse{ + peerID: src.ID(), + block: bi, + size: int64(len(msgBytes)), + time: time.Now(), + } } + r.mtx.RUnlock() - case *bcNoBlockResponseMessage: - r.events <- bcNoBlockResponse{peerID: src.ID(), height: msg.Height, time: time.Now()} + case *bcproto.NoBlockResponse: + r.mtx.RLock() + if r.events != nil { + r.events <- bcNoBlockResponse{peerID: 
src.ID(), height: msg.Height, time: time.Now()} + } + r.mtx.RUnlock() } } // AddPeer implements Reactor interface func (r *BlockchainReactor) AddPeer(peer p2p.Peer) { - err := r.io.sendStatusResponse(r.store.Height(), peer.ID()) + err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer.ID()) if err != nil { r.logger.Error("Could not send status message to peer new", "src", peer.ID, "height", r.SyncHeight()) } - r.events <- bcAddNewPeer{peerID: peer.ID()} + r.mtx.RLock() + defer r.mtx.RUnlock() + if r.events != nil { + r.events <- bcAddNewPeer{peerID: peer.ID()} + } } // RemovePeer implements Reactor interface. func (r *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) { - event := bcRemovePeer{ - peerID: peer.ID(), - reason: reason, + r.mtx.RLock() + defer r.mtx.RUnlock() + if r.events != nil { + r.events <- bcRemovePeer{ + peerID: peer.ID(), + reason: reason, + } } - r.events <- event } // GetChannels implements Reactor @@ -540,7 +561,7 @@ func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { Priority: 10, SendQueueCapacity: 2000, RecvBufferCapacity: 50 * 4096, - RecvMessageCapacity: maxMsgSize, + RecvMessageCapacity: bc.MaxMsgSize, }, } } diff --git a/blockchain/v2/reactor_test.go b/blockchain/v2/reactor_test.go index 10b1d23df..35cedf178 100644 --- a/blockchain/v2/reactor_test.go +++ b/blockchain/v2/reactor_test.go @@ -1,6 +1,7 @@ package v2 import ( + "fmt" "net" "os" "sort" @@ -8,18 +9,20 @@ import ( "testing" "time" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/behaviour" + bc "github.com/tendermint/tendermint/blockchain" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/mock" + "github.com/tendermint/tendermint/mempool/mock" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p/conn" + bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" @@ -96,7 +99,7 @@ func (sio *mockSwitchIo) sendBlockRequest(peerID p2p.ID, height int64) error { return nil } -func (sio *mockSwitchIo) sendStatusResponse(height int64, peerID p2p.ID) error { +func (sio *mockSwitchIo) sendStatusResponse(base, height int64, peerID p2p.ID) error { sio.mtx.Lock() defer sio.mtx.Unlock() sio.numStatusResponse++ @@ -117,13 +120,15 @@ func (sio *mockSwitchIo) sendBlockNotFound(height int64, peerID p2p.ID) error { return nil } -func (sio *mockSwitchIo) trySwitchToConsensus(state sm.State, blocksSynced int) { +func (sio *mockSwitchIo) trySwitchToConsensus(state sm.State, skipWAL bool) bool { sio.mtx.Lock() defer sio.mtx.Unlock() sio.switchedToConsensus = true + return true } -func (sio *mockSwitchIo) broadcastStatusRequest(base int64, height int64) { +func (sio *mockSwitchIo) broadcastStatusRequest() error { + return nil } type testReactorParams struct { @@ -131,7 +136,6 @@ type testReactorParams struct { genDoc *types.GenesisDoc privVals []types.PrivValidator startHeight int64 - bufferSize int mockA bool } @@ -149,14 +153,17 @@ func newTestReactor(p testReactorParams) *BlockchainReactor { proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() if err != nil { - panic(errors.Wrap(err, "error start app")) + 
panic(fmt.Errorf("error start app: %w", err)) } db := dbm.NewMemDB() - appl = sm.NewBlockExecutor(db, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{}) - sm.SaveState(db, state) + stateStore := sm.NewStore(db) + appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}) + if err = stateStore.Save(state); err != nil { + panic(err) + } } - r := newReactor(state, store, reporter, appl, p.bufferSize) + r := newReactor(state, store, reporter, appl, true) logger := log.TestingLogger() r.SetLogger(logger.With("module", "blockchain")) @@ -353,7 +360,6 @@ func TestReactorHelperMode(t *testing.T) { genDoc: genDoc, privVals: privVals, startHeight: 20, - bufferSize: 100, mockA: true, } @@ -371,10 +377,10 @@ func TestReactorHelperMode(t *testing.T) { name: "status request", params: params, msgs: []testEvent{ - {"P1", bcStatusRequestMessage{}}, - {"P1", bcBlockRequestMessage{Height: 13}}, - {"P1", bcBlockRequestMessage{Height: 20}}, - {"P1", bcBlockRequestMessage{Height: 22}}, + {"P1", bcproto.StatusRequest{}}, + {"P1", bcproto.BlockRequest{Height: 13}}, + {"P1", bcproto.BlockRequest{Height: 20}}, + {"P1", bcproto.BlockRequest{Height: 22}}, }, }, } @@ -383,30 +389,39 @@ func TestReactorHelperMode(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { reactor := newTestReactor(params) - reactor.Start() mockSwitch := &mockSwitchIo{switchedToConsensus: false} reactor.io = mockSwitch + err := reactor.Start() + require.NoError(t, err) for i := 0; i < len(tt.msgs); i++ { step := tt.msgs[i] switch ev := step.event.(type) { - case bcStatusRequestMessage: + case bcproto.StatusRequest: old := mockSwitch.numStatusResponse - reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, cdc.MustMarshalBinaryBare(ev)) + msg, err := bc.EncodeMsg(&ev) + assert.NoError(t, err) + reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg) assert.Equal(t, old+1, mockSwitch.numStatusResponse) - case bcBlockRequestMessage: + case bcproto.BlockRequest: if ev.Height > params.startHeight { old := mockSwitch.numNoBlockResponse - reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, cdc.MustMarshalBinaryBare(ev)) + msg, err := bc.EncodeMsg(&ev) + assert.NoError(t, err) + reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg) assert.Equal(t, old+1, mockSwitch.numNoBlockResponse) } else { old := mockSwitch.numBlockResponse - reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, cdc.MustMarshalBinaryBare(ev)) + msg, err := bc.EncodeMsg(&ev) + assert.NoError(t, err) + reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg) + assert.Equal(t, old+1, mockSwitch.numBlockResponse) } } } - reactor.Stop() + err = reactor.Stop() + require.NoError(t, err) }) } } @@ -481,21 +496,24 @@ func newReactorStore( proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() if err != nil { - panic(errors.Wrap(err, "error start app")) + panic(fmt.Errorf("error start app: %w", err)) } stateDB := dbm.NewMemDB() blockStore := store.NewBlockStore(dbm.NewMemDB()) - - state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + stateStore := sm.NewStore(stateDB) + state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) if err != nil { - panic(errors.Wrap(err, "error constructing state from genesis file")) + panic(fmt.Errorf("error constructing state from genesis file: %w", err)) } db := dbm.NewMemDB() - blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(), - mock.Mempool{}, sm.MockEvidencePool{}) -
sm.SaveState(db, state) + stateStore = sm.NewStore(db) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), + mock.Mempool{}, sm.EmptyEvidencePool{}) + if err = stateStore.Save(state); err != nil { + panic(err) + } // add blocks in for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { @@ -521,11 +539,11 @@ func newReactorStore( thisBlock := makeBlock(blockHeight, state, lastCommit) thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) - blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()} + blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock) if err != nil { - panic(errors.Wrap(err, "error apply block")) + panic(fmt.Errorf("error apply block: %w", err)) } blockStore.SaveBlock(thisBlock, thisParts, lastCommit) diff --git a/blockchain/v2/routine.go b/blockchain/v2/routine.go index ff12bfebc..e4ca52add 100644 --- a/blockchain/v2/routine.go +++ b/blockchain/v2/routine.go @@ -2,6 +2,7 @@ package v2 import ( "fmt" + "strings" "sync/atomic" "github.com/Workiva/go-datastructures/queue" @@ -11,6 +12,8 @@ import ( type handleFunc = func(event Event) (Event, error) +const historySize = 25 + // Routine is a structure that models a finite state machine as serialized // stream of events processed by a handle function. This Routine structure // handles the concurrency and messaging guarantees. Events are sent via @@ -21,6 +24,7 @@ type Routine struct { name string handle handleFunc queue *queue.PriorityQueue + history []Event out chan Event fin chan error rdy chan struct{} @@ -34,6 +38,7 @@ func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine { name: name, handle: handleFunc, queue: queue.NewPriorityQueue(bufferSize, true), + history: make([]Event, 0, historySize), out: make(chan Event, bufferSize), rdy: make(chan struct{}, 1), fin: make(chan error, 1), @@ -43,7 +48,6 @@ func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine { } } -// nolint: unused func (rt *Routine) setLogger(logger log.Logger) { rt.logger = logger } @@ -54,13 +58,24 @@ func (rt *Routine) setMetrics(metrics *Metrics) { } func (rt *Routine) start() { - rt.logger.Info(fmt.Sprintf("%s: run\n", rt.name)) + rt.logger.Info(fmt.Sprintf("%s: run", rt.name)) running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1)) if !running { panic(fmt.Sprintf("%s is already running", rt.name)) } close(rt.rdy) defer func() { + if r := recover(); r != nil { + var ( + b strings.Builder + j int + ) + for i := len(rt.history) - 1; i >= 0; i-- { + fmt.Fprintf(&b, "%d: %+v\n", j, rt.history[i]) + j++ + } + panic(fmt.Sprintf("%v\nlast events:\n%v", r, b.String())) + } stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0)) if !stopped { panic(fmt.Sprintf("%s is failed to stop", rt.name)) @@ -69,9 +84,11 @@ func (rt *Routine) start() { for { events, err := rt.queue.Get(1) - if err != nil { - rt.logger.Info(fmt.Sprintf("%s: stopping\n", rt.name)) - rt.terminate(fmt.Errorf("stopped")) + if err == queue.ErrDisposed { + rt.terminate(nil) + return + } else if err != nil { + rt.terminate(err) return } oEvent, err := rt.handle(events[0].(Event)) @@ -81,7 +98,19 @@ func (rt *Routine) start() { return } rt.metrics.EventsOut.With("routine", rt.name).Add(1) - rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v\n", rt.name, oEvent, oEvent)) + rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v", rt.name, oEvent, oEvent)) 
+ + // Skip rTrySchedule and rProcessBlock events as they clutter the history + // due to their frequency. + switch events[0].(type) { + case rTrySchedule: + case rProcessBlock: + default: + rt.history = append(rt.history, events[0].(Event)) + if len(rt.history) > historySize { + rt.history = rt.history[1:] + } + } rt.out <- oEvent } @@ -96,7 +125,7 @@ func (rt *Routine) send(event Event) bool { err := rt.queue.Put(event) if err != nil { rt.metrics.EventsShed.With("routine", rt.name).Add(1) - rt.logger.Info(fmt.Sprintf("%s: send failed, queue was full/stopped \n", rt.name)) + rt.logger.Error(fmt.Sprintf("%s: send failed, queue was full/stopped", rt.name)) return false } @@ -121,7 +150,7 @@ func (rt *Routine) stop() { return } - rt.logger.Info(fmt.Sprintf("%s: stop\n", rt.name)) + rt.logger.Info(fmt.Sprintf("%s: stop", rt.name)) rt.queue.Dispose() // this should block until all queue items are free? } @@ -131,6 +160,7 @@ func (rt *Routine) final() chan error { // XXX: Maybe get rid of this func (rt *Routine) terminate(reason error) { - close(rt.out) + // We don't close the rt.out channel here, to avoid spinning on the closed channel + // in the event loop. rt.fin <- reason } diff --git a/blockchain/v2/scheduler.go b/blockchain/v2/scheduler.go index 803955b22..75fe9d46d 100644 --- a/blockchain/v2/scheduler.go +++ b/blockchain/v2/scheduler.go @@ -2,6 +2,7 @@ package v2 import ( "bytes" + "errors" "fmt" "math" "sort" @@ -18,6 +19,10 @@ type scFinishedEv struct { reason string } +func (e scFinishedEv) String() string { + return fmt.Sprintf("scFinishedEv{%v}", e.reason) +} + // send a blockRequest message type scBlockRequest struct { priorityNormal @@ -25,6 +30,10 @@ type scBlockRequest struct { height int64 } +func (e scBlockRequest) String() string { + return fmt.Sprintf("scBlockRequest{%d from %v}", e.height, e.peerID) +} + // a block has been received and validated by the scheduler type scBlockReceived struct { priorityNormal @@ -32,6 +41,10 @@ type scBlockReceived struct { block *types.Block } +func (e scBlockReceived) String() string { + return fmt.Sprintf("scBlockReceived{%d#%X from %v}", e.block.Height, e.block.Hash(), e.peerID) +} + // scheduler detected a peer error type scPeerError struct { priorityHigh @@ -40,7 +53,7 @@ type scPeerError struct { } func (e scPeerError) String() string { - return fmt.Sprintf("scPeerError - peerID %s, err %s", e.peerID, e.reason) + return fmt.Sprintf("scPeerError{%v errored with %v}", e.peerID, e.reason) } // scheduler removed a set of peers (timed out or slow peer) @@ -49,6 +62,10 @@ type scPeersPruned struct { peers []p2p.ID } +func (e scPeersPruned) String() string { + return fmt.Sprintf("scPeersPruned{%v}", e.peers) +} + // XXX: make this fatal? 
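The history mechanism added to routine.go above is the interesting part of this hunk: the routine records recent input events in a bounded slice (skipping the high-frequency rTrySchedule/rProcessBlock ticks), and if the handler panics, the deferred recover re-panics with the last events printed newest-first, so crash reports carry context. A minimal, self-contained sketch of that pattern using only the standard library (pushBounded and the 3-entry window are illustrative; the PR keeps 25 events):

```go
package main

import (
	"fmt"
	"strings"
)

const historySize = 3 // routine.go uses 25

// pushBounded appends ev and drops the oldest entry once the buffer
// is full, mirroring the trimming done in Routine.start().
func pushBounded(history []string, ev string) []string {
	history = append(history, ev)
	if len(history) > historySize {
		history = history[1:]
	}
	return history
}

func main() {
	var history []string

	defer func() {
		if r := recover(); r != nil {
			var b strings.Builder
			// Dump newest-first, as the recover handler in start() does.
			for i, j := len(history)-1, 0; i >= 0; i, j = i-1, j+1 {
				fmt.Fprintf(&b, "%d: %v\n", j, history[i])
			}
			fmt.Printf("%v\nlast events:\n%s", r, b.String())
		}
	}()

	for _, ev := range []string{"a", "b", "c", "d"} {
		history = pushBounded(history, ev)
	}
	panic("handler failed") // prints d, c, b ("a" was already trimmed)
}
```

Slicing off the head is O(1) but retains the backing array; at 25 entries that is harmless, whereas a much larger window would justify a proper ring buffer.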
// scheduler encountered a fatal error type scSchedulerFail struct { @@ -56,6 +73,10 @@ type scSchedulerFail struct { reason error } +func (e scSchedulerFail) String() string { + return fmt.Sprintf("scSchedulerFail{%v}", e.reason) +} + type blockState int const ( @@ -181,7 +202,7 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler { initHeight: initHeight, lastAdvance: startTime, syncTimeout: 60 * time.Second, - height: initHeight + 1, + height: initHeight, blockStates: make(map[int64]blockState), peers: make(map[p2p.ID]*scPeer), pendingBlocks: make(map[int64]p2p.ID), @@ -189,19 +210,17 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler { receivedBlocks: make(map[int64]p2p.ID), targetPending: 10, // TODO - pass as param peerTimeout: 15 * time.Second, // TODO - pass as param - minRecvRate: 0, //int64(7680), TODO - pass as param + minRecvRate: 0, // int64(7680), TODO - pass as param } return &sc } -func (sc *scheduler) addPeer(peerID p2p.ID) error { - if _, ok := sc.peers[peerID]; ok { - // In the future we should be able to add a previously removed peer - return fmt.Errorf("cannot add duplicate peer %s", peerID) +func (sc *scheduler) ensurePeer(peerID p2p.ID) *scPeer { + if _, ok := sc.peers[peerID]; !ok { + sc.peers[peerID] = newScPeer(peerID) } - sc.peers[peerID] = newScPeer(peerID) - return nil + return sc.peers[peerID] } func (sc *scheduler) touchPeer(peerID p2p.ID, time time.Time) error { @@ -219,14 +238,13 @@ func (sc *scheduler) touchPeer(peerID p2p.ID, time time.Time) error { return nil } -func (sc *scheduler) removePeer(peerID p2p.ID) error { +func (sc *scheduler) removePeer(peerID p2p.ID) { peer, ok := sc.peers[peerID] if !ok { - return fmt.Errorf("couldn't find peer %s", peerID) + return } - if peer.state == peerStateRemoved { - return fmt.Errorf("tried to remove peer %s in peerStateRemoved", peerID) + return } for height, pendingPeerID := range sc.pendingBlocks { @@ -260,8 +278,6 @@ func (sc *scheduler) removePeer(peerID p2p.ID) error { delete(sc.blockStates, h) } } - - return nil } // check if the blockPool is running low and add new blocks in New state to be requested. @@ -283,13 +299,10 @@ func (sc *scheduler) addNewBlocks() { } func (sc *scheduler) setPeerRange(peerID p2p.ID, base int64, height int64) error { - peer, ok := sc.peers[peerID] - if !ok { - return fmt.Errorf("cannot find peer %s", peerID) - } + peer := sc.ensurePeer(peerID) if peer.state == peerStateRemoved { - return fmt.Errorf("cannot set peer height for a peer in peerStateRemoved") + return nil // noop } if height < peer.height { @@ -298,6 +311,7 @@ func (sc *scheduler) setPeerRange(peerID p2p.ID, base int64, height int64) error } if base > height { + sc.removePeer(peerID) return fmt.Errorf("cannot set peer base higher than its height") } @@ -351,15 +365,9 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) { sc.blockStates[height] = state } +// CONTRACT: peer exists and in Ready state. 
func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error { - peer, ok := sc.peers[peerID] - if !ok { - return fmt.Errorf("couldn't find peer %s", peerID) - } - - if peer.state != peerStateReady { - return fmt.Errorf("cannot receive blocks from not ready peer %s", peerID) - } + peer := sc.peers[peerID] if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID { return fmt.Errorf("received block %d from peer %s without being requested", height, peerID) @@ -415,17 +423,17 @@ func (sc *scheduler) markPending(peerID p2p.ID, height int64, time time.Time) er } func (sc *scheduler) markProcessed(height int64) error { + // It is possible that a peer error or timeout is handled after the processor + // has processed the block but before the scheduler received this event, so + // when pcBlockProcessed event is received, the block had been requested + // again => don't check the block state. sc.lastAdvance = time.Now() - state := sc.getStateAtHeight(height) - if state != blockStateReceived { - return fmt.Errorf("cannot mark height %d received from block state %s", height, state) - } - - sc.height++ + sc.height = height + 1 + delete(sc.pendingBlocks, height) + delete(sc.pendingTime, height) delete(sc.receivedBlocks, height) delete(sc.blockStates, height) sc.addNewBlocks() - return nil } @@ -511,9 +519,7 @@ func (peers PeerByID) Less(i, j int) bool { } func (peers PeerByID) Swap(i, j int) { - it := peers[i] - peers[i] = peers[j] - peers[j] = it + peers[i], peers[j] = peers[j], peers[i] } // Handlers @@ -522,12 +528,13 @@ func (peers PeerByID) Swap(i, j int) { func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) { err := sc.touchPeer(event.peerID, event.time) if err != nil { - return scPeerError{peerID: event.peerID, reason: err}, nil + // peer does not exist OR not ready + return noOp, nil } err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time) if err != nil { - _ = sc.removePeer(event.peerID) + sc.removePeer(event.peerID) return scPeerError{peerID: event.peerID, reason: err}, nil } @@ -535,16 +542,14 @@ func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) { } func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, error) { - if len(sc.peers) == 0 { - return noOp, nil - } - + // No such peer or peer was removed. peer, ok := sc.peers[event.peerID] if !ok || peer.state == peerStateRemoved { return noOp, nil } + // The peer may have been just removed due to errors, low speed or timeouts. - _ = sc.removePeer(event.peerID) + sc.removePeer(event.peerID) return scPeerError{peerID: event.peerID, reason: fmt.Errorf("peer %v with base %d height %d claims no block for %d", @@ -553,13 +558,11 @@ func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, erro func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) { if event.height != sc.height { - panic(fmt.Sprintf("processed height %d but expected height %d", event.height, sc.height)) + panic(fmt.Sprintf("processed height %d, but expected height %d", event.height, sc.height)) } + err := sc.markProcessed(event.height) if err != nil { - // It is possible that a peer error or timeout is handled after the processor - // has processed the block but before the scheduler received this event, - // so when pcBlockProcessed event is received the block had been requested again. 
return scSchedulerFail{reason: err}, nil } @@ -573,13 +576,10 @@ func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) // Handles an error from the processor. The processor had already cleaned the blocks from // the peers included in this event. Just attempt to remove the peers. func (sc *scheduler) handleBlockProcessError(event pcBlockVerificationFailure) (Event, error) { - if len(sc.peers) == 0 { - return noOp, nil - } // The peers may have been just removed due to errors, low speed or timeouts. - _ = sc.removePeer(event.firstPeerID) + sc.removePeer(event.firstPeerID) if event.firstPeerID != event.secondPeerID { - _ = sc.removePeer(event.secondPeerID) + sc.removePeer(event.secondPeerID) } if sc.allBlocksProcessed() { @@ -590,28 +590,23 @@ func (sc *scheduler) handleBlockProcessError(event pcBlockVerificationFailure) ( } func (sc *scheduler) handleAddNewPeer(event bcAddNewPeer) (Event, error) { - err := sc.addPeer(event.peerID) - if err != nil { - return scSchedulerFail{reason: err}, nil - } + sc.ensurePeer(event.peerID) return noOp, nil } func (sc *scheduler) handleRemovePeer(event bcRemovePeer) (Event, error) { - err := sc.removePeer(event.peerID) - if err != nil { - // XXX - It is possible that the removePeer fails here for legitimate reasons - // for example if a peer timeout or error was handled just before this. - return scSchedulerFail{reason: err}, nil - } + sc.removePeer(event.peerID) + if sc.allBlocksProcessed() { return scFinishedEv{reason: "removed peer"}, nil } - return noOp, nil + + // Return scPeerError so the peer (and all associated blocks) is removed from + // the processor. + return scPeerError{peerID: event.peerID, reason: errors.New("peer was stopped")}, nil } func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) { - // Check behavior of peer responsible to deliver block at sc.height. timeHeightAsked, ok := sc.pendingTime[sc.height] if ok && time.Since(timeHeightAsked) > sc.peerTimeout { @@ -627,11 +622,7 @@ func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) { return noOp, nil } for _, peerID := range prunablePeers { - err := sc.removePeer(peerID) - if err != nil { - // Should never happen as prunablePeers() returns only existing peers in Ready state. - panic("scheduler data corruption") - } + sc.removePeer(peerID) } // If all blocks are processed we should finish. 
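The peer-management refactor in these hunks swaps error-returning addPeer/removePeer for idempotent ensurePeer/removePeer: duplicate adds, unknown peers, and repeated removals all become no-ops, so races between peer timeouts, processor errors, and switch callbacks can no longer surface as spurious scSchedulerFail events. A compact sketch of that idempotent shape (the types and names here are illustrative, not the scheduler's actual ones):

```go
package main

import "fmt"

type peerState int

const (
	peerStateNew peerState = iota
	peerStateReady
	peerStateRemoved
)

type peer struct{ state peerState }

type sched struct{ peers map[string]*peer }

// ensurePeer returns the existing entry or creates one: calling it
// twice for the same ID is a no-op, unlike the old addPeer which errored.
func (s *sched) ensurePeer(id string) *peer {
	if _, ok := s.peers[id]; !ok {
		s.peers[id] = &peer{state: peerStateNew}
	}
	return s.peers[id]
}

// removePeer tolerates unknown and already-removed peers, so races
// between timeouts, errors, and switch callbacks cannot corrupt state.
func (s *sched) removePeer(id string) {
	p, ok := s.peers[id]
	if !ok || p.state == peerStateRemoved {
		return
	}
	p.state = peerStateRemoved
}

func main() {
	s := &sched{peers: map[string]*peer{}}
	s.ensurePeer("P1")
	s.ensurePeer("P1") // duplicate add: fine
	s.removePeer("P2") // unknown peer: fine
	s.removePeer("P1")
	s.removePeer("P1") // double remove: fine
	fmt.Println(s.peers["P1"].state == peerStateRemoved) // true
}
```

The same tolerance shows up in markProcessed above, which no longer insists the block is in blockStateReceived, because (per the comment added there) a pcBlockProcessed event can legitimately arrive after the block was re-requested.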
@@ -640,7 +631,18 @@ func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) { } return scPeersPruned{peers: prunablePeers}, nil +} +func (sc *scheduler) handleResetState(event bcResetState) (Event, error) { + initHeight := event.state.LastBlockHeight + 1 + if initHeight == 1 { + initHeight = event.state.InitialHeight + } + sc.initHeight = initHeight + sc.height = initHeight + sc.lastAdvance = time.Now() + sc.addNewBlocks() + return noOp, nil } func (sc *scheduler) handleTrySchedule(event rTrySchedule) (Event, error) { @@ -674,6 +676,9 @@ func (sc *scheduler) handleStatusResponse(event bcStatusResponse) (Event, error) func (sc *scheduler) handle(event Event) (Event, error) { switch event := event.(type) { + case bcResetState: + nextEvent, err := sc.handleResetState(event) + return nextEvent, err case bcStatusResponse: nextEvent, err := sc.handleStatusResponse(event) return nextEvent, err diff --git a/blockchain/v2/scheduler_test.go b/blockchain/v2/scheduler_test.go index 4ec81e123..d2b2c9595 100644 --- a/blockchain/v2/scheduler_test.go +++ b/blockchain/v2/scheduler_test.go @@ -8,8 +8,10 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -42,7 +44,11 @@ func newTestScheduler(params scTestParams) *scheduler { peers := make(map[p2p.ID]*scPeer) var maxHeight int64 - sc := newScheduler(params.initHeight, params.startTime) + initHeight := params.initHeight + if initHeight == 0 { + initHeight = 1 + } + sc := newScheduler(initHeight, params.startTime) if params.height != 0 { sc.height = params.height } @@ -95,7 +101,8 @@ func TestScInit(t *testing.T) { initHeight int64 = 5 sc = newScheduler(initHeight, time.Now()) ) - assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(initHeight)) + assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(initHeight-1)) + assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight)) assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight+1)) } @@ -114,9 +121,8 @@ func TestScMaxHeights(t *testing.T) { { name: "one ready peer", sc: scheduler{ - initHeight: 2, - height: 3, - peers: map[p2p.ID]*scPeer{"P1": {height: 6, state: peerStateReady}}, + height: 3, + peers: map[p2p.ID]*scPeer{"P1": {height: 6, state: peerStateReady}}, }, wantMax: 6, }, @@ -178,7 +184,7 @@ func TestScMaxHeights(t *testing.T) { } } -func TestScAddPeer(t *testing.T) { +func TestScEnsurePeer(t *testing.T) { type args struct { peerID p2p.ID @@ -188,7 +194,6 @@ func TestScAddPeer(t *testing.T) { fields scTestParams args args wantFields scTestParams - wantErr bool }{ { name: "add first peer", @@ -205,20 +210,18 @@ func TestScAddPeer(t *testing.T) { "P2": {base: -1, height: -1, state: peerStateNew}}}, }, { - name: "attempt to add duplicate peer", + name: "add duplicate peer is fine", fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, args: args{peerID: "P1"}, wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - wantErr: true, }, { - name: "attempt to add duplicate peer with existing peer in Ready state", + name: "add duplicate peer with existing peer in Ready state is noop", fields: scTestParams{ peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 3}}, allB: []int64{1, 2, 3}, }, - args: args{peerID: "P1"}, - wantErr: true, + args: args{peerID: "P1"}, wantFields: scTestParams{ peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 
3}}, allB: []int64{1, 2, 3}, @@ -230,9 +233,7 @@ func TestScAddPeer(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { sc := newTestScheduler(tt.fields) - if err := sc.addPeer(tt.args.peerID); (err != nil) != tt.wantErr { - t.Errorf("scAddPeer() wantErr %v, error = %v", tt.wantErr, err) - } + sc.ensurePeer(tt.args.peerID) wantSc := newTestScheduler(tt.wantFields) assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) }) @@ -374,7 +375,6 @@ func TestScRemovePeer(t *testing.T) { fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, args: args{peerID: "P2"}, wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - wantErr: true, }, { name: "remove single New peer", @@ -418,7 +418,6 @@ func TestScRemovePeer(t *testing.T) { "P1": {height: 10, state: peerStateRemoved}, "P2": {height: 11, state: peerStateReady}}, allB: []int64{8, 9, 10, 11}}, - wantErr: true, }, { name: "remove Ready peer with blocks requested", @@ -492,9 +491,7 @@ func TestScRemovePeer(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { sc := newTestScheduler(tt.fields) - if err := sc.removePeer(tt.args.peerID); (err != nil) != tt.wantErr { - t.Errorf("removePeer() wantErr %v, error = %v", tt.wantErr, err) - } + sc.removePeer(tt.args.peerID) wantSc := newTestScheduler(tt.wantFields) assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) }) @@ -522,9 +519,11 @@ func TestScSetPeerRange(t *testing.T) { allB: []int64{1, 2}}, args: args{peerID: "P2", height: 4}, wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - wantErr: true, + peers: map[string]*scPeer{ + "P1": {height: 2, state: peerStateReady}, + "P2": {height: 4, state: peerStateReady}, + }, + allB: []int64{1, 2, 3, 4}}, }, { name: "increase height of removed peer", @@ -532,7 +531,6 @@ func TestScSetPeerRange(t *testing.T) { peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, args: args{peerID: "P1", height: 4}, wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - wantErr: true, }, { name: "decrease height of single peer", @@ -584,8 +582,7 @@ func TestScSetPeerRange(t *testing.T) { allB: []int64{1, 2, 3, 4}}, args: args{peerID: "P1", base: 6, height: 5}, wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, + peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, wantErr: true, }, { @@ -991,19 +988,20 @@ func TestScMarkProcessed(t *testing.T) { { name: "processed an unreceived block", fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]p2p.ID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}, - received: map[int64]p2p.ID{1: "P1"}}, + height: 2, + peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, + allB: []int64{2}, + pending: map[int64]p2p.ID{2: "P1"}, + pendingTime: map[int64]time.Time{2: now}, + targetPending: 1, + }, args: args{height: 2}, wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]p2p.ID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}, - received: map[int64]p2p.ID{1: "P1"}}, - wantErr: true, + height: 3, + peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, + allB: []int64{3}, + targetPending: 1, + }, }, { name: "mark processed 
success", @@ -1043,6 +1041,40 @@ func TestScMarkProcessed(t *testing.T) { } } +func TestScResetState(t *testing.T) { + tests := []struct { + name string + fields scTestParams + state state.State + wantFields scTestParams + }{ + { + name: "updates height and initHeight", + fields: scTestParams{ + height: 0, + initHeight: 0, + }, + state: state.State{LastBlockHeight: 7}, + wantFields: scTestParams{ + height: 8, + initHeight: 8, + }, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + sc := newTestScheduler(tt.fields) + e, err := sc.handleResetState(bcResetState{state: tt.state}) + require.NoError(t, err) + assert.Equal(t, e, noOp) + wantSc := newTestScheduler(tt.wantFields) + checkSameScheduler(t, wantSc, sc) + }) + } +} + func TestScAllBlocksProcessed(t *testing.T) { now := time.Now() @@ -1129,14 +1161,13 @@ func TestScNextHeightToSchedule(t *testing.T) { }{ { name: "no blocks", - fields: scTestParams{initHeight: 10, height: 11}, + fields: scTestParams{initHeight: 11, height: 11}, wantHeight: -1, }, { name: "only New blocks", fields: scTestParams{ - initHeight: 2, - height: 3, + initHeight: 3, peers: map[string]*scPeer{"P1": {height: 6, state: peerStateReady}}, allB: []int64{3, 4, 5, 6}, }, @@ -1145,7 +1176,7 @@ func TestScNextHeightToSchedule(t *testing.T) { { name: "only Pending blocks", fields: scTestParams{ - height: 1, + initHeight: 1, peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, allB: []int64{1, 2, 3, 4}, pending: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, @@ -1156,26 +1187,26 @@ func TestScNextHeightToSchedule(t *testing.T) { { name: "only Received blocks", fields: scTestParams{ - height: 1, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - received: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, + initHeight: 1, + peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4}, + received: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, }, wantHeight: -1, }, { name: "only Processed blocks", fields: scTestParams{ - height: 1, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, + initHeight: 1, + peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4}, }, wantHeight: 1, }, { name: "mixed block states", fields: scTestParams{ - height: 1, + initHeight: 1, peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, allB: []int64{1, 2, 3, 4}, pending: map[int64]p2p.ID{2: "P1"}, @@ -1342,6 +1373,9 @@ func checkScResults(t *testing.T, wantErr bool, err error, wantEvent Event, even t.Errorf("error = %v, wantErr %v", err, wantErr) return } + if !assert.IsType(t, wantEvent, event) { + t.Log(fmt.Sprintf("Wrong type received, got: %v", event)) + } switch wantEvent := wantEvent.(type) { case scPeerError: assert.Equal(t, wantEvent.peerID, event.(scPeerError).peerID) @@ -1378,13 +1412,13 @@ func TestScHandleBlockResponse(t *testing.T) { name: "empty scheduler", fields: scTestParams{}, args: args{event: block6FromP1}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, + wantEvent: noOpEvent{}, }, { name: "block from removed peer", fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, args: args{event: block6FromP1}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, + wantEvent: noOpEvent{}, }, { name: "block we haven't asked for", @@ -1403,7 +1437,7 
@@ func TestScHandleBlockResponse(t *testing.T) { pendingTime: map[int64]time.Time{6: now}, }, args: args{event: block6FromP1}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, + wantEvent: noOpEvent{}, }, { name: "block with bad timestamp", @@ -1425,7 +1459,7 @@ func TestScHandleBlockResponse(t *testing.T) { pendingTime: map[int64]time.Time{6: now}, }, args: args{event: block6FromP1}, - wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(6)}, + wantEvent: scBlockReceived{peerID: "P1", block: block6FromP1.block}, }, } @@ -1537,26 +1571,24 @@ func TestScHandleBlockProcessed(t *testing.T) { name: "empty scheduler", fields: scTestParams{height: 6}, args: args{event: processed6FromP1}, - wantEvent: scSchedulerFail{reason: fmt.Errorf("some error")}, + wantEvent: noOpEvent{}, }, { name: "processed block we don't have", fields: scTestParams{ - initHeight: 5, - height: 6, + initHeight: 6, peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, allB: []int64{6, 7, 8}, pending: map[int64]p2p.ID{6: "P1"}, pendingTime: map[int64]time.Time{6: now}, }, args: args{event: processed6FromP1}, - wantEvent: scSchedulerFail{reason: fmt.Errorf("some error")}, + wantEvent: noOpEvent{}, }, { name: "processed block ok, we processed all blocks", fields: scTestParams{ - initHeight: 5, - height: 6, + initHeight: 6, peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}}, allB: []int64{6, 7}, received: map[int64]p2p.ID{6: "P1", 7: "P1"}, @@ -1567,8 +1599,7 @@ func TestScHandleBlockProcessed(t *testing.T) { { name: "processed block ok, we still have blocks to process", fields: scTestParams{ - initHeight: 5, - height: 6, + initHeight: 6, peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, allB: []int64{6, 7, 8}, pending: map[int64]p2p.ID{7: "P1", 8: "P1"}, @@ -1612,8 +1643,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) { { name: "failed block we don't have, single peer is still removed", fields: scTestParams{ - initHeight: 5, - height: 6, + initHeight: 6, peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, allB: []int64{6, 7, 8}, pending: map[int64]p2p.ID{6: "P1"}, @@ -1625,7 +1655,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) { { name: "failed block we don't have, one of two peers are removed", fields: scTestParams{ - initHeight: 5, + initHeight: 6, peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}}, allB: []int64{6, 7, 8}, pending: map[int64]p2p.ID{6: "P1"}, @@ -1637,8 +1667,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) { { name: "failed block, all blocks are processed after removal", fields: scTestParams{ - initHeight: 5, - height: 6, + initHeight: 6, peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}}, allB: []int64{6, 7}, received: map[int64]p2p.ID{6: "P1", 7: "P1"}, @@ -1649,7 +1678,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) { { name: "failed block, we still have blocks to process", fields: scTestParams{ - initHeight: 4, + initHeight: 5, peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}}, allB: []int64{5, 6, 7, 8}, pending: map[int64]p2p.ID{7: "P1", 8: "P1"}, @@ -1661,7 +1690,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) { { name: "failed block, H+1 and H+2 delivered by different peers, we still have blocks to process", fields: scTestParams{ - initHeight: 4, + initHeight: 5, peers: map[string]*scPeer{ "P1": {height: 
8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}, @@ -1710,19 +1739,19 @@ func TestScHandleAddNewPeer(t *testing.T) { { name: "add duplicate peer", fields: scTestParams{ - height: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, + initHeight: 6, + peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, + allB: []int64{6, 7, 8}, }, args: args{event: addP1}, - wantEvent: scSchedulerFail{reason: fmt.Errorf("some error")}, + wantEvent: noOpEvent{}, }, { name: "add P1 to non empty scheduler", fields: scTestParams{ - height: 6, - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, + initHeight: 6, + peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, + allB: []int64{6, 7, 8}, }, args: args{event: addP1}, wantEvent: noOpEvent{}, @@ -1961,14 +1990,14 @@ func TestScHandleStatusResponse(t *testing.T) { allB: []int64{1, 2}, }, args: args{event: statusRespP1Ev}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, + wantEvent: noOpEvent{}, }, { name: "increase height of removed peer", fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, args: args{event: statusRespP1Ev}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, + wantEvent: noOpEvent{}, }, { @@ -2017,6 +2046,8 @@ func TestScHandle(t *testing.T) { priorityNormal } + block1, block2, block3 := makeScBlock(1), makeScBlock(2), makeScBlock(3) + t0 := time.Now() tick := make([]time.Time, 100) for i := range tick { @@ -2105,8 +2136,8 @@ func TestScHandle(t *testing.T) { }, }, { // block response 1 - args: args{event: bcBlockResponse{peerID: "P1", time: tick[4], size: 100, block: makeScBlock(1)}}, - wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(1)}, + args: args{event: bcBlockResponse{peerID: "P1", time: tick[4], size: 100, block: block1}}, + wantEvent: scBlockReceived{peerID: "P1", block: block1}, wantSc: &scTestParams{ startTime: now, peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}}, @@ -2118,8 +2149,8 @@ func TestScHandle(t *testing.T) { }, }, { // block response 2 - args: args{event: bcBlockResponse{peerID: "P1", time: tick[5], size: 100, block: makeScBlock(2)}}, - wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(2)}, + args: args{event: bcBlockResponse{peerID: "P1", time: tick[5], size: 100, block: block2}}, + wantEvent: scBlockReceived{peerID: "P1", block: block2}, wantSc: &scTestParams{ startTime: now, peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}}, @@ -2131,8 +2162,8 @@ func TestScHandle(t *testing.T) { }, }, { // block response 3 - args: args{event: bcBlockResponse{peerID: "P1", time: tick[6], size: 100, block: makeScBlock(3)}}, - wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(3)}, + args: args{event: bcBlockResponse{peerID: "P1", time: tick[6], size: 100, block: block3}}, + wantEvent: scBlockReceived{peerID: "P1", block: block3}, wantSc: &scTestParams{ startTime: now, peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, diff --git a/buf.gen.yaml b/buf.gen.yaml new file mode 100644 index 000000000..dc56781dd --- /dev/null +++ b/buf.gen.yaml @@ -0,0 +1,13 @@ +# The version of the generation template. +# Required. +# The only currently-valid value is v1beta1. +version: v1beta1 + +# The plugins to run. +plugins: + # The name of the plugin. 
+ - name: gogofaster + # The relative output directory. + out: proto + # Any options to provide to the plugin. + opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative
diff --git a/buf.yaml b/buf.yaml index 7349a3516..cc4aced57 100644 --- a/buf.yaml +++ b/buf.yaml @@ -1,15 +1,16 @@ +version: v1beta1 + build: roots: - - . + - proto + - third_party/proto lint: use: - - MINIMAL + - BASIC - FILE_LOWER_SNAKE_CASE - UNARY_RPC - except: - - PACKAGE_DIRECTORY_MATCH ignore: - - third_party + - gogoproto breaking: use: - FILE
diff --git a/cmd/contract_tests/main.go b/cmd/contract_tests/main.go index 487537824..1d3547629 100644 --- a/cmd/contract_tests/main.go +++ b/cmd/contract_tests/main.go @@ -22,7 +22,7 @@ func main() { // We need a proper example of path and data strings.HasPrefix(t.Name, "ABCI > /abci_query") || // We need to find a way to make a transaction before starting the tests, - // that hash should replace the dummy one in hte swagger file + // that hash should replace the dummy one in the openapi file strings.HasPrefix(t.Name, "Info > /tx") { t.Skip = true fmt.Printf("%s Has been skipped\n", t.Name)
diff --git a/cmd/tendermint/commands/codec.go b/cmd/tendermint/commands/codec.go deleted file mode 100644 index 041b9e9ce..000000000 --- a/cmd/tendermint/commands/codec.go +++ /dev/null @@ -1,13 +0,0 @@ -package commands - -import ( - amino "github.com/tendermint/go-amino" - - cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" -) - -var cdc = amino.NewCodec() - -func init() { - cryptoamino.RegisterAmino(cdc) -}
diff --git a/cmd/tendermint/commands/debug/debug.go b/cmd/tendermint/commands/debug/debug.go index b5fc3c9e6..414b2b874 100644 --- a/cmd/tendermint/commands/debug/debug.go +++ b/cmd/tendermint/commands/debug/debug.go @@ -33,7 +33,7 @@ func init() { &nodeRPCAddr, flagNodeRPCAddr, "tcp://localhost:26657", - "The Tendermint node's RPC address (<host>:<port>)", + "the Tendermint node's RPC address (<host>:<port>)", ) DebugCmd.AddCommand(killCmd)
diff --git a/cmd/tendermint/commands/debug/dump.go b/cmd/tendermint/commands/debug/dump.go index a21d8217e..678f70791 100644 --- a/cmd/tendermint/commands/debug/dump.go +++ b/cmd/tendermint/commands/debug/dump.go @@ -1,13 +1,13 @@ package debug import ( + "errors" "fmt" "io/ioutil" "os" "path/filepath" "time" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -32,14 +32,14 @@ func init() { &frequency, flagFrequency, 30, - "The frequency (seconds) in which to poll, aggregate and dump Tendermint debug data", + "the frequency (seconds) in which to poll, aggregate and dump Tendermint debug data", ) dumpCmd.Flags().StringVar( &profAddr, flagProfAddr, "", - "The profiling server address (<host>:<port>)", + "the profiling server address (<host>:<port>)", ) } @@ -55,13 +55,13 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error { if _, err := os.Stat(outDir); os.IsNotExist(err) { if err := os.Mkdir(outDir, os.ModePerm); err != nil { - return errors.Wrap(err, "failed to create output directory") + return fmt.Errorf("failed to create output directory: %w", err) } } rpc, err := rpchttp.New(nodeRPCAddr, "/websocket") if err != nil { - return errors.Wrap(err, "failed to create new http client") + return fmt.Errorf("failed to create new http client: %w", err) } home := viper.GetString(cli.HomeFlag)
diff --git a/cmd/tendermint/commands/debug/io.go b/cmd/tendermint/commands/debug/io.go index 240b2b23c..dcfff50c8 100644 ---
a/cmd/tendermint/commands/debug/io.go +++ b/cmd/tendermint/commands/debug/io.go @@ -3,14 +3,13 @@ package debug import ( "archive/zip" "encoding/json" + "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strings" - - "github.com/pkg/errors" ) // zipDir zips all the contents found in src, including both files and @@ -29,7 +28,7 @@ func zipDir(src, dest string) error { dirName := filepath.Base(dest) baseDir := strings.TrimSuffix(dirName, filepath.Ext(dirName)) - filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { if err != nil { return err } @@ -70,7 +69,6 @@ func zipDir(src, dest string) error { return err }) - return nil } // copyFile copies a file from src to dest and returns an error upon failure. The @@ -110,7 +108,7 @@ func copyFile(src, dest string) error { func writeStateJSONToFile(state interface{}, dir, filename string) error { stateJSON, err := json.MarshalIndent(state, "", " ") if err != nil { - return errors.Wrap(err, "failed to encode state dump") + return fmt.Errorf("failed to encode state dump: %w", err) } return ioutil.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm) diff --git a/cmd/tendermint/commands/debug/kill.go b/cmd/tendermint/commands/debug/kill.go index 40e298c72..a2c7a5fe1 100644 --- a/cmd/tendermint/commands/debug/kill.go +++ b/cmd/tendermint/commands/debug/kill.go @@ -1,6 +1,7 @@ package debug import ( + "errors" "fmt" "io/ioutil" "os" @@ -10,7 +11,6 @@ import ( "syscall" "time" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -46,7 +46,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { rpc, err := rpchttp.New(nodeRPCAddr, "/websocket") if err != nil { - return errors.Wrap(err, "failed to create new http client") + return fmt.Errorf("failed to create new http client: %w", err) } home := viper.GetString(cli.HomeFlag) @@ -58,7 +58,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { // relevant files and directories that will be compressed into a file. tmpDir, err := ioutil.TempDir(os.TempDir(), "tendermint_debug_tmp") if err != nil { - return errors.Wrap(err, "failed to create temporary directory") + return fmt.Errorf("failed to create temporary directory: %w", err) } defer os.RemoveAll(tmpDir) diff --git a/cmd/tendermint/commands/debug/util.go b/cmd/tendermint/commands/debug/util.go index 9e5e36a87..226bfadc7 100644 --- a/cmd/tendermint/commands/debug/util.go +++ b/cmd/tendermint/commands/debug/util.go @@ -1,6 +1,7 @@ package debug import ( + "context" "fmt" "io/ioutil" "net/http" @@ -8,8 +9,6 @@ import ( "path" "path/filepath" - "github.com/pkg/errors" - cfg "github.com/tendermint/tendermint/config" rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) @@ -17,9 +16,9 @@ import ( // dumpStatus gets node status state dump from the Tendermint RPC and writes it // to file. It returns an error upon failure. func dumpStatus(rpc *rpchttp.HTTP, dir, filename string) error { - status, err := rpc.Status() + status, err := rpc.Status(context.Background()) if err != nil { - return errors.Wrap(err, "failed to get node status") + return fmt.Errorf("failed to get node status: %w", err) } return writeStateJSONToFile(status, dir, filename) @@ -28,9 +27,9 @@ func dumpStatus(rpc *rpchttp.HTTP, dir, filename string) error { // dumpNetInfo gets network information state dump from the Tendermint RPC and // writes it to file. It returns an error upon failure. 
func dumpNetInfo(rpc *rpchttp.HTTP, dir, filename string) error { - netInfo, err := rpc.NetInfo() + netInfo, err := rpc.NetInfo(context.Background()) if err != nil { - return errors.Wrap(err, "failed to get node network information") + return fmt.Errorf("failed to get node network information: %w", err) } return writeStateJSONToFile(netInfo, dir, filename) @@ -39,9 +38,9 @@ func dumpNetInfo(rpc *rpchttp.HTTP, dir, filename string) error { // dumpConsensusState gets consensus state dump from the Tendermint RPC and // writes it to file. It returns an error upon failure. func dumpConsensusState(rpc *rpchttp.HTTP, dir, filename string) error { - consDump, err := rpc.DumpConsensusState() + consDump, err := rpc.DumpConsensusState(context.Background()) if err != nil { - return errors.Wrap(err, "failed to get node consensus dump") + return fmt.Errorf("failed to get node consensus dump: %w", err) } return writeStateJSONToFile(consDump, dir, filename) @@ -70,13 +69,13 @@ func dumpProfile(dir, addr, profile string, debug int) error { resp, err := http.Get(endpoint) // nolint: gosec if err != nil { - return errors.Wrapf(err, "failed to query for %s profile", profile) + return fmt.Errorf("failed to query for %s profile: %w", profile, err) } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { - return errors.Wrapf(err, "failed to read %s profile response body", profile) + return fmt.Errorf("failed to read %s profile response body: %w", profile, err) } return ioutil.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm) diff --git a/cmd/tendermint/commands/gen_validator.go b/cmd/tendermint/commands/gen_validator.go index 572bc974f..bd82ae624 100644 --- a/cmd/tendermint/commands/gen_validator.go +++ b/cmd/tendermint/commands/gen_validator.go @@ -5,7 +5,9 @@ import ( "github.com/spf13/cobra" + tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/types" ) // GenValidatorCmd allows the generation of a keypair for a @@ -16,9 +18,17 @@ var GenValidatorCmd = &cobra.Command{ Run: genValidator, } +func init() { + GenValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + "Key type to generate privval file with. Options: ed25519, secp256k1") +} + func genValidator(cmd *cobra.Command, args []string) { - pv := privval.GenFilePV("", "") - jsbz, err := cdc.MarshalJSON(pv) + pv, err := privval.GenFilePV("", "", keyType) + if err != nil { + panic(err) + } + jsbz, err := tmjson.Marshal(pv) if err != nil { panic(err) } diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go index 1ece45132..62b4773bd 100644 --- a/cmd/tendermint/commands/init.go +++ b/cmd/tendermint/commands/init.go @@ -3,7 +3,6 @@ package commands import ( "fmt" - "github.com/pkg/errors" "github.com/spf13/cobra" cfg "github.com/tendermint/tendermint/config" @@ -11,6 +10,7 @@ import ( tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/privval" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" ) @@ -22,6 +22,15 @@ var InitFilesCmd = &cobra.Command{ RunE: initFiles, } +var ( + keyType string +) + +func init() { + InitFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + "Key type to generate privval file with. 
Options: ed25519, secp256k1") +} + func initFiles(cmd *cobra.Command, args []string) error { return initFilesWithConfig(config) } @@ -30,13 +39,19 @@ func initFilesWithConfig(config *cfg.Config) error { // private validator privValKeyFile := config.PrivValidatorKeyFile() privValStateFile := config.PrivValidatorStateFile() - var pv *privval.FilePV + var ( + pv *privval.FilePV + err error + ) if tmos.FileExists(privValKeyFile) { pv = privval.LoadFilePV(privValKeyFile, privValStateFile) logger.Info("Found private validator", "keyFile", privValKeyFile, "stateFile", privValStateFile) } else { - pv = privval.GenFilePV(privValKeyFile, privValStateFile) + pv, err = privval.GenFilePV(privValKeyFile, privValStateFile, keyType) + if err != nil { + return err + } pv.Save() logger.Info("Generated private validator", "keyFile", privValKeyFile, "stateFile", privValStateFile) @@ -57,14 +72,20 @@ func initFilesWithConfig(config *cfg.Config) error { if tmos.FileExists(genFile) { logger.Info("Found genesis file", "path", genFile) } else { + genDoc := types.GenesisDoc{ ChainID: fmt.Sprintf("test-chain-%v", tmrand.Str(6)), GenesisTime: tmtime.Now(), ConsensusParams: types.DefaultConsensusParams(), } + if keyType == "secp256k1" { + genDoc.ConsensusParams.Validator = tmproto.ValidatorParams{ + PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1}, + } + } pubKey, err := pv.GetPubKey() if err != nil { - return errors.Wrap(err, "can't get pubkey") + return fmt.Errorf("can't get pubkey: %w", err) } genDoc.Validators = []types.GenesisValidator{{ Address: pubKey.Address(),
diff --git a/cmd/tendermint/commands/light.go b/cmd/tendermint/commands/light.go new file mode 100644 index 000000000..85c94118f --- /dev/null +++ b/cmd/tendermint/commands/light.go @@ -0,0 +1,284 @@ +package commands + +import ( + "bufio" + "context" + "errors" + "fmt" + "net/http" + "os" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/spf13/cobra" + + dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/libs/log" + tmmath "github.com/tendermint/tendermint/libs/math" + tmos "github.com/tendermint/tendermint/libs/os" + "github.com/tendermint/tendermint/light" + lproxy "github.com/tendermint/tendermint/light/proxy" + lrpc "github.com/tendermint/tendermint/light/rpc" + dbs "github.com/tendermint/tendermint/light/store/db" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" + rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" +) + +// LightCmd represents the base command when called without any subcommands +var LightCmd = &cobra.Command{ + Use: "light [chainID]", + Short: "Run a light client proxy server, verifying Tendermint rpc", + Long: `Run a light client proxy server, verifying Tendermint rpc. + +All calls that can be traced back to a block header by a proof +will be verified before passing them back to the caller. Other than +that, it will present the same interface as a full Tendermint node. + +In addition to the chainID, a fresh instance of a light client will +need a primary RPC address, a trusted hash and height, and witness RPC addresses +(if not using sequential verification). To restart the node thereafter, +only the chainID is required. + +When /abci_query is called, the Merkle key path format is: + + /{store name}/{key} + +Please verify with your application that this Merkle key format is used (true +for applications built w/ Cosmos SDK).
+`, + RunE: runProxy, + Args: cobra.ExactArgs(1), + Example: `light cosmoshub-3 -p http://52.57.29.196:26657 -w http://public-seed-node.cosmoshub.certus.one:26657 + --height 962118 --hash 28B97BE9F6DE51AC69F70E0B7BFD7E5C9CD1A595B7DC31AFF27C50D4948020CD`, +} + +var ( + listenAddr string + primaryAddr string + witnessAddrsJoined string + chainID string + home string + maxOpenConnections int + + sequential bool + trustingPeriod time.Duration + trustedHeight int64 + trustedHash []byte + trustLevelStr string + + verbose bool + + primaryKey = []byte("primary") + witnessesKey = []byte("witnesses") +) + +func init() { + LightCmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888", + "serve the proxy on the given address") + LightCmd.Flags().StringVarP(&primaryAddr, "primary", "p", "", + "connect to a Tendermint node at this address") + LightCmd.Flags().StringVarP(&witnessAddrsJoined, "witnesses", "w", "", + "tendermint nodes to cross-check the primary node, comma-separated") + LightCmd.Flags().StringVar(&home, "home-dir", os.ExpandEnv(filepath.Join("$HOME", ".tendermint-light")), + "specify the home directory") + LightCmd.Flags().IntVar( + &maxOpenConnections, + "max-open-connections", + 900, + "maximum number of simultaneous connections (including WebSocket).") + LightCmd.Flags().DurationVar(&trustingPeriod, "trusting-period", 168*time.Hour, + "trusting period that headers can be verified within. Should be significantly less than the unbonding period") + LightCmd.Flags().Int64Var(&trustedHeight, "height", 1, "Trusted header's height") + LightCmd.Flags().BytesHexVar(&trustedHash, "hash", []byte{}, "Trusted header's hash") + LightCmd.Flags().BoolVar(&verbose, "verbose", false, "Verbose output") + LightCmd.Flags().StringVar(&trustLevelStr, "trust-level", "1/3", + "trust level. Must be between 1/3 and 3/3", + ) + LightCmd.Flags().BoolVar(&sequential, "sequential", false, + "sequential verification. Verify all headers sequentially as opposed to using skipping verification", + ) +} + +func runProxy(cmd *cobra.Command, args []string) error { + // Initialise logger. + logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + var option log.Option + if verbose { + option, _ = log.AllowLevel("debug") + } else { + option, _ = log.AllowLevel("info") + } + logger = log.NewFilter(logger, option) + + chainID = args[0] + logger.Info("Creating client...", "chainID", chainID) + + witnessesAddrs := []string{} + if witnessAddrsJoined != "" { + witnessesAddrs = strings.Split(witnessAddrsJoined, ",") + } + + db, err := dbm.NewGoLevelDB("light-client-db", home) + if err != nil { + return fmt.Errorf("can't create a db: %w", err) + } + + if primaryAddr == "" { // check to see if we can start from an existing state + var err error + primaryAddr, witnessesAddrs, err = checkForExistingProviders(db) + if err != nil { + return fmt.Errorf("failed to retrieve primary or witness from db: %w", err) + } + if primaryAddr == "" { + return errors.New("no primary address was provided nor found. Please provide a primary (using -p)." 
+ + " Run the command: tendermint light --help for more information") + } + } else { + err := saveProviders(db, primaryAddr, witnessAddrsJoined) + if err != nil { + logger.Error("Unable to save primary and or witness addresses", "err", err) + } + } + + trustLevel, err := tmmath.ParseFraction(trustLevelStr) + if err != nil { + return fmt.Errorf("can't parse trust level: %w", err) + } + + options := []light.Option{ + light.Logger(logger), + light.ConfirmationFunction(func(action string) bool { + fmt.Println(action) + scanner := bufio.NewScanner(os.Stdin) + for { + scanner.Scan() + response := scanner.Text() + switch response { + case "y", "Y": + return true + case "n", "N": + return false + default: + fmt.Println("please input 'Y' or 'n' and press ENTER") + } + } + }), + } + + if sequential { + options = append(options, light.SequentialVerification()) + } else { + options = append(options, light.SkippingVerification(trustLevel)) + } + + var c *light.Client + if trustedHeight > 0 && len(trustedHash) > 0 { // fresh installation + c, err = light.NewHTTPClient( + context.Background(), + chainID, + light.TrustOptions{ + Period: trustingPeriod, + Height: trustedHeight, + Hash: trustedHash, + }, + primaryAddr, + witnessesAddrs, + dbs.New(db, chainID), + options..., + ) + } else { // continue from latest state + c, err = light.NewHTTPClientFromTrustedStore( + chainID, + trustingPeriod, + primaryAddr, + witnessesAddrs, + dbs.New(db, chainID), + options..., + ) + } + if err != nil { + return err + } + + rpcClient, err := rpchttp.New(primaryAddr, "/websocket") + if err != nil { + return fmt.Errorf("http client for %s: %w", primaryAddr, err) + } + + cfg := rpcserver.DefaultConfig() + cfg.MaxBodyBytes = config.RPC.MaxBodyBytes + cfg.MaxHeaderBytes = config.RPC.MaxHeaderBytes + cfg.MaxOpenConnections = maxOpenConnections + // If necessary adjust global WriteTimeout to ensure it's greater than + // TimeoutBroadcastTxCommit. + // See https://github.com/tendermint/tendermint/issues/3435 + if cfg.WriteTimeout <= config.RPC.TimeoutBroadcastTxCommit { + cfg.WriteTimeout = config.RPC.TimeoutBroadcastTxCommit + 1*time.Second + } + + p := lproxy.Proxy{ + Addr: listenAddr, + Config: cfg, + Client: lrpc.NewClient(rpcClient, c, lrpc.KeyPathFn(defaultMerkleKeyPathFn())), + Logger: logger, + } + // Stop upon receiving SIGTERM or CTRL-C. 
+ tmos.TrapSignal(logger, func() { + p.Listener.Close() + }) + + logger.Info("Starting proxy...", "laddr", listenAddr) + if err := p.ListenAndServe(); err != http.ErrServerClosed { + // Error starting or closing listener: + logger.Error("proxy ListenAndServe", "err", err) + } + + return nil +} + +func checkForExistingProviders(db dbm.DB) (string, []string, error) { + primaryBytes, err := db.Get(primaryKey) + if err != nil { + return "", []string{""}, err + } + witnessesBytes, err := db.Get(witnessesKey) + if err != nil { + return "", []string{""}, err + } + witnessesAddrs := strings.Split(string(witnessesBytes), ",") + return string(primaryBytes), witnessesAddrs, nil +} + +func saveProviders(db dbm.DB, primaryAddr, witnessesAddrs string) error { + err := db.Set(primaryKey, []byte(primaryAddr)) + if err != nil { + return fmt.Errorf("failed to save primary provider: %w", err) + } + err = db.Set(witnessesKey, []byte(witnessesAddrs)) + if err != nil { + return fmt.Errorf("failed to save witness providers: %w", err) + } + return nil +} + +func defaultMerkleKeyPathFn() lrpc.KeyPathFunc { + // regexp for extracting store name from /abci_query path + storeNameRegexp := regexp.MustCompile(`\/store\/(.+)\/key`) + + return func(path string, key []byte) (merkle.KeyPath, error) { + matches := storeNameRegexp.FindStringSubmatch(path) + if len(matches) != 2 { + return nil, fmt.Errorf("can't find store name in %s using %s", path, storeNameRegexp) + } + storeName := matches[1] + + kp := merkle.KeyPath{} + kp = kp.AppendKey([]byte(storeName), merkle.KeyEncodingURL) + kp = kp.AppendKey(key, merkle.KeyEncodingURL) + return kp, nil + } +} diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go deleted file mode 100644 index e28daa6d8..000000000 --- a/cmd/tendermint/commands/lite.go +++ /dev/null @@ -1,159 +0,0 @@ -package commands - -import ( - "net/http" - "os" - "strings" - "time" - - "github.com/pkg/errors" - "github.com/spf13/cobra" - - "github.com/tendermint/go-amino" - dbm "github.com/tendermint/tm-db" - - "github.com/tendermint/tendermint/libs/log" - tmos "github.com/tendermint/tendermint/libs/os" - lite "github.com/tendermint/tendermint/lite2" - lproxy "github.com/tendermint/tendermint/lite2/proxy" - lrpc "github.com/tendermint/tendermint/lite2/rpc" - dbs "github.com/tendermint/tendermint/lite2/store/db" - rpchttp "github.com/tendermint/tendermint/rpc/client/http" - rpcserver "github.com/tendermint/tendermint/rpc/lib/server" -) - -// LiteCmd represents the base command when called without any subcommands -var LiteCmd = &cobra.Command{ - Use: "lite [chainID]", - Short: "Run a light client proxy server, verifying Tendermint rpc", - Long: `Run a light client proxy server, verifying Tendermint rpc. - -All calls that can be tracked back to a block header by a proof -will be verified before passing them back to the caller. Other than -that, it will present the same interface as a full Tendermint node. 
- -Example: - -start a fresh instance: - -lite cosmoshub-3 -p 52.57.29.196:26657 -w public-seed-node.cosmoshub.certus.one:26657 - --height 962118 --hash 28B97BE9F6DE51AC69F70E0B7BFD7E5C9CD1A595B7DC31AFF27C50D4948020CD - -continue from latest state: - -lite cosmoshub-3 -p 52.57.29.196:26657 -w public-seed-node.cosmoshub.certus.one:26657 -`, - RunE: runProxy, - Args: cobra.ExactArgs(1), - Example: `lite cosmoshub-3 -p 52.57.29.196:26657 -w public-seed-node.cosmoshub.certus.one:26657 - --height 962118 --hash 28B97BE9F6DE51AC69F70E0B7BFD7E5C9CD1A595B7DC31AFF27C50D4948020CD`, -} - -var ( - listenAddr string - primaryAddr string - witnessAddrsJoined string - chainID string - home string - maxOpenConnections int - - trustingPeriod time.Duration - trustedHeight int64 - trustedHash []byte - - verbose bool -) - -func init() { - LiteCmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888", - "Serve the proxy on the given address") - LiteCmd.Flags().StringVarP(&primaryAddr, "primary", "p", "", - "Connect to a Tendermint node at this address") - LiteCmd.Flags().StringVarP(&witnessAddrsJoined, "witnesses", "w", "", - "Tendermint nodes to cross-check the primary node, comma-separated") - LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory") - LiteCmd.Flags().IntVar( - &maxOpenConnections, - "max-open-connections", - 900, - "Maximum number of simultaneous connections (including WebSocket).") - LiteCmd.Flags().DurationVar(&trustingPeriod, "trusting-period", 168*time.Hour, - "Trusting period. Should be significantly less than the unbonding period") - LiteCmd.Flags().Int64Var(&trustedHeight, "height", 1, "Trusted header's height") - LiteCmd.Flags().BytesHexVar(&trustedHash, "hash", []byte{}, "Trusted header's hash") - LiteCmd.Flags().BoolVar(&verbose, "verbose", false, "Verbose output") -} - -func runProxy(cmd *cobra.Command, args []string) error { - // Initialise logger. - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) - var option log.Option - if verbose { - option, _ = log.AllowLevel("debug") - } else { - option, _ = log.AllowLevel("info") - } - logger = log.NewFilter(logger, option) - - chainID = args[0] - logger.Info("Creating client...", "chainID", chainID) - - witnessesAddrs := strings.Split(witnessAddrsJoined, ",") - - db, err := dbm.NewGoLevelDB("lite-client-db", home) - if err != nil { - return errors.Wrap(err, "new goleveldb") - } - - var c *lite.Client - if trustedHeight > 0 && len(trustedHash) > 0 { // fresh installation - c, err = lite.NewHTTPClient( - chainID, - lite.TrustOptions{ - Period: trustingPeriod, - Height: trustedHeight, - Hash: trustedHash, - }, - primaryAddr, - witnessesAddrs, - dbs.New(db, chainID), - lite.Logger(logger), - ) - } else { // continue from latest state - c, err = lite.NewHTTPClientFromTrustedStore( - chainID, - trustingPeriod, - primaryAddr, - witnessesAddrs, - dbs.New(db, chainID), - lite.Logger(logger), - ) - } - if err != nil { - return err - } - - rpcClient, err := rpchttp.New(primaryAddr, "/websocket") - if err != nil { - return errors.Wrapf(err, "http client for %s", primaryAddr) - } - p := lproxy.Proxy{ - Addr: listenAddr, - Config: &rpcserver.Config{MaxOpenConnections: maxOpenConnections}, - Codec: amino.NewCodec(), - Client: lrpc.NewClient(rpcClient, c), - Logger: logger, - } - // Stop upon receiving SIGTERM or CTRL-C. 
- tmos.TrapSignal(logger, func() { - p.Listener.Close() - }) - - logger.Info("Starting proxy...", "laddr", listenAddr) - if err := p.ListenAndServe(); err != http.ErrServerClosed { - // Error starting or closing listener: - logger.Error("proxy ListenAndServe", "err", err) - } - - return nil -} diff --git a/cmd/tendermint/commands/probe_upnp.go b/cmd/tendermint/commands/probe_upnp.go index 35c3c354d..9ac35fd50 100644 --- a/cmd/tendermint/commands/probe_upnp.go +++ b/cmd/tendermint/commands/probe_upnp.go @@ -5,6 +5,7 @@ import ( "github.com/spf13/cobra" + tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/p2p/upnp" ) @@ -21,7 +22,7 @@ func probeUpnp(cmd *cobra.Command, args []string) error { fmt.Println("Probe failed: ", err) } else { fmt.Println("Probe success!") - jsonBytes, err := cdc.MarshalJSON(capabilities) + jsonBytes, err := tmjson.Marshal(capabilities) if err != nil { return err } diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go index eef837c60..79799a2ed 100644 --- a/cmd/tendermint/commands/reset_priv_validator.go +++ b/cmd/tendermint/commands/reset_priv_validator.go @@ -8,6 +8,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/types" ) // ResetAllCmd removes the database of this Tendermint core @@ -21,7 +22,9 @@ var ResetAllCmd = &cobra.Command{ var keepAddrBook bool func init() { - ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "Keep the address book intact") + ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "keep the address book intact") + ResetPrivValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + "Key type to generate privval file with. Options: ed25519, secp256k1") } // ResetPrivValidatorCmd resets the private validator files. 
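One mechanical change repeated through the debug, init, and reset commands in this diff is the move from github.com/pkg/errors to Go 1.13 error wrapping: errors.Wrap(err, "msg") becomes fmt.Errorf("msg: %w", err), which drops the dependency while keeping the cause inspectable. A tiny self-contained illustration (loadConfig is a hypothetical example, not code from this PR):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func loadConfig(path string) error {
	if _, err := os.Stat(path); err != nil {
		// Before: errors.Wrap(err, "failed to stat config")
		// After:  %w keeps the underlying error in the chain.
		return fmt.Errorf("failed to stat config: %w", err)
	}
	return nil
}

func main() {
	err := loadConfig("/nonexistent/config.toml")
	fmt.Println(err)
	// The wrapped cause is still programmatically inspectable:
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true
}
```

errors.Is and errors.As traverse the %w chain, so callers keep the access to the root cause that errors.Cause used to provide.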
@@ -58,7 +61,9 @@ func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logg logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err) } // recreate the dbDir since the privVal state needs to live there - tmos.EnsureDir(dbDir, 0700) + if err := tmos.EnsureDir(dbDir, 0700); err != nil { + logger.Error("unable to recreate dbDir", "err", err) + } resetFilePV(privValKeyFile, privValStateFile, logger) } @@ -69,7 +74,10 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) { logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile, "stateFile", privValStateFile) } else { - pv := privval.GenFilePV(privValKeyFile, privValStateFile) + pv, err := privval.GenFilePV(privValKeyFile, privValStateFile, keyType) + if err != nil { + panic(err) + } pv.Save() logger.Info("Generated private validator file", "keyFile", privValKeyFile, "stateFile", privValStateFile) diff --git a/cmd/tendermint/commands/root.go b/cmd/tendermint/commands/root.go index 4fa0cf3f7..664f8ff14 100644 --- a/cmd/tendermint/commands/root.go +++ b/cmd/tendermint/commands/root.go @@ -23,7 +23,7 @@ func init() { } func registerFlagsRootCmd(cmd *cobra.Command) { - cmd.PersistentFlags().String("log_level", config.LogLevel, "Log level") + cmd.PersistentFlags().String("log_level", config.LogLevel, "log level") } // ParseConfig retrieves the default environment configuration, diff --git a/cmd/tendermint/commands/root_test.go b/cmd/tendermint/commands/root_test.go index a39f92d38..d1e5964b2 100644 --- a/cmd/tendermint/commands/root_test.go +++ b/cmd/tendermint/commands/root_test.go @@ -168,5 +168,5 @@ func WriteConfigVals(dir string, vals map[string]string) error { data += fmt.Sprintf("%s = \"%s\"\n", k, v) } cfile := filepath.Join(dir, "config.toml") - return ioutil.WriteFile(cfile, []byte(data), 0666) + return ioutil.WriteFile(cfile, []byte(data), 0600) } diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index 628a0d173..af77553fa 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -7,7 +7,6 @@ import ( "io" "os" - "github.com/pkg/errors" "github.com/spf13/cobra" cfg "github.com/tendermint/tendermint/config" @@ -23,31 +22,34 @@ var ( // These are exposed for convenience of commands embedding a tendermint node func AddNodeFlags(cmd *cobra.Command) { // bind flags - cmd.Flags().String("moniker", config.Moniker, "Node Name") + cmd.Flags().String("moniker", config.Moniker, "node name") // priv val flags cmd.Flags().String( "priv_validator_laddr", config.PrivValidatorListenAddr, - "Socket address to listen on for connections from external priv_validator process") + "socket address to listen on for connections from external priv_validator process") // node flags - cmd.Flags().Bool("fast_sync", config.FastSyncMode, "Fast blockchain syncing") + cmd.Flags().Bool("fast_sync", config.FastSyncMode, "fast blockchain syncing") cmd.Flags().BytesHexVar( &genesisHash, "genesis_hash", []byte{}, - "Optional SHA-256 hash of the genesis file") + "optional SHA-256 hash of the genesis file") + cmd.Flags().Int64("consensus.double_sign_check_height", config.Consensus.DoubleSignCheckHeight, + "how many blocks to look back to check existence of the node's "+ + "consensus votes before joining consensus") // abci flags cmd.Flags().String( "proxy_app", config.ProxyApp, - "Proxy app address, or one of: 'kvstore',"+ + "proxy app address, or one of: 'kvstore',"+ " 'persistent_kvstore',"+ " 
'counter',"+ " 'counter_serial' or 'noop' for local testing.") - cmd.Flags().String("abci", config.ABCI, "Specify abci transport (socket | grpc)") + cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)") // rpc flags cmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required") @@ -55,41 +57,42 @@ func AddNodeFlags(cmd *cobra.Command) { "rpc.grpc_laddr", config.RPC.GRPCListenAddress, "GRPC listen address (BroadcastTx only). Port required") - cmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "Enabled unsafe rpc methods") + cmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "enable unsafe rpc methods") + cmd.Flags().String("rpc.pprof_laddr", config.RPC.PprofListenAddress, "pprof listen address (https://golang.org/pkg/net/http/pprof)") // p2p flags cmd.Flags().String( "p2p.laddr", config.P2P.ListenAddress, - "Node listen address. (0.0.0.0:0 means any interface, any port)") - cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma-delimited ID@host:port seed nodes") - cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "Comma-delimited ID@host:port persistent peers") + "node listen address. (0.0.0.0:0 means any interface, any port)") + cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes") + cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") cmd.Flags().String("p2p.unconditional_peer_ids", - config.P2P.UnconditionalPeerIDs, "Comma-delimited IDs of unconditional peers") - cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "Enable/disable UPNP port forwarding") - cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "Enable/disable Peer-Exchange") - cmd.Flags().Bool("p2p.seed_mode", config.P2P.SeedMode, "Enable/disable seed mode") - cmd.Flags().String("p2p.private_peer_ids", config.P2P.PrivatePeerIDs, "Comma-delimited private peer IDs") + config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers") + cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding") + cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange") + cmd.Flags().Bool("p2p.seed_mode", config.P2P.SeedMode, "enable/disable seed mode") + cmd.Flags().String("p2p.private_peer_ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs") // consensus flags cmd.Flags().Bool( "consensus.create_empty_blocks", config.Consensus.CreateEmptyBlocks, - "Set this to false to only produce blocks when there are txs or when the AppHash changes") + "set this to false to only produce blocks when there are txs or when the AppHash changes") cmd.Flags().String( "consensus.create_empty_blocks_interval", config.Consensus.CreateEmptyBlocksInterval.String(), - "The possible interval between empty blocks") + "the possible interval between empty blocks") // db flags cmd.Flags().String( "db_backend", config.DBBackend, - "Database backend: goleveldb | cleveldb | boltdb | rocksdb") + "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb") cmd.Flags().String( "db_dir", config.DBPath, - "Database directory") + "database directory") } // NewRunNodeCmd returns the command that allows the CLI to start a node. @@ -117,7 +120,9 @@ func NewRunNodeCmd(nodeProvider nm.Provider) *cobra.Command { // Stop upon receiving SIGTERM or CTRL-C. 
tmos.TrapSignal(logger, func() { if n.IsRunning() { - n.Stop() + if err := n.Stop(); err != nil { + logger.Error("unable to stop the node", "error", err) + } } }) @@ -138,18 +143,18 @@ func checkGenesisHash(config *cfg.Config) error { // Calculate SHA-256 hash of the genesis file. f, err := os.Open(config.GenesisFile()) if err != nil { - return errors.Wrap(err, "can't open genesis file") + return fmt.Errorf("can't open genesis file: %w", err) } defer f.Close() h := sha256.New() if _, err := io.Copy(h, f); err != nil { - return errors.Wrap(err, "error when hashing genesis file") + return fmt.Errorf("error when hashing genesis file: %w", err) } actualHash := h.Sum(nil) // Compare with the flag. if !bytes.Equal(genesisHash, actualHash) { - return errors.Errorf( + return fmt.Errorf( "--genesis_hash=%X does not match %s hash: %X", genesisHash, config.GenesisFile(), actualHash) } diff --git a/cmd/tendermint/commands/show_validator.go b/cmd/tendermint/commands/show_validator.go index 4b885a5c3..e3980743a 100644 --- a/cmd/tendermint/commands/show_validator.go +++ b/cmd/tendermint/commands/show_validator.go @@ -3,9 +3,9 @@ package commands import ( "fmt" - "github.com/pkg/errors" "github.com/spf13/cobra" + tmjson "github.com/tendermint/tendermint/libs/json" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/privval" ) @@ -27,12 +27,12 @@ func showValidator(cmd *cobra.Command, args []string) error { pubKey, err := pv.GetPubKey() if err != nil { - return errors.Wrap(err, "can't get pubkey") + return fmt.Errorf("can't get pubkey: %w", err) } - bz, err := cdc.MarshalJSON(pubKey) + bz, err := tmjson.Marshal(pubKey) if err != nil { - return errors.Wrap(err, "failed to marshal private validator pubkey") + return fmt.Errorf("failed to marshal private validator pubkey: %w", err) } fmt.Println(string(bz)) diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go index 54bb1363e..85e66843c 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -7,7 +7,6 @@ import ( "path/filepath" "strings" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -16,6 +15,7 @@ import ( tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/privval" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" ) @@ -23,6 +23,7 @@ import ( var ( nValidators int nNonValidators int + initialHeight int64 configFile string outputDir string nodeDirPrefix string @@ -42,36 +43,40 @@ const ( func init() { TestnetFilesCmd.Flags().IntVar(&nValidators, "v", 4, - "Number of validators to initialize the testnet with") + "number of validators to initialize the testnet with") TestnetFilesCmd.Flags().StringVar(&configFile, "config", "", - "Config file to use (note some options may be overwritten)") + "config file to use (note some options may be overwritten)") TestnetFilesCmd.Flags().IntVar(&nNonValidators, "n", 0, - "Number of non-validators to initialize the testnet with") + "number of non-validators to initialize the testnet with") TestnetFilesCmd.Flags().StringVar(&outputDir, "o", "./mytestnet", - "Directory to store initialization data for the testnet") + "directory to store initialization data for the testnet") TestnetFilesCmd.Flags().StringVar(&nodeDirPrefix, "node-dir-prefix", "node", - "Prefix the directory name for each node with (node results in 
node0, node1, ...)") + "prefix the directory name for each node with (node results in node0, node1, ...)") + TestnetFilesCmd.Flags().Int64Var(&initialHeight, "initial-height", 0, + "initial height of the first block") TestnetFilesCmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true, - "Update config of each node with the list of persistent peers build using either"+ " hostname-prefix or"+ " starting-ip-address") + "update config of each node with the list of persistent peers built using either"+ " hostname-prefix or"+ " starting-ip-address") TestnetFilesCmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node", - "Hostname prefix (\"node\" results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)") + "hostname prefix (\"node\" results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)") TestnetFilesCmd.Flags().StringVar(&hostnameSuffix, "hostname-suffix", "", - "Hostname suffix ("+ + "hostname suffix ("+ "\".xyz.com\""+ " results in persistent peers list ID0@node0.xyz.com:26656, ID1@node1.xyz.com:26656, ...)") TestnetFilesCmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "", - "Starting IP address ("+ + "starting IP address ("+ "\"192.168.0.1\""+ " results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)") TestnetFilesCmd.Flags().StringArrayVar(&hostnames, "hostname", []string{}, - "Manually override all hostnames of validators and non-validators (use --hostname multiple times for multiple hosts)") + "manually override all hostnames of validators and non-validators (use --hostname multiple times for multiple hosts)") TestnetFilesCmd.Flags().IntVar(&p2pPort, "p2p-port", 26656, "P2P Port") TestnetFilesCmd.Flags().BoolVar(&randomMonikers, "random-monikers", false, - "Randomize the moniker for each generated node") + "randomize the moniker for each generated node") + TestnetFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + "Key type to generate privval file with. Options: ed25519, secp256k1") } // TestnetFilesCmd allows initialisation of files for a Tendermint testnet. @@ -134,7 +139,9 @@ func testnetFiles(cmd *cobra.Command, args []string) error { return err } - initFilesWithConfig(config) + if err := initFilesWithConfig(config); err != nil { + return err + } pvKeyFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorKey) pvStateFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorState) @@ -142,7 +149,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error { pubKey, err := pv.GetPubKey() if err != nil { - return errors.Wrap(err, "can't get pubkey") + return fmt.Errorf("can't get pubkey: %w", err) } genVals[i] = types.GenesisValidator{ Address: pubKey.Address(), @@ -168,15 +175,23 @@ func testnetFiles(cmd *cobra.Command, args []string) error { return err } - initFilesWithConfig(config) + if err := initFilesWithConfig(config); err != nil { + return err + } } // Generate genesis doc from generated validators genDoc := &types.GenesisDoc{ ChainID: "chain-" + tmrand.Str(6), - ConsensusParams: types.DefaultConsensusParams(), GenesisTime: tmtime.Now(), + InitialHeight: initialHeight, Validators: genVals, + ConsensusParams: types.DefaultConsensusParams(), + } + if keyType == "secp256k1" { + genDoc.ConsensusParams.Validator = tmproto.ValidatorParams{ + PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1}, + } } // Write genesis file. 
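Taken together, the new testnet flags make it possible to generate a secp256k1-based network that starts above height 1 in a single invocation, for example (flag names are the ones registered above; the output directory and addresses are illustrative):

    tendermint testnet --v 4 --n 2 --key secp256k1 --initial-height 100 --o ./mytestnet --starting-ip-address 192.168.0.1

Passing --key secp256k1 is what triggers the ConsensusParams.Validator override in the genesis document shown above, so the generated chain accepts secp256k1 validator keys.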
diff --git a/cmd/tendermint/commands/version.go b/cmd/tendermint/commands/version.go index f9f545e59..d1a7fba58 100644 --- a/cmd/tendermint/commands/version.go +++ b/cmd/tendermint/commands/version.go @@ -13,6 +13,6 @@ var VersionCmd = &cobra.Command{ Use: "version", Short: "Show version info", Run: func(cmd *cobra.Command, args []string) { - fmt.Println(version.Version) + fmt.Println(version.TMCoreSemVer) }, } diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index 615b7e065..311a59a65 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -17,7 +17,7 @@ func main() { cmd.GenValidatorCmd, cmd.InitFilesCmd, cmd.ProbeUpnpCmd, - cmd.LiteCmd, + cmd.LightCmd, cmd.ReplayCmd, cmd.ReplayConsoleCmd, cmd.ResetAllCmd, diff --git a/config/config.go b/config/config.go index c246493e4..514a23b88 100644 --- a/config/config.go +++ b/config/config.go @@ -1,13 +1,13 @@ package config import ( + "encoding/hex" + "errors" "fmt" "net/http" "os" "path/filepath" "time" - - "github.com/pkg/errors" ) const ( @@ -51,11 +51,6 @@ var ( defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName) ) -var ( - oldPrivVal = "priv_validator.json" - oldPrivValPath = filepath.Join(defaultConfigDir, oldPrivVal) -) - // Config defines the top level configuration for a Tendermint node type Config struct { // Top level options use an anonymous struct @@ -65,6 +60,7 @@ type Config struct { RPC *RPCConfig `mapstructure:"rpc"` P2P *P2PConfig `mapstructure:"p2p"` Mempool *MempoolConfig `mapstructure:"mempool"` + StateSync *StateSyncConfig `mapstructure:"statesync"` FastSync *FastSyncConfig `mapstructure:"fastsync"` Consensus *ConsensusConfig `mapstructure:"consensus"` TxIndex *TxIndexConfig `mapstructure:"tx_index"` @@ -78,6 +74,7 @@ func DefaultConfig() *Config { RPC: DefaultRPCConfig(), P2P: DefaultP2PConfig(), Mempool: DefaultMempoolConfig(), + StateSync: DefaultStateSyncConfig(), FastSync: DefaultFastSyncConfig(), Consensus: DefaultConsensusConfig(), TxIndex: DefaultTxIndexConfig(), @@ -92,6 +89,7 @@ func TestConfig() *Config { RPC: TestRPCConfig(), P2P: TestP2PConfig(), Mempool: TestMempoolConfig(), + StateSync: TestStateSyncConfig(), FastSync: TestFastSyncConfig(), Consensus: TestConsensusConfig(), TxIndex: TestTxIndexConfig(), @@ -116,24 +114,27 @@ func (cfg *Config) ValidateBasic() error { return err } if err := cfg.RPC.ValidateBasic(); err != nil { - return errors.Wrap(err, "Error in [rpc] section") + return fmt.Errorf("error in [rpc] section: %w", err) } if err := cfg.P2P.ValidateBasic(); err != nil { - return errors.Wrap(err, "Error in [p2p] section") + return fmt.Errorf("error in [p2p] section: %w", err) } if err := cfg.Mempool.ValidateBasic(); err != nil { - return errors.Wrap(err, "Error in [mempool] section") + return fmt.Errorf("error in [mempool] section: %w", err) + } + if err := cfg.StateSync.ValidateBasic(); err != nil { + return fmt.Errorf("error in [statesync] section: %w", err) } if err := cfg.FastSync.ValidateBasic(); err != nil { - return errors.Wrap(err, "Error in [fastsync] section") + return fmt.Errorf("error in [fastsync] section: %w", err) } if err := cfg.Consensus.ValidateBasic(); err != nil { - return errors.Wrap(err, "Error in [consensus] section") + return fmt.Errorf("error in [consensus] section: %w", err) + } + if err := cfg.Instrumentation.ValidateBasic(); err != nil { + return fmt.Errorf("error in [instrumentation] section: %w", err) } - return errors.Wrap( - cfg.Instrumentation.ValidateBasic(), - "Error in [instrumentation] section", - ) + return nil } 
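The ValidateBasic rewrite above is representative of the PR-wide migration from github.com/pkg/errors to standard-library error wrapping. A minimal, self-contained sketch of the pattern (the identifiers are illustrative, not from the repo):

package main

import (
	"errors"
	"fmt"
)

var errBadPort = errors.New("port out of range")

func validateRPC() error { return errBadPort }

func validate() error {
	if err := validateRPC(); err != nil {
		// fmt.Errorf with %w (Go 1.13+) replaces errors.Wrap while keeping
		// the underlying error reachable through the wrap chain.
		return fmt.Errorf("error in [rpc] section: %w", err)
	}
	return nil
}

func main() {
	err := validate()
	fmt.Println(errors.Is(err, errBadPort)) // true: the sentinel survives wrapping
}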
//----------------------------------------------------------------------------- @@ -176,6 +177,9 @@ type BaseConfig struct { //nolint: maligned // - EXPERIMENTAL // - requires gcc // - use rocksdb build tag (go build -tags rocksdb) + // * badgerdb (uses github.com/dgraph-io/badger) + // - EXPERIMENTAL + // - use badgerdb build tag (go build -tags badgerdb) DBBackend string `mapstructure:"db_backend"` // Database directory @@ -206,9 +210,6 @@ type BaseConfig struct { //nolint: maligned // Mechanism to connect to the ABCI application: socket | grpc ABCI string `mapstructure:"abci"` - // TCP or UNIX socket address for the profiling server to listen on - ProfListenAddress string `mapstructure:"prof_laddr"` - // If true, query the ABCI app on connecting to a new peer // so the app can decide if we should keep the connection or not FilterPeers bool `mapstructure:"filter_peers"` // false @@ -226,7 +227,6 @@ func DefaultBaseConfig() BaseConfig { ABCI: "socket", LogLevel: DefaultPackageLogLevels(), LogFormat: LogFormatPlain, - ProfListenAddress: "", FastSyncMode: true, FilterPeers: false, DBBackend: "goleveldb", @@ -263,12 +263,6 @@ func (cfg BaseConfig) PrivValidatorStateFile() string { return rootify(cfg.PrivValidatorState, cfg.RootDir) } -// OldPrivValidatorFile returns the full path of the priv_validator.json from pre v0.28.0. -// TODO: eventually remove. -func (cfg BaseConfig) OldPrivValidatorFile() string { - return rootify(oldPrivValPath, cfg.RootDir) -} - // NodeKeyFile returns the full path to the node_key.json file func (cfg BaseConfig) NodeKeyFile() string { return rootify(cfg.NodeKey, cfg.RootDir) @@ -298,7 +292,7 @@ func DefaultLogLevel() string { // DefaultPackageLogLevels returns a default log level setting so all packages // log at "error", while the `state` and `main` packages log at "info" func DefaultPackageLogLevels() string { - return fmt.Sprintf("main:info,state:info,*:%s", DefaultLogLevel()) + return fmt.Sprintf("main:info,state:info,statesync:info,*:%s", DefaultLogLevel()) } //----------------------------------------------------------------------------- @@ -385,6 +379,9 @@ type RPCConfig struct { // NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. // Otherwise, HTTP server is run. TLSKeyFile string `mapstructure:"tls_key_file"` + + // pprof listen address (https://golang.org/pkg/net/http/pprof) + PprofListenAddress string `mapstructure:"pprof_laddr"` } // DefaultRPCConfig returns a default configuration for the RPC server @@ -550,9 +547,6 @@ type P2PConfig struct { //nolint: maligned // Testing params. // Force dial to fail TestDialFail bool `mapstructure:"test_dial_fail"` - // FUzz connection - TestFuzz bool `mapstructure:"test_fuzz"` - TestFuzzConfig *FuzzConnConfig `mapstructure:"test_fuzz_config"` } // DefaultP2PConfig returns a default configuration for the peer-to-peer layer @@ -576,8 +570,6 @@ func DefaultP2PConfig() *P2PConfig { HandshakeTimeout: 20 * time.Second, DialTimeout: 3 * time.Second, TestDialFail: false, - TestFuzz: false, - TestFuzzConfig: DefaultFuzzConnConfig(), } } @@ -622,39 +614,29 @@ func (cfg *P2PConfig) ValidateBasic() error { return nil } -// FuzzConnConfig is a FuzzedConnection configuration. -type FuzzConnConfig struct { - Mode int - MaxDelay time.Duration - ProbDropRW float64 - ProbDropConn float64 - ProbSleep float64 -} - -// DefaultFuzzConnConfig returns the default config. 
-func DefaultFuzzConnConfig() *FuzzConnConfig { - return &FuzzConnConfig{ - Mode: FuzzModeDrop, - MaxDelay: 3 * time.Second, - ProbDropRW: 0.2, - ProbDropConn: 0.00, - ProbSleep: 0.00, - } -} - //----------------------------------------------------------------------------- // MempoolConfig // MempoolConfig defines the configuration options for the Tendermint mempool type MempoolConfig struct { - RootDir string `mapstructure:"home"` - Recheck bool `mapstructure:"recheck"` - Broadcast bool `mapstructure:"broadcast"` - WalPath string `mapstructure:"wal_dir"` - Size int `mapstructure:"size"` - MaxTxsBytes int64 `mapstructure:"max_txs_bytes"` - CacheSize int `mapstructure:"cache_size"` - MaxTxBytes int `mapstructure:"max_tx_bytes"` + RootDir string `mapstructure:"home"` + Recheck bool `mapstructure:"recheck"` + Broadcast bool `mapstructure:"broadcast"` + WalPath string `mapstructure:"wal_dir"` + // Maximum number of transactions in the mempool + Size int `mapstructure:"size"` + // Limit the total size of all txs in the mempool. + // This only accounts for raw transactions (e.g. given 1MB transactions and + // max_txs_bytes=5MB, mempool will only accept 5 transactions). + MaxTxsBytes int64 `mapstructure:"max_txs_bytes"` + // Size of the cache (used to filter transactions we saw earlier) in transactions + CacheSize int `mapstructure:"cache_size"` + // Maximum size of a single transaction + // NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. + MaxTxBytes int `mapstructure:"max_tx_bytes"` + // Maximum size of a batch of transactions to send to a peer + // Including space needed by encoding (one varint per transaction). + MaxBatchBytes int `mapstructure:"max_batch_bytes"` } // DefaultMempoolConfig returns a default configuration for the Tendermint mempool @@ -665,10 +647,11 @@ func DefaultMempoolConfig() *MempoolConfig { WalPath: "", // Each signature verification takes .5ms, Size reduced until we implement // ABCI Recheck - Size: 5000, - MaxTxsBytes: 1024 * 1024 * 1024, // 1GB - CacheSize: 10000, - MaxTxBytes: 1024 * 1024, // 1MB + Size: 5000, + MaxTxsBytes: 1024 * 1024 * 1024, // 1GB + CacheSize: 10000, + MaxTxBytes: 1024 * 1024, // 1MB + MaxBatchBytes: 10 * 1024 * 1024, // 10MB } } @@ -704,6 +687,79 @@ func (cfg *MempoolConfig) ValidateBasic() error { if cfg.MaxTxBytes < 0 { return errors.New("max_tx_bytes can't be negative") } + if cfg.MaxBatchBytes < 0 { + return errors.New("max_batch_bytes can't be negative") + } + if cfg.MaxBatchBytes <= cfg.MaxTxBytes { + return errors.New("max_batch_bytes can't be less or equal to max_tx_bytes") + } + return nil +} + +//----------------------------------------------------------------------------- +// StateSyncConfig + +// StateSyncConfig defines the configuration for the Tendermint state sync service +type StateSyncConfig struct { + Enable bool `mapstructure:"enable"` + TempDir string `mapstructure:"temp_dir"` + RPCServers []string `mapstructure:"rpc_servers"` + TrustPeriod time.Duration `mapstructure:"trust_period"` + TrustHeight int64 `mapstructure:"trust_height"` + TrustHash string `mapstructure:"trust_hash"` + DiscoveryTime time.Duration `mapstructure:"discovery_time"` +} + +func (cfg *StateSyncConfig) TrustHashBytes() []byte { + // validated in ValidateBasic, so we can safely panic here + bytes, err := hex.DecodeString(cfg.TrustHash) + if err != nil { + panic(err) + } + return bytes +} + +// DefaultStateSyncConfig returns a default configuration for the state sync service +func DefaultStateSyncConfig() *StateSyncConfig { + 
return &StateSyncConfig{ + TrustPeriod: 168 * time.Hour, + DiscoveryTime: 15 * time.Second, + } +} + +// TestStateSyncConfig returns a default configuration for the state sync service +func TestStateSyncConfig() *StateSyncConfig { + return DefaultStateSyncConfig() +} + +// ValidateBasic performs basic validation. +func (cfg *StateSyncConfig) ValidateBasic() error { + if cfg.Enable { + if len(cfg.RPCServers) == 0 { + return errors.New("rpc_servers is required") + } + if len(cfg.RPCServers) < 2 { + return errors.New("at least two rpc_servers entries are required") + } + for _, server := range cfg.RPCServers { + if len(server) == 0 { + return errors.New("found empty rpc_servers entry") + } + } + if cfg.TrustPeriod <= 0 { + return errors.New("trust_period is required") + } + if cfg.TrustHeight <= 0 { + return errors.New("trust_height is required") + } + if len(cfg.TrustHash) == 0 { + return errors.New("trust_hash is required") + } + _, err := hex.DecodeString(cfg.TrustHash) + if err != nil { + return fmt.Errorf("invalid trust_hash: %w", err) + } + } return nil } @@ -751,13 +807,23 @@ type ConsensusConfig struct { WalPath string `mapstructure:"wal_file"` walFile string // overrides WalPath if set - TimeoutPropose time.Duration `mapstructure:"timeout_propose"` - TimeoutProposeDelta time.Duration `mapstructure:"timeout_propose_delta"` - TimeoutPrevote time.Duration `mapstructure:"timeout_prevote"` - TimeoutPrevoteDelta time.Duration `mapstructure:"timeout_prevote_delta"` - TimeoutPrecommit time.Duration `mapstructure:"timeout_precommit"` + // How long we wait for a proposal block before prevoting nil + TimeoutPropose time.Duration `mapstructure:"timeout_propose"` + // How much timeout_propose increases with each round + TimeoutProposeDelta time.Duration `mapstructure:"timeout_propose_delta"` + // How long we wait after receiving +2/3 prevotes for “anything” (i.e. not a single block or nil) + TimeoutPrevote time.Duration `mapstructure:"timeout_prevote"` + // How much the timeout_prevote increases with each round + TimeoutPrevoteDelta time.Duration `mapstructure:"timeout_prevote_delta"` + // How long we wait after receiving +2/3 precommits for “anything” (i.e. not a single block or nil) + TimeoutPrecommit time.Duration `mapstructure:"timeout_precommit"` + // How much the timeout_precommit increases with each round TimeoutPrecommitDelta time.Duration `mapstructure:"timeout_precommit_delta"` - TimeoutCommit time.Duration `mapstructure:"timeout_commit"` + // How long we wait after committing a block, before starting on the new + // height (this gives us a chance to receive some more precommits, even + // though we already have +2/3). 
+ // NOTE: when modifying, make sure to update time_iota_ms genesis parameter + TimeoutCommit time.Duration `mapstructure:"timeout_commit"` // Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"` @@ -769,6 +835,8 @@ type ConsensusConfig struct { // Reactor sleep duration parameters PeerGossipSleepDuration time.Duration `mapstructure:"peer_gossip_sleep_duration"` PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer_query_maj23_sleep_duration"` + + DoubleSignCheckHeight int64 `mapstructure:"double_sign_check_height"` } // DefaultConsensusConfig returns a default configuration for the consensus service @@ -787,6 +855,7 @@ func DefaultConsensusConfig() *ConsensusConfig { CreateEmptyBlocksInterval: 0 * time.Second, PeerGossipSleepDuration: 100 * time.Millisecond, PeerQueryMaj23SleepDuration: 2000 * time.Millisecond, + DoubleSignCheckHeight: int64(0), } } @@ -799,10 +868,12 @@ func TestConsensusConfig() *ConsensusConfig { cfg.TimeoutPrevoteDelta = 1 * time.Millisecond cfg.TimeoutPrecommit = 10 * time.Millisecond cfg.TimeoutPrecommitDelta = 1 * time.Millisecond + // NOTE: when modifying, make sure to update time_iota_ms (testGenesisFmt) in toml.go cfg.TimeoutCommit = 10 * time.Millisecond cfg.SkipTimeoutCommit = true cfg.PeerGossipSleepDuration = 5 * time.Millisecond cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond + cfg.DoubleSignCheckHeight = int64(0) return cfg } @@ -812,21 +883,21 @@ func (cfg *ConsensusConfig) WaitForTxs() bool { } // Propose returns the amount of time to wait for a proposal -func (cfg *ConsensusConfig) Propose(round int) time.Duration { +func (cfg *ConsensusConfig) Propose(round int32) time.Duration { return time.Duration( cfg.TimeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round), ) * time.Nanosecond } // Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes -func (cfg *ConsensusConfig) Prevote(round int) time.Duration { +func (cfg *ConsensusConfig) Prevote(round int32) time.Duration { return time.Duration( cfg.TimeoutPrevote.Nanoseconds()+cfg.TimeoutPrevoteDelta.Nanoseconds()*int64(round), ) * time.Nanosecond } // Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits -func (cfg *ConsensusConfig) Precommit(round int) time.Duration { +func (cfg *ConsensusConfig) Precommit(round int32) time.Duration { return time.Duration( cfg.TimeoutPrecommit.Nanoseconds()+cfg.TimeoutPrecommitDelta.Nanoseconds()*int64(round), ) * time.Nanosecond @@ -884,6 +955,9 @@ func (cfg *ConsensusConfig) ValidateBasic() error { if cfg.PeerQueryMaj23SleepDuration < 0 { return errors.New("peer_query_maj23_sleep_duration can't be negative") } + if cfg.DoubleSignCheckHeight < 0 { + return errors.New("double_sign_check_height can't be negative") + } return nil } @@ -906,31 +980,12 @@ type TxIndexConfig struct { // 2) "kv" (default) - the simplest possible indexer, // backed by key-value storage (defaults to levelDB; see DBBackend). Indexer string `mapstructure:"indexer"` - - // Comma-separated list of compositeKeys to index (by default the only key is "tx.hash") - // - // You can also index transactions by height by adding "tx.height" key here. - // - // It's recommended to index only a subset of keys due to possible memory - // bloat. This is, of course, depends on the indexer's DB and the volume of - // transactions. 
- IndexKeys string `mapstructure:"index_keys"` - - // When set to true, tells indexer to index all compositeKeys (predefined keys: - // "tx.hash", "tx.height" and all keys from DeliverTx responses). - // - // Note this may be not desirable (see the comment above). IndexKeys has a - // precedence over IndexAllKeys (i.e. when given both, IndexKeys will be - // indexed). - IndexAllKeys bool `mapstructure:"index_all_keys"` } // DefaultTxIndexConfig returns a default configuration for the transaction indexer. func DefaultTxIndexConfig() *TxIndexConfig { return &TxIndexConfig{ - Indexer: "kv", - IndexKeys: "", - IndexAllKeys: false, + Indexer: "kv", } } diff --git a/config/config_test.go b/config/config_test.go index c83f1c3f5..6a46933bc 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestDefaultConfig(t *testing.T) { @@ -122,6 +123,11 @@ func TestMempoolConfigValidateBasic(t *testing.T) { } } +func TestStateSyncConfigValidateBasic(t *testing.T) { + cfg := TestStateSyncConfig() + require.NoError(t, cfg.ValidateBasic()) +} + func TestFastSyncConfigValidateBasic(t *testing.T) { cfg := TestFastSyncConfig() assert.NoError(t, cfg.ValidateBasic()) @@ -158,6 +164,7 @@ func TestConsensusConfig_ValidateBasic(t *testing.T) { "PeerGossipSleepDuration negative": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true}, "PeerQueryMaj23SleepDuration": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false}, "PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true}, + "DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true}, } for desc, tc := range testcases { tc := tc // appease linter diff --git a/config/toml.go b/config/toml.go index 3fe4d1aac..19973a3fb 100644 --- a/config/toml.go +++ b/config/toml.go @@ -4,7 +4,9 @@ import ( "bytes" "fmt" "io/ioutil" + "os" "path/filepath" + "strings" "text/template" tmos "github.com/tendermint/tendermint/libs/os" @@ -17,7 +19,10 @@ var configTemplate *template.Template func init() { var err error - if configTemplate, err = template.New("configFileTemplate").Parse(defaultConfigTemplate); err != nil { + tmpl := template.New("configFileTemplate").Funcs(template.FuncMap{ + "StringsJoin": strings.Join, + }) + if configTemplate, err = tmpl.Parse(defaultConfigTemplate); err != nil { panic(err) } } @@ -59,7 +64,7 @@ func WriteConfigFile(configFilePath string, config *Config) { panic(err) } - tmos.MustWriteFile(configFilePath, buffer.Bytes(), 0644) + mustWriteFile(configFilePath, buffer.Bytes(), 0644) } // Note: any changes to the comments/variables/mapstructure @@ -72,7 +77,9 @@ const defaultConfigTemplate = `# This is a TOML config file. # "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable # or --home cmd flag. 
-##### main base config options ##### +####################################################################### +###                   Main Base Config Options                      ### +####################################################################### # TCP or UNIX socket address of the ABCI application, # or the name of an ABCI application compiled in with the Tendermint binary @@ -86,7 +93,7 @@ moniker = "{{ .BaseConfig.Moniker }}" # and verifying their commits fast_sync = {{ .BaseConfig.FastSyncMode }} -# Database backend: goleveldb | cleveldb | boltdb | rocksdb +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb # * goleveldb (github.com/syndtr/goleveldb - most popular implementation) #   - pure go #   - stable @@ -102,6 +109,9 @@ fast_sync = {{ .BaseConfig.FastSyncMode }} #   - EXPERIMENTAL #   - requires gcc #   - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +#   - EXPERIMENTAL +#   - use badgerdb build tag (go build -tags badgerdb) db_backend = "{{ .BaseConfig.DBBackend }}" # Database directory @@ -134,16 +144,18 @@ node_key_file = "{{ js .BaseConfig.NodeKey }}" # Mechanism to connect to the ABCI application: socket | grpc abci = "{{ .BaseConfig.ABCI }}" -# TCP or UNIX socket address for the profiling server to listen on -prof_laddr = "{{ .BaseConfig.ProfListenAddress }}" - # If true, query the ABCI app on connecting to a new peer # so the app can decide if we should keep the connection or not filter_peers = {{ .BaseConfig.FilterPeers }} -##### advanced configuration options ##### -##### rpc server configuration options ##### +####################################################################### +###                 Advanced Configuration Options                  ### +####################################################################### + +####################################################### +###       RPC Server Configuration Options           ### +####################################################### [rpc] # TCP or UNIX socket address for the RPC server to listen on @@ -222,7 +234,12 @@ tls_cert_file = "{{ .RPC.TLSCertFile }}" # Otherwise, HTTP server is run. tls_key_file = "{{ .RPC.TLSKeyFile }}" -##### peer to peer configuration options ##### +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof_laddr = "{{ .RPC.PprofListenAddress }}" + +####################################################### +###           P2P Configuration Options             ### +####################################################### [p2p] # Address to listen for incoming connections @@ -293,7 +310,9 @@ allow_duplicate_ip = {{ .P2P.AllowDuplicateIP }} handshake_timeout = "{{ .P2P.HandshakeTimeout }}" dial_timeout = "{{ .P2P.DialTimeout }}" -##### mempool configuration options ##### +####################################################### +###          Mempool Configuration Options           ### +####################################################### [mempool] recheck = {{ .Mempool.Recheck }} @@ -312,31 +331,83 @@ max_txs_bytes = {{ .Mempool.MaxTxsBytes }} cache_size = {{ .Mempool.CacheSize }} # Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. max_tx_bytes = {{ .Mempool.MaxTxBytes }} -##### fast sync configuration options ##### +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). 
+max_batch_bytes = {{ .Mempool.MaxBatchBytes }} + +####################################################### +###         State Sync Configuration Options         ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). The node will have a truncated block history, +# starting from the height of the snapshot. +enable = {{ .StateSync.Enable }} + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted. +# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. +rpc_servers = "{{ StringsJoin .StateSync.RPCServers "," }}" +trust_height = {{ .StateSync.TrustHeight }} +trust_hash = "{{ .StateSync.TrustHash }}" +trust_period = "{{ .StateSync.TrustPeriod }}" + +# Time to spend discovering snapshots before initiating a restore. +discovery_time = "{{ .StateSync.DiscoveryTime }}" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "{{ .StateSync.TempDir }}" + +####################################################### +###         Fast Sync Configuration Options          ### +####################################################### [fastsync] # Fast Sync version to use: #   1) "v0" (default) - the legacy fast sync implementation #   2) "v1" - refactor of v0 version for better testability -#   3) "v2" - refactor of v1 version for better usability +#   3) "v2" - complete redesign of v0, optimized for testability & readability version = "{{ .FastSync.Version }}" -##### consensus configuration options ##### +####################################################### +###         Consensus Configuration Options          ### +####################################################### [consensus] wal_file = "{{ js .Consensus.WalPath }}" +# How long we wait for a proposal block before prevoting nil timeout_propose = "{{ .Consensus.TimeoutPropose }}" +# How much timeout_propose increases with each round timeout_propose_delta = "{{ .Consensus.TimeoutProposeDelta }}" +# How long we wait after receiving +2/3 prevotes for “anything” (i.e. not a single block or nil) timeout_prevote = "{{ .Consensus.TimeoutPrevote }}" +# How much the timeout_prevote increases with each round timeout_prevote_delta = "{{ .Consensus.TimeoutPrevoteDelta }}" +# How long we wait after receiving +2/3 precommits for “anything” (i.e. not a single block or nil) timeout_precommit = "{{ .Consensus.TimeoutPrecommit }}" +# How much the timeout_precommit increases with each round timeout_precommit_delta = "{{ .Consensus.TimeoutPrecommitDelta }}" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). 
timeout_commit = "{{ .Consensus.TimeoutCommit }}" +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double_sign_check_height} last blocks. +# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. +double_sign_check_height = {{ .Consensus.DoubleSignCheckHeight }} + # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }} @@ -348,39 +419,25 @@ create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}" peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}" peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}" -##### transactions indexer configuration options ##### +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### [tx_index] # What indexer to use for transactions # +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# # Options: # 1) "null" # 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. indexer = "{{ .TxIndex.Indexer }}" -# Comma-separated list of compositeKeys to index (by default the only key is "tx.hash") -# Remember that Event has the following structure: type.key -# type: [ -# key: value, -# ... -# ] -# -# You can also index transactions by height by adding "tx.height" key here. -# -# It's recommended to index only a subset of keys due to possible memory -# bloat. This is, of course, depends on the indexer's DB and the volume of -# transactions. -index_keys = "{{ .TxIndex.IndexKeys }}" - -# When set to true, tells indexer to index all compositeKeys (predefined keys: -# "tx.hash", "tx.height" and all keys from DeliverTx responses). -# -# Note this may be not desirable (see the comment above). IndexKeys has a -# precedence over IndexAllKeys (i.e. when given both, IndexKeys will be -# indexed). 
-index_all_keys = {{ .TxIndex.IndexAllKeys }} - -##### instrumentation configuration options ##### +####################################################### +### Instrumentation Configuration Options ### +####################################################### [instrumentation] # When true, Prometheus metrics are served under /metrics on @@ -436,19 +493,44 @@ func ResetTestRootWithChainID(testName string, chainID string) *Config { chainID = "tendermint_test" } testGenesis := fmt.Sprintf(testGenesisFmt, chainID) - tmos.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644) + mustWriteFile(genesisFilePath, []byte(testGenesis), 0644) } // we always overwrite the priv val - tmos.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644) - tmos.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644) + mustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644) + mustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644) config := TestConfig().SetRoot(rootDir) return config } +func mustWriteFile(filePath string, contents []byte, mode os.FileMode) { + if err := ioutil.WriteFile(filePath, contents, mode); err != nil { + tmos.Exit(fmt.Sprintf("failed to write file: %v", err)) + } +} + var testGenesisFmt = `{ "genesis_time": "2018-10-10T08:20:13.695936996Z", "chain_id": "%s", + "initial_height": "1", + "consensus_params": { + "block": { + "max_bytes": "22020096", + "max_gas": "-1", + "time_iota_ms": "10" + }, + "evidence": { + "max_age_num_blocks": "100000", + "max_age_duration": "172800000000000", + "max_bytes": "1048576" + }, + "validator": { + "pub_key_types": [ + "ed25519" + ] + }, + "version": {} + }, "validators": [ { "pub_key": { @@ -476,6 +558,6 @@ var testPrivValidatorKey = `{ var testPrivValidatorState = `{ "height": "0", - "round": "0", + "round": 0, "step": 0 }` diff --git a/config/toml_test.go b/config/toml_test.go index 5910f10c5..f19710687 100644 --- a/config/toml_test.go +++ b/config/toml_test.go @@ -25,7 +25,7 @@ func TestEnsureRoot(t *testing.T) { // setup temp dir for test tmpDir, err := ioutil.TempDir("", "config-test") require.Nil(err) - defer os.RemoveAll(tmpDir) // nolint: errcheck + defer os.RemoveAll(tmpDir) // create root dir EnsureRoot(tmpDir) diff --git a/consensus/README.md b/consensus/README.md index 35b30b0fa..44a36012f 100644 --- a/consensus/README.md +++ b/consensus/README.md @@ -1 +1,3 @@ +# Consensus + See the [consensus spec](https://github.com/tendermint/spec/tree/master/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/spec/tree/master/spec/reactors/consensus) for more information. 
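The test added below (TestByzantinePrevoteEquivocation) has validator 0 sign both a real prevote and a nil prevote at the same height and round, then asserts that every honest node eventually commits DuplicateVoteEvidence against it. Stripped of the consensus plumbing, the property being detected reduces to the following sketch (hypothetical simplified types, not the repo's API):

package main

import "fmt"

// vote is a deliberately simplified stand-in for the real tendermint/types vote.
type vote struct {
	Validator string
	Height    int64
	Round     int32
	Type      string // "prevote" or "precommit"
	BlockID   string // "" represents a nil vote
}

// isEquivocation reports whether two votes from the same validator for the
// same height/round/type reference different blocks, which is the condition
// that yields duplicate vote evidence.
func isEquivocation(a, b vote) bool {
	return a.Validator == b.Validator &&
		a.Height == b.Height && a.Round == b.Round &&
		a.Type == b.Type && a.BlockID != b.BlockID
}

func main() {
	a := vote{"val0", 2, 0, "prevote", "A1B2"}
	b := vote{"val0", 2, 0, "prevote", ""} // the nil prevote sent to half the peers
	fmt.Println(isEquivocation(a, b))      // true
}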
diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 649d47fbb..fa945c310 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -3,31 +3,219 @@ package consensus import ( "context" "fmt" + "os" + "path" "sync" "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + + abcicli "github.com/tendermint/tendermint/abci/client" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/evidence" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" + tmsync "github.com/tendermint/tendermint/libs/sync" + mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) //---------------------------------------------- // byzantine failures +// Byzantine node sends two different prevotes (nil and blockID) to the same validator +func TestByzantinePrevoteEquivocation(t *testing.T) { + const nValidators = 4 + const byzantineNode = 0 + const prevoteHeight = int64(2) + testName := "consensus_byzantine_test" + tickerFunc := newMockTickerFunc(true) + appFunc := newCounter + + genDoc, privVals := randGenesisDoc(nValidators, false, 30) + css := make([]*State, nValidators) + + for i := 0; i < nValidators; i++ { + logger := consensusLogger().With("test", "byzantine", "validator", i) + stateDB := dbm.NewMemDB() // each state needs its own db + stateStore := sm.NewStore(stateDB) + state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) + thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + defer os.RemoveAll(thisConfig.RootDir) + ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal + app := appFunc() + vals := types.TM2PB.ValidatorUpdates(state.Validators) + app.InitChain(abci.RequestInitChain{Validators: vals}) + + blockDB := dbm.NewMemDB() + blockStore := store.NewBlockStore(blockDB) + + // one for mempool, one for consensus + mtx := new(tmsync.Mutex) + proxyAppConnMem := abcicli.NewLocalClient(mtx, app) + proxyAppConnCon := abcicli.NewLocalClient(mtx, app) + + // Make Mempool + mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) + mempool.SetLogger(log.TestingLogger().With("module", "mempool")) + if thisConfig.Consensus.WaitForTxs() { + mempool.EnableTxsAvailable() + } + + // Make a full instance of the evidence pool + evidenceDB := dbm.NewMemDB() + evpool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) + require.NoError(t, err) + evpool.SetLogger(logger.With("module", "evidence")) + + // Make State + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool) + cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) + cs.SetLogger(cs.Logger) + // set private validator + pv := privVals[i] + cs.SetPrivValidator(pv) + + eventBus := types.NewEventBus() + eventBus.SetLogger(log.TestingLogger().With("module", "events")) + err = eventBus.Start() + require.NoError(t, err) + cs.SetEventBus(eventBus) + + cs.SetTimeoutTicker(tickerFunc()) + cs.SetLogger(logger) + + css[i] = cs + } + + // initialize the reactors for each of the validators + reactors := make([]*Reactor, nValidators) + blocksSubs := make([]types.Subscription, 0) + eventBuses := make([]*types.EventBus, nValidators) + for i 
:= 0; i < nValidators; i++ { + reactors[i] = NewReactor(css[i], true) // so we dont start the consensus states + reactors[i].SetLogger(css[i].Logger) + + // eventBus is already started with the cs + eventBuses[i] = css[i].eventBus + reactors[i].SetEventBus(eventBuses[i]) + + blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock) + require.NoError(t, err) + blocksSubs = append(blocksSubs, blocksSub) + + if css[i].state.LastBlockHeight == 0 { // simulate handle initChain in handshake + err = css[i].blockExec.Store().Save(css[i].state) + require.NoError(t, err) + } + } + // make connected switches and start all reactors + p2p.MakeConnectedSwitches(config.P2P, nValidators, func(i int, s *p2p.Switch) *p2p.Switch { + s.AddReactor("CONSENSUS", reactors[i]) + s.SetLogger(reactors[i].conS.Logger.With("module", "p2p")) + return s + }, p2p.Connect2Switches) + + // create byzantine validator + bcs := css[byzantineNode] + + // alter prevote so that the byzantine node double votes when height is 2 + bcs.doPrevote = func(height int64, round int32) { + // allow first height to happen normally so that byzantine validator is no longer proposer + if height == prevoteHeight { + bcs.Logger.Info("Sending two votes") + prevote1, err := bcs.signVote(tmproto.PrevoteType, bcs.ProposalBlock.Hash(), bcs.ProposalBlockParts.Header()) + require.NoError(t, err) + prevote2, err := bcs.signVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + require.NoError(t, err) + peerList := reactors[byzantineNode].Switch.Peers().List() + bcs.Logger.Info("Getting peer list", "peers", peerList) + // send two votes to all peers (1st to one half, 2nd to another half) + for i, peer := range peerList { + if i < len(peerList)/2 { + bcs.Logger.Info("Signed and pushed vote", "vote", prevote1, "peer", peer) + peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote1})) + } else { + bcs.Logger.Info("Signed and pushed vote", "vote", prevote2, "peer", peer) + peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote2})) + } + } + } else { + bcs.Logger.Info("Behaving normally") + bcs.defaultDoPrevote(height, round) + } + } + + // start the consensus reactors + for i := 0; i < nValidators; i++ { + s := reactors[i].conS.GetState() + reactors[i].SwitchToConsensus(s, false) + } + defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) + + // Evidence should be submitted and committed at the third height but + // we will check the first six just in case + evidenceFromEachValidator := make([]types.Evidence, nValidators) + + wg := new(sync.WaitGroup) + wg.Add(4) + for height := 1; height < 6; height++ { + for i := 0; i < nValidators; i++ { + go func(j int) { + msg := <-blocksSubs[j].Out() + block := msg.Data().(types.EventDataNewBlock).Block + if len(block.Evidence.Evidence) != 0 { + evidenceFromEachValidator[j] = block.Evidence.Evidence[0] + wg.Done() + } + }(i) + } + } + + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + pubkey, _ := bcs.privValidator.GetPubKey() + + select { + case <-done: + for idx, ev := range evidenceFromEachValidator { + if assert.NotNil(t, ev, idx) { + ev, ok := ev.(*types.DuplicateVoteEvidence) + assert.True(t, ok) + assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress) + assert.Equal(t, prevoteHeight, ev.Height()) + } + } + case <-time.After(10 * time.Second): + for i, reactor := range reactors { + t.Logf("Consensus Reactor %d\n%v", i, reactor) + } + t.Fatalf("Timed out waiting for all validators to commit first block") + } +} + // 4 
validators. 1 is byzantine. The other three are partitioned into A (1 val) and B (2 vals). // byzantine validator sends conflicting proposals into A and B, // and prevotes/precommits on both of them. // B sees a commit, A doesn't. -// Byzantine validator refuses to prevote. // Heal partition and ensure A sees the commit -func TestByzantine(t *testing.T) { +func TestByzantineConflictingProposalsWithPartition(t *testing.T) { N := 4 logger := consensusLogger().With("test", "byzantine") - css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter) + app := newCounter + css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), app) defer cleanup() // give the byzantine validator a normal ticker @@ -51,17 +239,22 @@ func TestByzantine(t *testing.T) { blocksSubs := make([]types.Subscription, N) reactors := make([]p2p.Reactor, N) for i := 0; i < N; i++ { + + // enable txs so we can create different proposals + assertMempool(css[i].txNotifier).EnableTxsAvailable() // make first val byzantine if i == 0 { // NOTE: Now, test validators are MockPV, which by default doesn't // do any safety checks. css[i].privValidator.(types.MockPV).DisableChecks() - css[i].decideProposal = func(j int) func(int64, int) { - return func(height int64, round int) { + css[i].decideProposal = func(j int32) func(int64, int32) { + return func(height int64, round int32) { byzantineDecideProposalFunc(t, height, round, css[j], switches[j]) } - }(i) - css[i].doPrevote = func(height int64, round int) {} + }(int32(i)) + // We are setting the prevote function to do nothing because the prevoting + // and precommitting are done alongside the proposal. + css[i].doPrevote = func(height int64, round int32) {} } eventBus := css[i].eventBus @@ -83,15 +276,18 @@ func TestByzantine(t *testing.T) { } reactors[i] = conRI - sm.SaveState(css[i].blockExec.DB(), css[i].state) //for save height 1's validators info + err = css[i].blockExec.Store().Save(css[i].state) // for save height 1's validators info + require.NoError(t, err) } defer func() { for _, r := range reactors { if rr, ok := r.(*ByzantineReactor); ok { - rr.reactor.Switch.Stop() + err := rr.reactor.Switch.Stop() + require.NoError(t, err) } else { - r.(*Reactor).Switch.Stop() + err := r.(*Reactor).Switch.Stop() + require.NoError(t, err) } } }() @@ -112,13 +308,13 @@ func TestByzantine(t *testing.T) { // note these must be started before the byz for i := 1; i < N; i++ { cr := reactors[i].(*Reactor) - cr.SwitchToConsensus(cr.conS.GetState(), 0) + cr.SwitchToConsensus(cr.conS.GetState(), false) } // start the byzantine state machine byzR := reactors[0].(*ByzantineReactor) s := byzR.reactor.conS.GetState() - byzR.reactor.SwitchToConsensus(s, 0) + byzR.reactor.SwitchToConsensus(s, false) // byz proposer sends one block to peers[0] // and the other block to peers[1] and peers[2]. @@ -172,26 +368,35 @@ func TestByzantine(t *testing.T) { //------------------------------- // byzantine consensus functions -func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *State, sw *p2p.Switch) { +func byzantineDecideProposalFunc(t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch) { // byzantine user should create two proposals and try to split the vote. // Avoid sending on internalMsgQueue and running consensus state. // Create a new proposal block from state/txs from the mempool. 
block1, blockParts1 := cs.createProposalBlock() - polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartsHeader: blockParts1.Header()} + polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartSetHeader: blockParts1.Header()} proposal1 := types.NewProposal(height, round, polRound, propBlockID) - if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal1); err != nil { + p1 := proposal1.ToProto() + if err := cs.privValidator.SignProposal(cs.state.ChainID, p1); err != nil { t.Error(err) } + proposal1.Signature = p1.Signature + + // some new transactions come in (this ensures that the proposals are different) + deliverTxsRange(cs, 0, 1) + // Create a new proposal block from state/txs from the mempool. block2, blockParts2 := cs.createProposalBlock() - polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartsHeader: blockParts2.Header()} + polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartSetHeader: blockParts2.Header()} proposal2 := types.NewProposal(height, round, polRound, propBlockID) - if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal2); err != nil { + p2 := proposal2.ToProto() + if err := cs.privValidator.SignProposal(cs.state.ChainID, p2); err != nil { t.Error(err) } + proposal2.Signature = p2.Signature + block1Hash := block1.Hash() block2Hash := block2.Hash() @@ -209,7 +414,7 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Stat func sendProposalAndParts( height int64, - round int, + round int32, cs *State, peer p2p.Peer, proposal *types.Proposal, @@ -218,27 +423,27 @@ func sendProposalAndParts( ) { // proposal msg := &ProposalMessage{Proposal: proposal} - peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) + peer.Send(DataChannel, MustEncode(msg)) // parts - for i := 0; i < parts.Total(); i++ { + for i := 0; i < int(parts.Total()); i++ { part := parts.GetPart(i) msg := &BlockPartMessage{ Height: height, // This tells peer that this part applies to us. Round: round, // This tells peer that this part applies to us. Part: part, } - peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) + peer.Send(DataChannel, MustEncode(msg)) } // votes cs.mtx.Lock() - prevote, _ := cs.signVote(types.PrevoteType, blockHash, parts.Header()) - precommit, _ := cs.signVote(types.PrecommitType, blockHash, parts.Header()) + prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header()) + precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header()) cs.mtx.Unlock() - peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{prevote})) - peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{precommit})) + peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote})) + peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit})) } //---------------------------------------- @@ -268,8 +473,8 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) { peer.Set(types.PeerStateKey, peerState) // Send our state to peer. - // If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). - if !br.reactor.fastSync { + // If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). 
+ if !br.reactor.waitSync { br.reactor.sendNewRoundStepMessage(peer) } } diff --git a/consensus/codec.go b/consensus/codec.go deleted file mode 100644 index ae7dbaab2..000000000 --- a/consensus/codec.go +++ /dev/null @@ -1,15 +0,0 @@ -package consensus - -import ( - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/types" -) - -var cdc = amino.NewCodec() - -func init() { - RegisterMessages(cdc) - RegisterWALMessages(cdc) - types.RegisterBlockAmino(cdc) -} diff --git a/consensus/common_test.go b/consensus/common_test.go index f8055f01c..1fd5bb599 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -13,7 +13,6 @@ import ( "time" "github.com/go-kit/kit/log/term" - "github.com/pkg/errors" "github.com/stretchr/testify/require" "path" @@ -30,9 +29,11 @@ import ( "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + tmsync "github.com/tendermint/tendermint/libs/sync" mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/privval" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" @@ -48,9 +49,11 @@ const ( type cleanupFunc func() // genesis, chain_id, priv_val -var config *cfg.Config // NOTE: must be reset for each _test.go file -var consensusReplayConfig *cfg.Config -var ensureTimeout = time.Millisecond * 100 +var ( + config *cfg.Config // NOTE: must be reset for each _test.go file + consensusReplayConfig *cfg.Config + ensureTimeout = time.Millisecond * 200 +) func ensureDir(dir string, mode os.FileMode) { if err := tmos.EnsureDir(dir, mode); err != nil { @@ -66,29 +69,31 @@ func ResetConfig(name string) *cfg.Config { // validator stub (a kvstore consensus peer we control) type validatorStub struct { - Index int // Validator index. NOTE: we don't assume validator set changes. + Index int32 // Validator index. NOTE: we don't assume validator set changes. 
Height int64 - Round int + Round int32 types.PrivValidator + VotingPower int64 } var testMinPower int64 = 10 -func newValidatorStub(privValidator types.PrivValidator, valIndex int) *validatorStub { +func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *validatorStub { return &validatorStub{ Index: valIndex, PrivValidator: privValidator, + VotingPower: testMinPower, } } func (vs *validatorStub) signVote( - voteType types.SignedMsgType, + voteType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) { pubKey, err := vs.PrivValidator.GetPubKey() if err != nil { - return nil, errors.Wrap(err, "can't get pubkey") + return nil, fmt.Errorf("can't get pubkey: %w", err) } vote := &types.Vote{ @@ -98,15 +103,17 @@ func (vs *validatorStub) signVote( Round: vs.Round, Timestamp: tmtime.Now(), Type: voteType, - BlockID: types.BlockID{Hash: hash, PartsHeader: header}, + BlockID: types.BlockID{Hash: hash, PartSetHeader: header}, } + v := vote.ToProto() + err = vs.PrivValidator.SignVote(config.ChainID(), v) + vote.Signature = v.Signature - err = vs.PrivValidator.SignVote(config.ChainID(), vote) return vote, err } // Sign vote for type/hash/header -func signVote(vs *validatorStub, voteType types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote { +func signVote(vs *validatorStub, voteType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote { v, err := vs.signVote(voteType, hash, header) if err != nil { panic(fmt.Errorf("failed to sign vote: %v", err)) @@ -115,7 +122,7 @@ func signVote(vs *validatorStub, voteType types.SignedMsgType, hash []byte, head } func signVotes( - voteType types.SignedMsgType, + voteType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote { @@ -138,13 +145,13 @@ func incrementRound(vss ...*validatorStub) { } } -type ValidatorStubsByAddress []*validatorStub +type ValidatorStubsByPower []*validatorStub -func (vss ValidatorStubsByAddress) Len() int { +func (vss ValidatorStubsByPower) Len() int { return len(vss) } -func (vss ValidatorStubsByAddress) Less(i, j int) bool { +func (vss ValidatorStubsByPower) Less(i, j int) bool { vssi, err := vss[i].GetPubKey() if err != nil { panic(err) @@ -153,21 +160,25 @@ func (vss ValidatorStubsByAddress) Less(i, j int) bool { if err != nil { panic(err) } - return bytes.Compare(vssi.Address(), vssj.Address()) == -1 + + if vss[i].VotingPower == vss[j].VotingPower { + return bytes.Compare(vssi.Address(), vssj.Address()) == -1 + } + return vss[i].VotingPower > vss[j].VotingPower } -func (vss ValidatorStubsByAddress) Swap(i, j int) { +func (vss ValidatorStubsByPower) Swap(i, j int) { it := vss[i] vss[i] = vss[j] - vss[i].Index = i + vss[i].Index = int32(i) vss[j] = it - vss[j].Index = j + vss[j].Index = int32(j) } //------------------------------------------------------------------------------- // Functions for transitioning the consensus state -func startTestRound(cs *State, height int64, round int) { +func startTestRound(cs *State, height int64, round int32) { cs.enterNewRound(height, round) cs.startRoutines(0) } @@ -177,7 +188,7 @@ func decideProposal( cs1 *State, vs *validatorStub, height int64, - round int, + round int32, ) (proposal *types.Proposal, block *types.Block) { cs1.mtx.Lock() block, blockParts := cs1.createProposalBlock() @@ -189,11 +200,15 @@ func decideProposal( } // Make proposal - polRound, propBlockID := validRound, types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()} + polRound, 
propBlockID := validRound, types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} proposal = types.NewProposal(height, round, polRound, propBlockID) - if err := vs.SignProposal(chainID, proposal); err != nil { + p := proposal.ToProto() + if err := vs.SignProposal(chainID, p); err != nil { panic(err) } + + proposal.Signature = p.Signature + return } @@ -205,7 +220,7 @@ func addVotes(to *State, votes ...*types.Vote) { func signAddVotes( to *State, - voteType types.SignedMsgType, + voteType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader, vss ...*validatorStub, ) { votes := signVotes(voteType, hash, header, vss...) addVotes(to, votes...) } -func validatePrevote(t *testing.T, cs *State, round int, privVal *validatorStub, blockHash []byte) { +func validatePrevote(t *testing.T, cs *State, round int32, privVal *validatorStub, blockHash []byte) { prevotes := cs.Votes.Prevotes(round) pubKey, err := privVal.GetPubKey() require.NoError(t, err) @@ -252,7 +267,7 @@ func validatePrecommit( t *testing.T, cs *State, thisRound, - lockRound int, + lockRound int32, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte, @@ -294,14 +309,13 @@ func validatePrecommit( lockedBlockHash)) } } - } func validatePrevoteAndPrecommit( t *testing.T, cs *State, thisRound, - lockRound int, + lockRound int32, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte, @@ -361,7 +375,7 @@ func newStateWithConfigAndBlockStore( blockStore := store.NewBlockStore(blockDB) // one for mempool, one for consensus - mtx := new(sync.Mutex) + mtx := new(tmsync.Mutex) proxyAppConnMem := abcicli.NewLocalClient(mtx, app) proxyAppConnCon := abcicli.NewLocalClient(mtx, app) @@ -372,20 +386,26 @@ func newStateWithConfigAndBlockStore( mempool.EnableTxsAvailable() } - // mock the evidence pool - evpool := sm.MockEvidencePool{} + evpool := sm.EmptyEvidencePool{} // Make State stateDB := blockDB - sm.SaveState(stateDB, state) //for save height 1's validators info - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool) + stateStore := sm.NewStore(stateDB) + if err := stateStore.Save(state); err != nil { // to save height 1's validators info + panic(err) + } + + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool) cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) cs.SetLogger(log.TestingLogger().With("module", "consensus")) cs.SetPrivValidator(pv) eventBus := types.NewEventBus() eventBus.SetLogger(log.TestingLogger().With("module", "events")) - eventBus.Start() + err := eventBus.Start() + if err != nil { + panic(err) + } cs.SetEventBus(eventBus) return cs } @@ -394,7 +414,10 @@ func loadPrivValidator(config *cfg.Config) *privval.FilePV { privValidatorKeyFile := config.PrivValidatorKeyFile() ensureDir(filepath.Dir(privValidatorKeyFile), 0700) privValidatorStateFile := config.PrivValidatorStateFile() - privValidator := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) + privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) + if err != nil { + panic(err) + } privValidator.Reset() return privValidator } @@ -408,7 +431,7 @@ func randState(nValidators int) (*State, []*validatorStub) { cs := newState(state, privVals[0], counter.NewApplication(true)) for i := 0; i < nValidators; i++ { - vss[i] = newValidatorStub(privVals[i], i) + vss[i] = newValidatorStub(privVals[i], int32(i)) } // since cs1 starts at 1 incrementHeight(vss[1:]...)
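The hunks above repeat one idiom introduced by the protobuf migration: convert the native type with ToProto, sign the proto form, then copy the signature back onto the native struct. A minimal sketch of that idiom, using only calls that appear in this diff (the helper name signProposalViaProto is illustrative, not part of the patch):

package consensus

import "github.com/tendermint/tendermint/types"

// signProposalViaProto mirrors decideProposal and validatorStub.signVote
// above: the validator signs the protobuf representation, and the resulting
// signature is copied back onto the native type before it is used.
func signProposalViaProto(pv types.PrivValidator, chainID string, proposal *types.Proposal) error {
	p := proposal.ToProto()
	if err := pv.SignProposal(chainID, p); err != nil {
		return err
	}
	proposal.Signature = p.Signature
	return nil
}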
@@ -457,7 +480,7 @@ func ensureNoNewTimeout(stepCh <-chan tmpubsub.Message, timeout int64) { "We should be stuck waiting, not receiving NewTimeout event") } -func ensureNewEvent(ch <-chan tmpubsub.Message, height int64, round int, timeout time.Duration, errorMessage string) { +func ensureNewEvent(ch <-chan tmpubsub.Message, height int64, round int32, timeout time.Duration, errorMessage string) { select { case <-time.After(timeout): panic(errorMessage) @@ -477,7 +500,7 @@ func ensureNewEvent(ch <-chan tmpubsub.Message, height int64, round int, timeout } } -func ensureNewRound(roundCh <-chan tmpubsub.Message, height int64, round int) { +func ensureNewRound(roundCh <-chan tmpubsub.Message, height int64, round int32) { select { case <-time.After(ensureTimeout): panic("Timeout expired while waiting for NewRound event") @@ -496,13 +519,13 @@ func ensureNewRound(roundCh <-chan tmpubsub.Message, height int64, round int) { } } -func ensureNewTimeout(timeoutCh <-chan tmpubsub.Message, height int64, round int, timeout int64) { +func ensureNewTimeout(timeoutCh <-chan tmpubsub.Message, height int64, round int32, timeout int64) { timeoutDuration := time.Duration(timeout*10) * time.Nanosecond ensureNewEvent(timeoutCh, height, round, timeoutDuration, "Timeout expired while waiting for NewTimeout event") } -func ensureNewProposal(proposalCh <-chan tmpubsub.Message, height int64, round int) { +func ensureNewProposal(proposalCh <-chan tmpubsub.Message, height int64, round int32) { select { case <-time.After(ensureTimeout): panic("Timeout expired while waiting for NewProposal event") @@ -521,7 +544,7 @@ func ensureNewProposal(proposalCh <-chan tmpubsub.Message, height int64, round i } } -func ensureNewValidBlock(validBlockCh <-chan tmpubsub.Message, height int64, round int) { +func ensureNewValidBlock(validBlockCh <-chan tmpubsub.Message, height int64, round int32) { ensureNewEvent(validBlockCh, height, round, ensureTimeout, "Timeout expired while waiting for NewValidBlock event") } @@ -561,12 +584,12 @@ func ensureNewBlockHeader(blockCh <-chan tmpubsub.Message, height int64, blockHa } } -func ensureNewUnlock(unlockCh <-chan tmpubsub.Message, height int64, round int) { +func ensureNewUnlock(unlockCh <-chan tmpubsub.Message, height int64, round int32) { ensureNewEvent(unlockCh, height, round, ensureTimeout, "Timeout expired while waiting for NewUnlock event") } -func ensureProposal(proposalCh <-chan tmpubsub.Message, height int64, round int, propID types.BlockID) { +func ensureProposal(proposalCh <-chan tmpubsub.Message, height int64, round int32, propID types.BlockID) { select { case <-time.After(ensureTimeout): panic("Timeout expired while waiting for NewProposal event") @@ -583,21 +606,21 @@ func ensureProposal(proposalCh <-chan tmpubsub.Message, height int64, round int, panic(fmt.Sprintf("expected round %v, got %v", round, proposalEvent.Round)) } if !proposalEvent.BlockID.Equals(propID) { - panic("Proposed block does not match expected block") + panic(fmt.Sprintf("Proposed block does not match expected block (%v != %v)", proposalEvent.BlockID, propID)) } } } -func ensurePrecommit(voteCh <-chan tmpubsub.Message, height int64, round int) { - ensureVote(voteCh, height, round, types.PrecommitType) +func ensurePrecommit(voteCh <-chan tmpubsub.Message, height int64, round int32) { + ensureVote(voteCh, height, round, tmproto.PrecommitType) } -func ensurePrevote(voteCh <-chan tmpubsub.Message, height int64, round int) { - ensureVote(voteCh, height, round, types.PrevoteType) +func ensurePrevote(voteCh <-chan 
tmpubsub.Message, height int64, round int32) { + ensureVote(voteCh, height, round, tmproto.PrevoteType) } -func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int, - voteType types.SignedMsgType) { +func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int32, + voteType tmproto.SignedMsgType) { select { case <-time.After(ensureTimeout): panic("Timeout expired while waiting for NewVote event") @@ -620,6 +643,14 @@ func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int, } } +func ensurePrecommitTimeout(ch <-chan tmpubsub.Message) { + select { + case <-time.After(ensureTimeout): + panic("Timeout expired while waiting for the Precommit to Timeout") + case <-ch: + } +} + func ensureNewEventOnChannel(ch <-chan tmpubsub.Message) { select { case <-time.After(ensureTimeout): @@ -652,7 +683,8 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou configRootDirs := make([]string, 0, nValidators) for i := 0; i < nValidators; i++ { stateDB := dbm.NewMemDB() // each state needs its own db - state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + stateStore := sm.NewStore(stateDB) + state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) configRootDirs = append(configRootDirs, thisConfig.RootDir) for _, opt := range configOpts { @@ -689,7 +721,8 @@ func randConsensusNetWithPeers( configRootDirs := make([]string, 0, nPeers) for i := 0; i < nPeers; i++ { stateDB := dbm.NewMemDB() // each state needs its own db - state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + stateStore := sm.NewStore(stateDB) + state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) configRootDirs = append(configRootDirs, thisConfig.RootDir) ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal @@ -709,7 +742,10 @@ func randConsensusNetWithPeers( panic(err) } - privVal = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) + privVal, err = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") + if err != nil { + panic(err) + } } app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i))) @@ -719,7 +755,7 @@ func randConsensusNetWithPeers( state.Version.Consensus.App = kvstore.ProtocolVersion } app.InitChain(abci.RequestInitChain{Validators: vals}) - //sm.SaveState(stateDB,state) //height 1's validatorsInfo already saved in LoadStateFromDBOrGenesisDoc above + // sm.SaveState(stateDB,state) //height 1's validatorsInfo already saved in LoadStateFromDBOrGenesisDoc above css[i] = newStateWithConfig(thisConfig, state, privVal, app) css[i].SetTimeoutTicker(tickerFunc()) @@ -758,9 +794,10 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G sort.Sort(types.PrivValidatorsByAddress(privValidators)) return &types.GenesisDoc{ - GenesisTime: tmtime.Now(), - ChainID: config.ChainID(), - Validators: validators, + GenesisTime: tmtime.Now(), + InitialHeight: 1, + ChainID: config.ChainID(), + Validators: validators, }, privValidators } diff --git a/consensus/invalid_test.go b/consensus/invalid_test.go new file mode 100644 index 000000000..907693c57 --- /dev/null +++ b/consensus/invalid_test.go @@ -0,0 +1,100 @@ +package consensus + +import ( + "testing" + + "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/libs/log" + tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/p2p" + tmproto 
"github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" +) + +//---------------------------------------------- +// byzantine failures + +// one byz val sends a precommit for a random block at each height +// Ensure a testnet makes blocks +func TestReactorInvalidPrecommit(t *testing.T) { + N := 4 + css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter) + defer cleanup() + + for i := 0; i < 4; i++ { + ticker := NewTimeoutTicker() + ticker.SetLogger(css[i].Logger) + css[i].SetTimeoutTicker(ticker) + + } + + reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N) + + // this val sends a random precommit at each height + byzValIdx := 0 + byzVal := css[byzValIdx] + byzR := reactors[byzValIdx] + + // update the doPrevote function to just send a valid precommit for a random block + // and otherwise disable the priv validator + byzVal.mtx.Lock() + pv := byzVal.privValidator + byzVal.doPrevote = func(height int64, round int32) { + invalidDoPrevoteFunc(t, height, round, byzVal, byzR.Switch, pv) + } + byzVal.mtx.Unlock() + defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) + + // wait for a bunch of blocks + // TODO: make this tighter by ensuring the halt happens by block 2 + for i := 0; i < 10; i++ { + timeoutWaitGroup(t, N, func(j int) { + <-blocksSubs[j].Out() + }, css) + } +} + +func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch, pv types.PrivValidator) { + // routine to: + // - precommit for a random block + // - send precommit to all peers + // - disable privValidator (so we don't do normal precommits) + go func() { + cs.mtx.Lock() + cs.privValidator = pv + pubKey, err := cs.privValidator.GetPubKey() + if err != nil { + panic(err) + } + addr := pubKey.Address() + valIndex, _ := cs.Validators.GetByAddress(addr) + + // precommit a random block + blockHash := bytes.HexBytes(tmrand.Bytes(32)) + precommit := &types.Vote{ + ValidatorAddress: addr, + ValidatorIndex: valIndex, + Height: cs.Height, + Round: cs.Round, + Timestamp: cs.voteTime(), + Type: tmproto.PrecommitType, + BlockID: types.BlockID{ + Hash: blockHash, + PartSetHeader: types.PartSetHeader{Total: 1, Hash: tmrand.Bytes(32)}}, + } + p := precommit.ToProto() + err = cs.privValidator.SignVote(cs.state.ChainID, p) + if err != nil { + t.Error(err) + } + precommit.Signature = p.Signature + cs.privValidator = nil // disable priv val so we don't do normal votes + cs.mtx.Unlock() + + peers := sw.Peers().List() + for _, peer := range peers { + cs.Logger.Info("Sending bad vote", "block", blockHash, "peer", peer) + peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit})) + } + }() +} diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index 8e268d444..db9662acb 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -45,13 +46,15 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { config := ResetConfig("consensus_mempool_txs_available_test") defer os.RemoveAll(config.RootDir) + config.Consensus.CreateEmptyBlocksInterval = ensureTimeout state, privVals := randGenesisState(1, false, 10) cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) + assertMempool(cs.txNotifier).EnableTxsAvailable() - height, round := cs.Height, 
cs.Round + newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - startTestRound(cs, height, round) + startTestRound(cs, cs.Height, cs.Round) ensureNewEventOnChannel(newBlockCh) // first block gets committed ensureNoNewEventOnChannel(newBlockCh) // then we don't make a block ... @@ -110,8 +113,10 @@ func deliverTxsRange(cs *State, start, end int) { func TestMempoolTxConcurrentWithCommit(t *testing.T) { state, privVals := randGenesisState(1, false, 10) blockDB := dbm.NewMemDB() + stateStore := sm.NewStore(blockDB) cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB) - sm.SaveState(blockDB, state) + err := stateStore.Save(state) + require.NoError(t, err) newBlockHeaderCh := subscribe(cs.eventBus, types.EventQueryNewBlockHeader) const numTxs int64 = 3000 @@ -133,8 +138,10 @@ func TestMempoolRmBadTx(t *testing.T) { state, privVals := randGenesisState(1, false, 10) app := NewCounterApplication() blockDB := dbm.NewMemDB() + stateStore := sm.NewStore(blockDB) cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB) - sm.SaveState(blockDB, state) + err := stateStore.Save(state) + require.NoError(t, err) // increment the counter by 1 txBytes := make([]byte, 8) diff --git a/consensus/metrics.go b/consensus/metrics.go index 5fa27118a..bbd823a3f 100644 --- a/consensus/metrics.go +++ b/consensus/metrics.go @@ -43,7 +43,7 @@ type Metrics struct { ByzantineValidatorsPower metrics.Gauge // Time between this and the last block. - BlockIntervalSeconds metrics.Gauge + BlockIntervalSeconds metrics.Histogram // Number of transactions. NumTxs metrics.Gauge @@ -55,6 +55,8 @@ type Metrics struct { CommittedHeight metrics.Gauge // Whether or not a node is fast syncing. 1 if yes, 0 if no. FastSyncing metrics.Gauge + // Whether or not a node is state syncing. 1 if yes, 0 if no. + StateSyncing metrics.Gauge // Number of blockparts transmitted by peer. BlockParts metrics.Counter @@ -136,14 +138,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "byzantine_validators_power", Help: "Total power of the byzantine validators.", }, labels).With(labelsAndValues...), - - BlockIntervalSeconds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + BlockIntervalSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, Name: "block_interval_seconds", Help: "Time between this and the last block.", }, labels).With(labelsAndValues...), - NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -174,6 +174,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "fast_syncing", Help: "Whether or not a node is fast syncing. 1 if yes, 0 if no.", }, labels).With(labelsAndValues...), + StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "state_syncing", + Help: "Whether or not a node is state syncing.
1 if yes, 0 if no.", + }, labels).With(labelsAndValues...), BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -201,13 +207,14 @@ func NopMetrics() *Metrics { ByzantineValidators: discard.NewGauge(), ByzantineValidatorsPower: discard.NewGauge(), - BlockIntervalSeconds: discard.NewGauge(), + BlockIntervalSeconds: discard.NewHistogram(), NumTxs: discard.NewGauge(), BlockSizeBytes: discard.NewGauge(), TotalTxs: discard.NewGauge(), CommittedHeight: discard.NewGauge(), FastSyncing: discard.NewGauge(), + StateSyncing: discard.NewGauge(), BlockParts: discard.NewCounter(), } } diff --git a/consensus/msgs.go b/consensus/msgs.go new file mode 100644 index 000000000..4de96b5f4 --- /dev/null +++ b/consensus/msgs.go @@ -0,0 +1,377 @@ +package consensus + +import ( + "errors" + "fmt" + + "github.com/gogo/protobuf/proto" + + cstypes "github.com/tendermint/tendermint/consensus/types" + "github.com/tendermint/tendermint/libs/bits" + tmmath "github.com/tendermint/tendermint/libs/math" + "github.com/tendermint/tendermint/p2p" + tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" +) + +// MsgToProto takes a consensus message type and returns the proto defined consensus message +func MsgToProto(msg Message) (*tmcons.Message, error) { + if msg == nil { + return nil, errors.New("consensus: message is nil") + } + var pb tmcons.Message + + switch msg := msg.(type) { + case *NewRoundStepMessage: + pb = tmcons.Message{ + Sum: &tmcons.Message_NewRoundStep{ + NewRoundStep: &tmcons.NewRoundStep{ + Height: msg.Height, + Round: msg.Round, + Step: uint32(msg.Step), + SecondsSinceStartTime: msg.SecondsSinceStartTime, + LastCommitRound: msg.LastCommitRound, + }, + }, + } + case *NewValidBlockMessage: + pbPartSetHeader := msg.BlockPartSetHeader.ToProto() + pbBits := msg.BlockParts.ToProto() + pb = tmcons.Message{ + Sum: &tmcons.Message_NewValidBlock{ + NewValidBlock: &tmcons.NewValidBlock{ + Height: msg.Height, + Round: msg.Round, + BlockPartSetHeader: pbPartSetHeader, + BlockParts: pbBits, + IsCommit: msg.IsCommit, + }, + }, + } + case *ProposalMessage: + pbP := msg.Proposal.ToProto() + pb = tmcons.Message{ + Sum: &tmcons.Message_Proposal{ + Proposal: &tmcons.Proposal{ + Proposal: *pbP, + }, + }, + } + case *ProposalPOLMessage: + pbBits := msg.ProposalPOL.ToProto() + pb = tmcons.Message{ + Sum: &tmcons.Message_ProposalPol{ + ProposalPol: &tmcons.ProposalPOL{ + Height: msg.Height, + ProposalPolRound: msg.ProposalPOLRound, + ProposalPol: *pbBits, + }, + }, + } + case *BlockPartMessage: + parts, err := msg.Part.ToProto() + if err != nil { + return nil, fmt.Errorf("msg to proto error: %w", err) + } + pb = tmcons.Message{ + Sum: &tmcons.Message_BlockPart{ + BlockPart: &tmcons.BlockPart{ + Height: msg.Height, + Round: msg.Round, + Part: *parts, + }, + }, + } + case *VoteMessage: + vote := msg.Vote.ToProto() + pb = tmcons.Message{ + Sum: &tmcons.Message_Vote{ + Vote: &tmcons.Vote{ + Vote: vote, + }, + }, + } + case *HasVoteMessage: + pb = tmcons.Message{ + Sum: &tmcons.Message_HasVote{ + HasVote: &tmcons.HasVote{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + Index: msg.Index, + }, + }, + } + case *VoteSetMaj23Message: + bi := msg.BlockID.ToProto() + pb = tmcons.Message{ + Sum: &tmcons.Message_VoteSetMaj23{ + VoteSetMaj23: &tmcons.VoteSetMaj23{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + BlockID: bi, + }, 
+ }, + } + case *VoteSetBitsMessage: + bi := msg.BlockID.ToProto() + bits := msg.Votes.ToProto() + + vsb := &tmcons.Message_VoteSetBits{ + VoteSetBits: &tmcons.VoteSetBits{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + BlockID: bi, + }, + } + + if bits != nil { + vsb.VoteSetBits.Votes = *bits + } + + pb = tmcons.Message{ + Sum: vsb, + } + + default: + return nil, fmt.Errorf("consensus: message not recognized: %T", msg) + } + + return &pb, nil +} + +// MsgFromProto takes a consensus proto message and returns the native Go type +func MsgFromProto(msg *tmcons.Message) (Message, error) { + if msg == nil { + return nil, errors.New("consensus: nil message") + } + var pb Message + + switch msg := msg.Sum.(type) { + case *tmcons.Message_NewRoundStep: + rs, err := tmmath.SafeConvertUint8(int64(msg.NewRoundStep.Step)) + // deny message based on possible overflow + if err != nil { + return nil, fmt.Errorf("denying message due to possible overflow: %w", err) + } + pb = &NewRoundStepMessage{ + Height: msg.NewRoundStep.Height, + Round: msg.NewRoundStep.Round, + Step: cstypes.RoundStepType(rs), + SecondsSinceStartTime: msg.NewRoundStep.SecondsSinceStartTime, + LastCommitRound: msg.NewRoundStep.LastCommitRound, + } + case *tmcons.Message_NewValidBlock: + pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader) + if err != nil { + return nil, fmt.Errorf("parts from proto error: %w", err) + } + + pbBits := new(bits.BitArray) + pbBits.FromProto(msg.NewValidBlock.BlockParts) + + pb = &NewValidBlockMessage{ + Height: msg.NewValidBlock.Height, + Round: msg.NewValidBlock.Round, + BlockPartSetHeader: *pbPartSetHeader, + BlockParts: pbBits, + IsCommit: msg.NewValidBlock.IsCommit, + } + case *tmcons.Message_Proposal: + pbP, err := types.ProposalFromProto(&msg.Proposal.Proposal) + if err != nil { + return nil, fmt.Errorf("proposal msg from proto error: %w", err) + } + + pb = &ProposalMessage{ + Proposal: pbP, + } + case *tmcons.Message_ProposalPol: + pbBits := new(bits.BitArray) + pbBits.FromProto(&msg.ProposalPol.ProposalPol) + pb = &ProposalPOLMessage{ + Height: msg.ProposalPol.Height, + ProposalPOLRound: msg.ProposalPol.ProposalPolRound, + ProposalPOL: pbBits, + } + case *tmcons.Message_BlockPart: + parts, err := types.PartFromProto(&msg.BlockPart.Part) + if err != nil { + return nil, fmt.Errorf("blockpart msg from proto error: %w", err) + } + pb = &BlockPartMessage{ + Height: msg.BlockPart.Height, + Round: msg.BlockPart.Round, + Part: parts, + } + case *tmcons.Message_Vote: + vote, err := types.VoteFromProto(msg.Vote.Vote) + if err != nil { + return nil, fmt.Errorf("vote msg from proto error: %w", err) + } + + pb = &VoteMessage{ + Vote: vote, + } + case *tmcons.Message_HasVote: + pb = &HasVoteMessage{ + Height: msg.HasVote.Height, + Round: msg.HasVote.Round, + Type: msg.HasVote.Type, + Index: msg.HasVote.Index, + } + case *tmcons.Message_VoteSetMaj23: + bi, err := types.BlockIDFromProto(&msg.VoteSetMaj23.BlockID) + if err != nil { + return nil, fmt.Errorf("voteSetMaj23 msg from proto error: %w", err) + } + pb = &VoteSetMaj23Message{ + Height: msg.VoteSetMaj23.Height, + Round: msg.VoteSetMaj23.Round, + Type: msg.VoteSetMaj23.Type, + BlockID: *bi, + } + case *tmcons.Message_VoteSetBits: + bi, err := types.BlockIDFromProto(&msg.VoteSetBits.BlockID) + if err != nil { + return nil, fmt.Errorf("voteSetBits msg from proto error: %w", err) + } + bits := new(bits.BitArray) + bits.FromProto(&msg.VoteSetBits.Votes) + + pb = &VoteSetBitsMessage{ + Height: msg.VoteSetBits.Height, +
Round: msg.VoteSetBits.Round, + Type: msg.VoteSetBits.Type, + BlockID: *bi, + Votes: bits, + } + default: + return nil, fmt.Errorf("consensus: message not recognized: %T", msg) + } + + if err := pb.ValidateBasic(); err != nil { + return nil, err + } + + return pb, nil +} + +// MustEncode takes the reactor's msg, makes it proto and marshals it +// this mimics `MustMarshalBinaryBare` in that it panics on error +func MustEncode(msg Message) []byte { + pb, err := MsgToProto(msg) + if err != nil { + panic(err) + } + enc, err := proto.Marshal(pb) + if err != nil { + panic(err) + } + return enc +} + +// WALToProto takes a WAL message and returns a proto walMessage and error +func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) { + var pb tmcons.WALMessage + + switch msg := msg.(type) { + case types.EventDataRoundState: + pb = tmcons.WALMessage{ + Sum: &tmcons.WALMessage_EventDataRoundState{ + EventDataRoundState: &tmproto.EventDataRoundState{ + Height: msg.Height, + Round: msg.Round, + Step: msg.Step, + }, + }, + } + case msgInfo: + consMsg, err := MsgToProto(msg.Msg) + if err != nil { + return nil, err + } + pb = tmcons.WALMessage{ + Sum: &tmcons.WALMessage_MsgInfo{ + MsgInfo: &tmcons.MsgInfo{ + Msg: *consMsg, + PeerID: string(msg.PeerID), + }, + }, + } + case timeoutInfo: + pb = tmcons.WALMessage{ + Sum: &tmcons.WALMessage_TimeoutInfo{ + TimeoutInfo: &tmcons.TimeoutInfo{ + Duration: msg.Duration, + Height: msg.Height, + Round: msg.Round, + Step: uint32(msg.Step), + }, + }, + } + case EndHeightMessage: + pb = tmcons.WALMessage{ + Sum: &tmcons.WALMessage_EndHeight{ + EndHeight: &tmcons.EndHeight{ + Height: msg.Height, + }, + }, + } + default: + return nil, fmt.Errorf("to proto: wal message not recognized: %T", msg) + } + + return &pb, nil +} + +// WALFromProto takes a proto wal message and returns a consensus walMessage and error +func WALFromProto(msg *tmcons.WALMessage) (WALMessage, error) { + if msg == nil { + return nil, errors.New("nil WAL message") + } + var pb WALMessage + + switch msg := msg.Sum.(type) { + case *tmcons.WALMessage_EventDataRoundState: + pb = types.EventDataRoundState{ + Height: msg.EventDataRoundState.Height, + Round: msg.EventDataRoundState.Round, + Step: msg.EventDataRoundState.Step, + } + case *tmcons.WALMessage_MsgInfo: + walMsg, err := MsgFromProto(&msg.MsgInfo.Msg) + if err != nil { + return nil, fmt.Errorf("msgInfo from proto error: %w", err) + } + pb = msgInfo{ + Msg: walMsg, + PeerID: p2p.ID(msg.MsgInfo.PeerID), + } + + case *tmcons.WALMessage_TimeoutInfo: + tis, err := tmmath.SafeConvertUint8(int64(msg.TimeoutInfo.Step)) + // deny message based on possible overflow + if err != nil { + return nil, fmt.Errorf("denying message due to possible overflow: %w", err) + } + pb = timeoutInfo{ + Duration: msg.TimeoutInfo.Duration, + Height: msg.TimeoutInfo.Height, + Round: msg.TimeoutInfo.Round, + Step: cstypes.RoundStepType(tis), + } + return pb, nil + case *tmcons.WALMessage_EndHeight: + pb := EndHeightMessage{ + Height: msg.EndHeight.Height, + } + return pb, nil + default: + return nil, fmt.Errorf("from proto: wal message not recognized: %T", msg) + } + return pb, nil +} diff --git a/consensus/msgs_test.go b/consensus/msgs_test.go new file mode 100644 index 000000000..b1f32e67d --- /dev/null +++ b/consensus/msgs_test.go @@ -0,0 +1,427 @@ +package consensus + +import ( + "encoding/hex" + "math" + "testing" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + 
"github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/libs/bits" + tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/p2p" + tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" +) + +func TestMsgToProto(t *testing.T) { + psh := types.PartSetHeader{ + Total: 1, + Hash: tmrand.Bytes(32), + } + pbPsh := psh.ToProto() + bi := types.BlockID{ + Hash: tmrand.Bytes(32), + PartSetHeader: psh, + } + pbBi := bi.ToProto() + bits := bits.NewBitArray(1) + pbBits := bits.ToProto() + + parts := types.Part{ + Index: 1, + Bytes: []byte("test"), + Proof: merkle.Proof{ + Total: 1, + Index: 1, + LeafHash: tmrand.Bytes(32), + Aunts: [][]byte{}, + }, + } + pbParts, err := parts.ToProto() + require.NoError(t, err) + + proposal := types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + Round: 1, + POLRound: 1, + BlockID: bi, + Timestamp: time.Now(), + Signature: tmrand.Bytes(20), + } + pbProposal := proposal.ToProto() + + pv := types.NewMockPV() + pk, err := pv.GetPubKey() + require.NoError(t, err) + val := types.NewValidator(pk, 100) + + vote, err := types.MakeVote( + 1, types.BlockID{}, &types.ValidatorSet{Proposer: val, Validators: []*types.Validator{val}}, + pv, "chainID", time.Now()) + require.NoError(t, err) + pbVote := vote.ToProto() + + testsCases := []struct { + testName string + msg Message + want *tmcons.Message + wantErr bool + }{ + {"successful NewRoundStepMessage", &NewRoundStepMessage{ + Height: 2, + Round: 1, + Step: 1, + SecondsSinceStartTime: 1, + LastCommitRound: 2, + }, &tmcons.Message{ + Sum: &tmcons.Message_NewRoundStep{ + NewRoundStep: &tmcons.NewRoundStep{ + Height: 2, + Round: 1, + Step: 1, + SecondsSinceStartTime: 1, + LastCommitRound: 2, + }, + }, + }, false}, + + {"successful NewValidBlockMessage", &NewValidBlockMessage{ + Height: 1, + Round: 1, + BlockPartSetHeader: psh, + BlockParts: bits, + IsCommit: false, + }, &tmcons.Message{ + Sum: &tmcons.Message_NewValidBlock{ + NewValidBlock: &tmcons.NewValidBlock{ + Height: 1, + Round: 1, + BlockPartSetHeader: pbPsh, + BlockParts: pbBits, + IsCommit: false, + }, + }, + }, false}, + {"successful BlockPartMessage", &BlockPartMessage{ + Height: 100, + Round: 1, + Part: &parts, + }, &tmcons.Message{ + Sum: &tmcons.Message_BlockPart{ + BlockPart: &tmcons.BlockPart{ + Height: 100, + Round: 1, + Part: *pbParts, + }, + }, + }, false}, + {"successful ProposalPOLMessage", &ProposalPOLMessage{ + Height: 1, + ProposalPOLRound: 1, + ProposalPOL: bits, + }, &tmcons.Message{ + Sum: &tmcons.Message_ProposalPol{ + ProposalPol: &tmcons.ProposalPOL{ + Height: 1, + ProposalPolRound: 1, + ProposalPol: *pbBits, + }, + }}, false}, + {"successful ProposalMessage", &ProposalMessage{ + Proposal: &proposal, + }, &tmcons.Message{ + Sum: &tmcons.Message_Proposal{ + Proposal: &tmcons.Proposal{ + Proposal: *pbProposal, + }, + }, + }, false}, + {"successful VoteMessage", &VoteMessage{ + Vote: vote, + }, &tmcons.Message{ + Sum: &tmcons.Message_Vote{ + Vote: &tmcons.Vote{ + Vote: pbVote, + }, + }, + }, false}, + {"successful VoteSetMaj23", &VoteSetMaj23Message{ + Height: 1, + Round: 1, + Type: 1, + BlockID: bi, + }, &tmcons.Message{ + Sum: &tmcons.Message_VoteSetMaj23{ + VoteSetMaj23: &tmcons.VoteSetMaj23{ + Height: 1, + Round: 1, + Type: 1, + BlockID: pbBi, + }, + }, + }, false}, + {"successful VoteSetBits", &VoteSetBitsMessage{ + Height: 1, + Round: 1, + Type: 1, + BlockID: 
bi, + Votes: bits, + }, &tmcons.Message{ + Sum: &tmcons.Message_VoteSetBits{ + VoteSetBits: &tmcons.VoteSetBits{ + Height: 1, + Round: 1, + Type: 1, + BlockID: pbBi, + Votes: *pbBits, + }, + }, + }, false}, + {"failure", nil, &tmcons.Message{}, true}, + } + for _, tt := range testsCases { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + pb, err := MsgToProto(tt.msg) + if tt.wantErr == true { + assert.Equal(t, err != nil, tt.wantErr) + return + } + assert.EqualValues(t, tt.want, pb, tt.testName) + + msg, err := MsgFromProto(pb) + + if !tt.wantErr { + require.NoError(t, err) + bcm := assert.Equal(t, tt.msg, msg, tt.testName) + assert.True(t, bcm, tt.testName) + } else { + require.Error(t, err, tt.testName) + } + }) + } +} + +func TestWALMsgProto(t *testing.T) { + + parts := types.Part{ + Index: 1, + Bytes: []byte("test"), + Proof: merkle.Proof{ + Total: 1, + Index: 1, + LeafHash: tmrand.Bytes(32), + Aunts: [][]byte{}, + }, + } + pbParts, err := parts.ToProto() + require.NoError(t, err) + + testsCases := []struct { + testName string + msg WALMessage + want *tmcons.WALMessage + wantErr bool + }{ + {"successful EventDataRoundState", types.EventDataRoundState{ + Height: 2, + Round: 1, + Step: "ronies", + }, &tmcons.WALMessage{ + Sum: &tmcons.WALMessage_EventDataRoundState{ + EventDataRoundState: &tmproto.EventDataRoundState{ + Height: 2, + Round: 1, + Step: "ronies", + }, + }, + }, false}, + {"successful msgInfo", msgInfo{ + Msg: &BlockPartMessage{ + Height: 100, + Round: 1, + Part: &parts, + }, + PeerID: p2p.ID("string"), + }, &tmcons.WALMessage{ + Sum: &tmcons.WALMessage_MsgInfo{ + MsgInfo: &tmcons.MsgInfo{ + Msg: tmcons.Message{ + Sum: &tmcons.Message_BlockPart{ + BlockPart: &tmcons.BlockPart{ + Height: 100, + Round: 1, + Part: *pbParts, + }, + }, + }, + PeerID: "string", + }, + }, + }, false}, + {"successful timeoutInfo", timeoutInfo{ + Duration: time.Duration(100), + Height: 1, + Round: 1, + Step: 1, + }, &tmcons.WALMessage{ + Sum: &tmcons.WALMessage_TimeoutInfo{ + TimeoutInfo: &tmcons.TimeoutInfo{ + Duration: time.Duration(100), + Height: 1, + Round: 1, + Step: 1, + }, + }, + }, false}, + {"successful EndHeightMessage", EndHeightMessage{ + Height: 1, + }, &tmcons.WALMessage{ + Sum: &tmcons.WALMessage_EndHeight{ + EndHeight: &tmcons.EndHeight{ + Height: 1, + }, + }, + }, false}, + {"failure", nil, &tmcons.WALMessage{}, true}, + } + for _, tt := range testsCases { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + pb, err := WALToProto(tt.msg) + if tt.wantErr == true { + assert.Equal(t, err != nil, tt.wantErr) + return + } + assert.EqualValues(t, tt.want, pb, tt.testName) + + msg, err := WALFromProto(pb) + + if !tt.wantErr { + require.NoError(t, err) + assert.Equal(t, tt.msg, msg, tt.testName) // need the concrete type as WAL Message is an empty interface + } else { + require.Error(t, err, tt.testName) + } + }) + } +} + +// nolint:lll //ignore line length for tests +func TestConsMsgsVectors(t *testing.T) { + date := time.Date(2018, 8, 30, 12, 0, 0, 0, time.UTC) + psh := types.PartSetHeader{ + Total: 1, + Hash: []byte("add_more_exclamation_marks_code-"), + } + pbPsh := psh.ToProto() + + bi := types.BlockID{ + Hash: []byte("add_more_exclamation_marks_code-"), + PartSetHeader: psh, + } + pbBi := bi.ToProto() + bits := bits.NewBitArray(1) + pbBits := bits.ToProto() + + parts := types.Part{ + Index: 1, + Bytes: []byte("test"), + Proof: merkle.Proof{ + Total: 1, + Index: 1, + LeafHash: []byte("add_more_exclamation_marks_code-"), + Aunts: [][]byte{}, + }, + } + pbParts, err :=
parts.ToProto() + require.NoError(t, err) + + proposal := types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + Round: 1, + POLRound: 1, + BlockID: bi, + Timestamp: date, + Signature: []byte("add_more_exclamation"), + } + pbProposal := proposal.ToProto() + + v := &types.Vote{ + ValidatorAddress: []byte("add_more_exclamation"), + ValidatorIndex: 1, + Height: 1, + Round: 0, + Timestamp: date, + Type: tmproto.PrecommitType, + BlockID: bi, + } + vpb := v.ToProto() + + testCases := []struct { + testName string + cMsg proto.Message + expBytes string + }{ + {"NewRoundStep", &tmcons.Message{Sum: &tmcons.Message_NewRoundStep{NewRoundStep: &tmcons.NewRoundStep{ + Height: 1, + Round: 1, + Step: 1, + SecondsSinceStartTime: 1, + LastCommitRound: 1, + }}}, "0a0a08011001180120012801"}, + {"NewRoundStep Max", &tmcons.Message{Sum: &tmcons.Message_NewRoundStep{NewRoundStep: &tmcons.NewRoundStep{ + Height: math.MaxInt64, + Round: math.MaxInt32, + Step: math.MaxUint32, + SecondsSinceStartTime: math.MaxInt64, + LastCommitRound: math.MaxInt32, + }}}, "0a2608ffffffffffffffff7f10ffffffff0718ffffffff0f20ffffffffffffffff7f28ffffffff07"}, + {"NewValidBlock", &tmcons.Message{Sum: &tmcons.Message_NewValidBlock{ + NewValidBlock: &tmcons.NewValidBlock{ + Height: 1, Round: 1, BlockPartSetHeader: pbPsh, BlockParts: pbBits, IsCommit: false}}}, + "1231080110011a24080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d22050801120100"}, + {"Proposal", &tmcons.Message{Sum: &tmcons.Message_Proposal{Proposal: &tmcons.Proposal{Proposal: *pbProposal}}}, + "1a720a7008201001180120012a480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d320608c0b89fdc053a146164645f6d6f72655f6578636c616d6174696f6e"}, + {"ProposalPol", &tmcons.Message{Sum: &tmcons.Message_ProposalPol{ + ProposalPol: &tmcons.ProposalPOL{Height: 1, ProposalPolRound: 1}}}, + "2206080110011a00"}, + {"BlockPart", &tmcons.Message{Sum: &tmcons.Message_BlockPart{ + BlockPart: &tmcons.BlockPart{Height: 1, Round: 1, Part: *pbParts}}}, + "2a36080110011a3008011204746573741a26080110011a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d"}, + {"Vote", &tmcons.Message{Sum: &tmcons.Message_Vote{ + Vote: &tmcons.Vote{Vote: vpb}}}, + "32700a6e0802100122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d2a0608c0b89fdc0532146164645f6d6f72655f6578636c616d6174696f6e3801"}, + {"HasVote", &tmcons.Message{Sum: &tmcons.Message_HasVote{ + HasVote: &tmcons.HasVote{Height: 1, Round: 1, Type: tmproto.PrevoteType, Index: 1}}}, + "3a080801100118012001"}, + {"HasVote", &tmcons.Message{Sum: &tmcons.Message_HasVote{ + HasVote: &tmcons.HasVote{Height: math.MaxInt64, Round: math.MaxInt32, + Type: tmproto.PrevoteType, Index: math.MaxInt32}}}, + "3a1808ffffffffffffffff7f10ffffffff07180120ffffffff07"}, + {"VoteSetMaj23", &tmcons.Message{Sum: &tmcons.Message_VoteSetMaj23{ + VoteSetMaj23: &tmcons.VoteSetMaj23{Height: 1, Round: 1, Type: tmproto.PrevoteType, BlockID: pbBi}}}, + "425008011001180122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d"}, + {"VoteSetBits", &tmcons.Message{Sum: &tmcons.Message_VoteSetBits{ + VoteSetBits: &tmcons.VoteSetBits{Height: 1, Round: 1, Type: tmproto.PrevoteType, BlockID: pbBi, Votes: *pbBits}}}, + 
"4a5708011001180122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d2a050801120100"}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + bz, err := proto.Marshal(tc.cMsg) + require.NoError(t, err) + + require.Equal(t, tc.expBytes, hex.EncodeToString(bz)) + }) + } +} diff --git a/consensus/reactor.go b/consensus/reactor.go index c8c344ac8..9fb41c37c 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -1,20 +1,23 @@ package consensus import ( + "errors" "fmt" "reflect" "sync" "time" - "github.com/pkg/errors" - - amino "github.com/tendermint/go-amino" + "github.com/gogo/protobuf/proto" cstypes "github.com/tendermint/tendermint/consensus/types" "github.com/tendermint/tendermint/libs/bits" tmevents "github.com/tendermint/tendermint/libs/events" + tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" + tmsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/p2p" + tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" @@ -40,24 +43,23 @@ type Reactor struct { conS *State - mtx sync.RWMutex - fastSync bool + mtx tmsync.RWMutex + waitSync bool eventBus *types.EventBus - metrics *Metrics + Metrics *Metrics } type ReactorOption func(*Reactor) // NewReactor returns a new Reactor with the given // consensusState. -func NewReactor(consensusState *State, fastSync bool, options ...ReactorOption) *Reactor { +func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) *Reactor { conR := &Reactor{ conS: consensusState, - fastSync: fastSync, - metrics: NopMetrics(), + waitSync: waitSync, + Metrics: NopMetrics(), } - conR.updateFastSyncingMetric() conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR) for _, option := range options { @@ -70,14 +72,14 @@ func NewReactor(consensusState *State, fastSync bool, options ...ReactorOption) // OnStart implements BaseService by subscribing to events, which later will be // broadcasted to other peers and starting state if we're not in fast sync. func (conR *Reactor) OnStart() error { - conR.Logger.Info("Reactor ", "fastSync", conR.FastSync()) + conR.Logger.Info("Reactor ", "waitSync", conR.WaitSync()) // start routine that computes peer statistics for evaluating peer quality go conR.peerStatsRoutine() conR.subscribeToBroadcastEvents() - if !conR.FastSync() { + if !conR.WaitSync() { err := conR.conS.Start() if err != nil { return err @@ -91,28 +93,35 @@ func (conR *Reactor) OnStart() error { // state. func (conR *Reactor) OnStop() { conR.unsubscribeFromBroadcastEvents() - conR.conS.Stop() - if !conR.FastSync() { + if err := conR.conS.Stop(); err != nil { + conR.Logger.Error("Error stopping consensus state", "err", err) + } + if !conR.WaitSync() { conR.conS.Wait() } } // SwitchToConsensus switches from fast_sync mode to consensus mode. 
// It resets the state, turns off fast_sync, and starts the consensus state-machine -func (conR *Reactor) SwitchToConsensus(state sm.State, blocksSynced uint64) { +func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { conR.Logger.Info("SwitchToConsensus") - conR.conS.reconstructLastCommit(state) - // NOTE: The line below causes broadcastNewRoundStepRoutine() to - // broadcast a NewRoundStepMessage. + + // We have no votes, so reconstruct LastCommit from SeenCommit. + if state.LastBlockHeight > 0 { + conR.conS.reconstructLastCommit(state) + } + + // NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a + // NewRoundStepMessage. conR.conS.updateToState(state) conR.mtx.Lock() - conR.fastSync = false + conR.waitSync = false conR.mtx.Unlock() - conR.metrics.FastSyncing.Set(0) + conR.Metrics.FastSyncing.Set(0) + conR.Metrics.StateSyncing.Set(0) - if blocksSynced > 0 { - // dont bother with the WAL if we fast synced + if skipWAL { conR.conS.doWALCatchup = false } err := conR.conS.Start() @@ -187,7 +196,7 @@ func (conR *Reactor) AddPeer(peer p2p.Peer) { // Send our state to peer. // If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). - if !conR.FastSync() { + if !conR.WaitSync() { conR.sendNewRoundStepMessage(peer) } } @@ -211,6 +220,8 @@ func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { // Peer state updates can happen in parallel, but processing of // proposals, block parts, and votes are ordered by the receiveRoutine // NOTE: blocks on consensus state for proposals, block parts, and votes +// XXX: do not call any methods that can block or incur heavy processing. +// https://github.com/tendermint/tendermint/issues/2888 func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { if !conR.IsRunning() { conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes) @@ -242,6 +253,14 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { case StateChannel: switch msg := msg.(type) { case *NewRoundStepMessage: + conR.conS.mtx.Lock() + initialHeight := conR.conS.state.InitialHeight + conR.conS.mtx.Unlock() + if err = msg.ValidateHeight(initialHeight); err != nil { + conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) + conR.Switch.StopPeerForError(src, err) + return + } ps.ApplyNewRoundStepMessage(msg) case *NewValidBlockMessage: ps.ApplyNewValidBlockMessage(msg) @@ -265,14 +284,14 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { // (and consequently shows which we don't have) var ourVotes *bits.BitArray switch msg.Type { - case types.PrevoteType: + case tmproto.PrevoteType: ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID) - case types.PrecommitType: + case tmproto.PrecommitType: ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) default: panic("Bad VoteSetBitsMessage field Type. 
Forgot to add a check in ValidateBasic?") } - src.TrySend(VoteSetBitsChannel, cdc.MustMarshalBinaryBare(&VoteSetBitsMessage{ + src.TrySend(VoteSetBitsChannel, MustEncode(&VoteSetBitsMessage{ Height: msg.Height, Round: msg.Round, Type: msg.Type, @@ -284,8 +303,8 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { } case DataChannel: - if conR.FastSync() { - conR.Logger.Info("Ignoring message received during fastSync", "msg", msg) + if conR.WaitSync() { + conR.Logger.Info("Ignoring message received during sync", "msg", msg) return } switch msg := msg.(type) { @@ -295,16 +314,16 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { case *ProposalPOLMessage: ps.ApplyProposalPOLMessage(msg) case *BlockPartMessage: - ps.SetHasProposalBlockPart(msg.Height, msg.Round, msg.Part.Index) - conR.metrics.BlockParts.With("peer_id", string(src.ID())).Add(1) + ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index)) + conR.Metrics.BlockParts.With("peer_id", string(src.ID())).Add(1) conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } case VoteChannel: - if conR.FastSync() { - conR.Logger.Info("Ignoring message received during fastSync", "msg", msg) + if conR.WaitSync() { + conR.Logger.Info("Ignoring message received during sync", "msg", msg) return } switch msg := msg.(type) { @@ -325,8 +344,8 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { } case VoteSetBitsChannel: - if conR.FastSync() { - conR.Logger.Info("Ignoring message received during fastSync", "msg", msg) + if conR.WaitSync() { + conR.Logger.Info("Ignoring message received during sync", "msg", msg) return } switch msg := msg.(type) { @@ -339,9 +358,9 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { if height == msg.Height { var ourVotes *bits.BitArray switch msg.Type { - case types.PrevoteType: + case tmproto.PrevoteType: ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID) - case types.PrecommitType: + case tmproto.PrecommitType: ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) default: panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?") @@ -366,11 +385,11 @@ func (conR *Reactor) SetEventBus(b *types.EventBus) { conR.conS.SetEventBus(b) } -// FastSync returns whether the consensus reactor is in fast-sync mode. -func (conR *Reactor) FastSync() bool { +// WaitSync returns whether the consensus reactor is waiting for state/fast sync. +func (conR *Reactor) WaitSync() bool { conR.mtx.RLock() defer conR.mtx.RUnlock() - return conR.fastSync + return conR.waitSync } //-------------------------------------- @@ -380,20 +399,26 @@ func (conR *Reactor) FastSync() bool { // them to peers upon receiving. 
func (conR *Reactor) subscribeToBroadcastEvents() { const subscriber = "consensus-reactor" - conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep, + if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep, func(data tmevents.EventData) { conR.broadcastNewRoundStepMessage(data.(*cstypes.RoundState)) - }) + }); err != nil { + conR.Logger.Error("Error adding listener for events", "err", err) + } - conR.conS.evsw.AddListenerForEvent(subscriber, types.EventValidBlock, + if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventValidBlock, func(data tmevents.EventData) { conR.broadcastNewValidBlockMessage(data.(*cstypes.RoundState)) - }) + }); err != nil { + conR.Logger.Error("Error adding listener for events", "err", err) + } - conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote, + if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote, func(data tmevents.EventData) { conR.broadcastHasVoteMessage(data.(*types.Vote)) - }) + }); err != nil { + conR.Logger.Error("Error adding listener for events", "err", err) + } } @@ -404,18 +429,18 @@ func (conR *Reactor) unsubscribeFromBroadcastEvents() { func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) { nrsMsg := makeRoundStepMessage(rs) - conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg)) + conR.Switch.Broadcast(StateChannel, MustEncode(nrsMsg)) } func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { csMsg := &NewValidBlockMessage{ - Height: rs.Height, - Round: rs.Round, - BlockPartsHeader: rs.ProposalBlockParts.Header(), - BlockParts: rs.ProposalBlockParts.BitArray(), - IsCommit: rs.Step == cstypes.RoundStepCommit, + Height: rs.Height, + Round: rs.Round, + BlockPartSetHeader: rs.ProposalBlockParts.Header(), + BlockParts: rs.ProposalBlockParts.BitArray(), + IsCommit: rs.Step == cstypes.RoundStepCommit, } - conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(csMsg)) + conR.Switch.Broadcast(StateChannel, MustEncode(csMsg)) } // Broadcasts HasVoteMessage to peers that care. @@ -426,7 +451,7 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) { Type: vote.Type, Index: vote.ValidatorIndex, } - conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg)) + conR.Switch.Broadcast(StateChannel, MustEncode(msg)) /* // TODO: Make this broadcast more selective. for _, peer := range conR.Switch.Peers().List() { @@ -452,7 +477,7 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) Height: rs.Height, Round: rs.Round, Step: rs.Step, - SecondsSinceStartTime: int(time.Since(rs.StartTime).Seconds()), + SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()), LastCommitRound: rs.LastCommit.GetRound(), } return @@ -461,7 +486,7 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) { rs := conR.conS.GetRoundState() nrsMsg := makeRoundStepMessage(rs) - peer.Send(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg)) + peer.Send(StateChannel, MustEncode(nrsMsg)) } func (conR *Reactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) { @@ -478,7 +503,7 @@ OUTER_LOOP: prs := ps.GetRoundState() // Send proposal Block parts? 
- if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartsHeader) { + if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) { if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok { part := rs.ProposalBlockParts.GetPart(index) msg := &BlockPartMessage{ @@ -487,7 +512,7 @@ OUTER_LOOP: Part: part, } logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round) - if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) { + if peer.Send(DataChannel, MustEncode(msg)) { ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) } continue OUTER_LOOP @@ -506,7 +531,7 @@ OUTER_LOOP: "blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height()) time.Sleep(conR.conS.config.PeerGossipSleepDuration) } else { - ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader) + ps.InitProposalBlockParts(blockMeta.BlockID.PartSetHeader) } // continue the loop since prs is a copy and not affected by this initialization continue OUTER_LOOP @@ -517,7 +542,8 @@ OUTER_LOOP: // If height and round don't match, sleep. if (rs.Height != prs.Height) || (rs.Round != prs.Round) { - //logger.Info("Peer Height|Round mismatch, sleeping", "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer) + // logger.Info("Peer Height|Round mismatch, sleeping", + // "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer) time.Sleep(conR.conS.config.PeerGossipSleepDuration) continue OUTER_LOOP } @@ -533,7 +559,7 @@ OUTER_LOOP: { msg := &ProposalMessage{Proposal: rs.Proposal} logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round) - if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) { + if peer.Send(DataChannel, MustEncode(msg)) { // NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected!
ps.SetHasProposal(rs.Proposal) } @@ -549,7 +575,7 @@ OUTER_LOOP: ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(), } logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round) - peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) + peer.Send(DataChannel, MustEncode(msg)) } continue OUTER_LOOP } @@ -571,9 +597,9 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt "blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height()) time.Sleep(conR.conS.config.PeerGossipSleepDuration) return - } else if !blockMeta.BlockID.PartsHeader.Equals(prs.ProposalBlockPartsHeader) { - logger.Info("Peer ProposalBlockPartsHeader mismatch, sleeping", - "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader) + } else if !blockMeta.BlockID.PartSetHeader.Equals(prs.ProposalBlockPartSetHeader) { + logger.Info("Peer ProposalBlockPartSetHeader mismatch, sleeping", + "blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader) time.Sleep(conR.conS.config.PeerGossipSleepDuration) return } @@ -581,7 +607,7 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt part := conR.conS.blockStore.LoadBlockPart(prs.Height, index) if part == nil { logger.Error("Could not load part", "index", index, - "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader) + "blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader) time.Sleep(conR.conS.config.PeerGossipSleepDuration) return } @@ -592,14 +618,14 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt Part: part, } logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index) - if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) { + if peer.Send(DataChannel, MustEncode(msg)) { ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) } else { logger.Debug("Sending block part for catchup failed") } return } - //logger.Info("No parts to send in catch-up, sleeping") + // logger.Info("No parts to send in catch-up, sleeping") time.Sleep(conR.conS.config.PeerGossipSleepDuration) } @@ -626,8 +652,8 @@ OUTER_LOOP: sleeping = 0 } - //logger.Debug("gossipVotesRoutine", "rsHeight", rs.Height, "rsRound", rs.Round, - // "prsHeight", prs.Height, "prsRound", prs.Round, "prsStep", prs.Step) + // logger.Debug("gossipVotesRoutine", "rsHeight", rs.Height, "rsRound", rs.Round, + // "prsHeight", prs.Height, "prsRound", prs.Round, "prsStep", prs.Step) // If height matches, then send LastCommit, Prevotes, Precommits. if rs.Height == prs.Height { @@ -648,13 +674,14 @@ OUTER_LOOP: // Catchup logic // If peer is lagging by more than 1, send Commit. - if prs.Height != 0 && rs.Height >= prs.Height+2 { + if prs.Height != 0 && rs.Height >= prs.Height+2 && prs.Height >= conR.conS.blockStore.Base() { // Load the block commit for prs.Height, // which contains precommit signatures for prs.Height. 
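The catch-up branch here gains two pruning-aware safeguards: the peer's height must still be inside the block store (at or above Base()), and the loaded commit is nil-checked before use. Condensed into a hypothetical helper (the diff inlines this condition; the helper name is illustrative only):

// canServeCatchupCommit reports whether a catchup commit for peerHeight
// can still be served from a possibly pruned block store.
func canServeCatchupCommit(store sm.BlockStore, ourHeight, peerHeight int64) bool {
	return peerHeight != 0 && // peer has reported a height
		ourHeight >= peerHeight+2 && // peer lags by more than one block
		peerHeight >= store.Base() // the commit has not been pruned away
}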
- commit := conR.conS.blockStore.LoadBlockCommit(prs.Height) - if ps.PickSendVote(commit) { - logger.Debug("Picked Catchup commit to send", "height", prs.Height) - continue OUTER_LOOP + if commit := conR.conS.blockStore.LoadBlockCommit(prs.Height); commit != nil { + if ps.PickSendVote(commit) { + logger.Debug("Picked Catchup commit to send", "height", prs.Height) + continue OUTER_LOOP + } } } @@ -752,10 +779,10 @@ OUTER_LOOP: prs := ps.GetRoundState() if rs.Height == prs.Height { if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { - peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ + peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ Height: prs.Height, Round: prs.Round, - Type: types.PrevoteType, + Type: tmproto.PrevoteType, BlockID: maj23, })) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) @@ -769,10 +796,10 @@ OUTER_LOOP: prs := ps.GetRoundState() if rs.Height == prs.Height { if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { - peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ + peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ Height: prs.Height, Round: prs.Round, - Type: types.PrecommitType, + Type: tmproto.PrecommitType, BlockID: maj23, })) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) @@ -786,10 +813,10 @@ OUTER_LOOP: prs := ps.GetRoundState() if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { - peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ + peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ Height: prs.Height, Round: prs.ProposalPOLRound, - Type: types.PrevoteType, + Type: tmproto.PrevoteType, BlockID: maj23, })) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) @@ -806,10 +833,10 @@ OUTER_LOOP: if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() && prs.Height >= conR.conS.blockStore.Base() { if commit := conR.conS.LoadCommit(prs.Height); commit != nil { - peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ + peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ Height: prs.Height, Round: commit.Round, - Type: types.PrecommitType, + Type: tmproto.PrecommitType, BlockID: commit.BlockID, })) time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) @@ -886,19 +913,9 @@ func (conR *Reactor) StringIndented(indent string) string { return s } -func (conR *Reactor) updateFastSyncingMetric() { - var fastSyncing float64 - if conR.fastSync { - fastSyncing = 1 - } else { - fastSyncing = 0 - } - conR.metrics.FastSyncing.Set(fastSyncing) -} - // ReactorMetrics sets the metrics func ReactorMetrics(metrics *Metrics) ReactorOption { - return func(conR *Reactor) { conR.metrics = metrics } + return func(conR *Reactor) { conR.Metrics = metrics } } //----------------------------------------------------------------------------- @@ -964,12 +981,12 @@ func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState { return &prs } -// ToJSON returns a json of PeerState, marshalled using go-amino. +// ToJSON returns a json of PeerState. 
func (ps *PeerState) ToJSON() ([]byte, error) { ps.mtx.Lock() defer ps.mtx.Unlock() - return cdc.MarshalJSON(ps) + return tmjson.Marshal(ps) } // GetHeight returns an atomic snapshot of the PeerRoundState's height @@ -1000,14 +1017,14 @@ func (ps *PeerState) SetHasProposal(proposal *types.Proposal) { return } - ps.PRS.ProposalBlockPartsHeader = proposal.BlockID.PartsHeader - ps.PRS.ProposalBlockParts = bits.NewBitArray(proposal.BlockID.PartsHeader.Total) + ps.PRS.ProposalBlockPartSetHeader = proposal.BlockID.PartSetHeader + ps.PRS.ProposalBlockParts = bits.NewBitArray(int(proposal.BlockID.PartSetHeader.Total)) ps.PRS.ProposalPOLRound = proposal.POLRound ps.PRS.ProposalPOL = nil // Nil until ProposalPOLMessage received. } // InitProposalBlockParts initializes the peer's proposal block parts header and bit array. -func (ps *PeerState) InitProposalBlockParts(partsHeader types.PartSetHeader) { +func (ps *PeerState) InitProposalBlockParts(partSetHeader types.PartSetHeader) { ps.mtx.Lock() defer ps.mtx.Unlock() @@ -1015,12 +1032,12 @@ func (ps *PeerState) InitProposalBlockParts(partsHeader types.PartSetHeader) { return } - ps.PRS.ProposalBlockPartsHeader = partsHeader - ps.PRS.ProposalBlockParts = bits.NewBitArray(partsHeader.Total) + ps.PRS.ProposalBlockPartSetHeader = partSetHeader + ps.PRS.ProposalBlockParts = bits.NewBitArray(int(partSetHeader.Total)) } // SetHasProposalBlockPart sets the given block part index as known for the peer. -func (ps *PeerState) SetHasProposalBlockPart(height int64, round int, index int) { +func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index int) { ps.mtx.Lock() defer ps.mtx.Unlock() @@ -1037,7 +1054,7 @@ func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool { if vote, ok := ps.PickVoteToSend(votes); ok { msg := &VoteMessage{vote} ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote) - if ps.peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(msg)) { + if ps.peer.Send(VoteChannel, MustEncode(msg)) { ps.SetHasVote(vote) return true } @@ -1057,7 +1074,8 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote return nil, false } - height, round, votesType, size := votes.GetHeight(), votes.GetRound(), types.SignedMsgType(votes.Type()), votes.Size() + height, round, votesType, size := + votes.GetHeight(), votes.GetRound(), tmproto.SignedMsgType(votes.Type()), votes.Size() // Lazily set data using 'votes'. 
if votes.IsCommit() { @@ -1070,12 +1088,12 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote return nil, false // Not something worth sending } if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok { - return votes.GetByIndex(index), true + return votes.GetByIndex(int32(index)), true } return nil, false } -func (ps *PeerState) getVoteBitArray(height int64, round int, votesType types.SignedMsgType) *bits.BitArray { +func (ps *PeerState) getVoteBitArray(height int64, round int32, votesType tmproto.SignedMsgType) *bits.BitArray { if !types.IsVoteTypeValid(votesType) { return nil } @@ -1083,25 +1101,25 @@ func (ps *PeerState) getVoteBitArray(height int64, round int, votesType types.Si if ps.PRS.Height == height { if ps.PRS.Round == round { switch votesType { - case types.PrevoteType: + case tmproto.PrevoteType: return ps.PRS.Prevotes - case types.PrecommitType: + case tmproto.PrecommitType: return ps.PRS.Precommits } } if ps.PRS.CatchupCommitRound == round { switch votesType { - case types.PrevoteType: + case tmproto.PrevoteType: return nil - case types.PrecommitType: + case tmproto.PrecommitType: return ps.PRS.CatchupCommit } } if ps.PRS.ProposalPOLRound == round { switch votesType { - case types.PrevoteType: + case tmproto.PrevoteType: return ps.PRS.ProposalPOL - case types.PrecommitType: + case tmproto.PrecommitType: return nil } } @@ -1110,9 +1128,9 @@ func (ps *PeerState) getVoteBitArray(height int64, round int, votesType types.Si if ps.PRS.Height == height+1 { if ps.PRS.LastCommitRound == round { switch votesType { - case types.PrevoteType: + case tmproto.PrevoteType: return nil - case types.PrecommitType: + case tmproto.PrecommitType: return ps.PRS.LastCommit } } @@ -1122,7 +1140,7 @@ func (ps *PeerState) getVoteBitArray(height int64, round int, votesType types.Si } // 'round': A round for which we have a +2/3 commit. -func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValidators int) { +func (ps *PeerState) ensureCatchupCommitRound(height int64, round int32, numValidators int) { if ps.PRS.Height != height { return } @@ -1227,7 +1245,7 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) { ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex) } -func (ps *PeerState) setHasVote(height int64, round int, voteType types.SignedMsgType, index int) { +func (ps *PeerState) setHasVote(height int64, round int32, voteType tmproto.SignedMsgType, index int32) { logger := ps.logger.With( "peerH/R", fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round), @@ -1238,7 +1256,7 @@ func (ps *PeerState) setHasVote(height int64, round int, voteType types.SignedMs // NOTE: some may be nil BitArrays -> no side effects. 
psVotes := ps.getVoteBitArray(height, round, voteType) if psVotes != nil { - psVotes.SetIndex(index, true) + psVotes.SetIndex(int(index), true) } } @@ -1265,7 +1283,7 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) { ps.PRS.StartTime = startTime if psHeight != msg.Height || psRound != msg.Round { ps.PRS.Proposal = false - ps.PRS.ProposalBlockPartsHeader = types.PartSetHeader{} + ps.PRS.ProposalBlockPartSetHeader = types.PartSetHeader{} ps.PRS.ProposalBlockParts = nil ps.PRS.ProposalPOLRound = -1 ps.PRS.ProposalPOL = nil @@ -1308,7 +1326,7 @@ func (ps *PeerState) ApplyNewValidBlockMessage(msg *NewValidBlockMessage) { return } - ps.PRS.ProposalBlockPartsHeader = msg.BlockPartsHeader + ps.PRS.ProposalBlockPartSetHeader = msg.BlockPartSetHeader ps.PRS.ProposalBlockParts = msg.BlockParts } @@ -1390,25 +1408,25 @@ type Message interface { ValidateBasic() error } -func RegisterMessages(cdc *amino.Codec) { - cdc.RegisterInterface((*Message)(nil), nil) - cdc.RegisterConcrete(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage", nil) - cdc.RegisterConcrete(&NewValidBlockMessage{}, "tendermint/NewValidBlockMessage", nil) - cdc.RegisterConcrete(&ProposalMessage{}, "tendermint/Proposal", nil) - cdc.RegisterConcrete(&ProposalPOLMessage{}, "tendermint/ProposalPOL", nil) - cdc.RegisterConcrete(&BlockPartMessage{}, "tendermint/BlockPart", nil) - cdc.RegisterConcrete(&VoteMessage{}, "tendermint/Vote", nil) - cdc.RegisterConcrete(&HasVoteMessage{}, "tendermint/HasVote", nil) - cdc.RegisterConcrete(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23", nil) - cdc.RegisterConcrete(&VoteSetBitsMessage{}, "tendermint/VoteSetBits", nil) +func init() { + tmjson.RegisterType(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage") + tmjson.RegisterType(&NewValidBlockMessage{}, "tendermint/NewValidBlockMessage") + tmjson.RegisterType(&ProposalMessage{}, "tendermint/Proposal") + tmjson.RegisterType(&ProposalPOLMessage{}, "tendermint/ProposalPOL") + tmjson.RegisterType(&BlockPartMessage{}, "tendermint/BlockPart") + tmjson.RegisterType(&VoteMessage{}, "tendermint/Vote") + tmjson.RegisterType(&HasVoteMessage{}, "tendermint/HasVote") + tmjson.RegisterType(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23") + tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits") } func decodeMsg(bz []byte) (msg Message, err error) { - if len(bz) > maxMsgSize { - return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize) + pb := &tmcons.Message{} + if err = proto.Unmarshal(bz, pb); err != nil { + return msg, err } - err = cdc.UnmarshalBinaryBare(bz, &msg) - return + + return MsgFromProto(pb) } //------------------------------------- @@ -1417,10 +1435,10 @@ func decodeMsg(bz []byte) (msg Message, err error) { // For every height/round/step transition type NewRoundStepMessage struct { Height int64 - Round int + Round int32 Step cstypes.RoundStepType - SecondsSinceStartTime int - LastCommitRound int + SecondsSinceStartTime int64 + LastCommitRound int32 } // ValidateBasic performs basic validation. 
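One behavioral note on the decodeMsg rewrite above: the old len(bz) > maxMsgSize guard is gone from the function itself, so the bound is presumably enforced before decoding (for example by the channel's receive capacity). If that turns out not to hold, a thin wrapper along these lines would restore it; this is an assumption to verify in review, not part of the diff:

// decodeMsgChecked is a hypothetical wrapper restoring the former size
// guard in front of the protobuf decode path.
func decodeMsgChecked(bz []byte) (Message, error) {
	if len(bz) > maxMsgSize {
		return nil, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
	}
	return decodeMsg(bz)
}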
@@ -1437,9 +1455,29 @@ func (m *NewRoundStepMessage) ValidateBasic() error { // NOTE: SecondsSinceStartTime may be negative - if (m.Height == 1 && m.LastCommitRound != -1) || - (m.Height > 1 && m.LastCommitRound < -1) { // TODO: #2737 LastCommitRound should always be >= 0 for heights > 1 - return errors.New("invalid LastCommitRound (for 1st block: -1, for others: >= 0)") + // LastCommitRound will be -1 for the initial height, but we don't know what height this is + // since it can be specified in genesis. The reactor will have to validate this via + // ValidateHeight(). + if m.LastCommitRound < -1 { + return errors.New("invalid LastCommitRound (cannot be < -1)") + } + + return nil +} + +// ValidateHeight validates the height given the chain's initial height. +func (m *NewRoundStepMessage) ValidateHeight(initialHeight int64) error { + if m.Height < initialHeight { + return fmt.Errorf("invalid Height %v (lower than initial height %v)", + m.Height, initialHeight) + } + if m.Height == initialHeight && m.LastCommitRound != -1 { + return fmt.Errorf("invalid LastCommitRound %v (must be -1 for initial height %v)", + m.LastCommitRound, initialHeight) + } + if m.Height > initialHeight && m.LastCommitRound < 0 { + return fmt.Errorf("LastCommitRound can only be negative for initial height %v", // nolint + initialHeight) } return nil } @@ -1453,14 +1491,14 @@ func (m *NewRoundStepMessage) String() string { //------------------------------------- // NewValidBlockMessage is sent when a validator observes a valid block B in some round r, -//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. +// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. // In case the block is also committed, then IsCommit flag is set to true. type NewValidBlockMessage struct { - Height int64 - Round int - BlockPartsHeader types.PartSetHeader - BlockParts *bits.BitArray - IsCommit bool + Height int64 + Round int32 + BlockPartSetHeader types.PartSetHeader + BlockParts *bits.BitArray + IsCommit bool } // ValidateBasic performs basic validation. @@ -1471,19 +1509,19 @@ func (m *NewValidBlockMessage) ValidateBasic() error { if m.Round < 0 { return errors.New("negative Round") } - if err := m.BlockPartsHeader.ValidateBasic(); err != nil { - return fmt.Errorf("wrong BlockPartsHeader: %v", err) + if err := m.BlockPartSetHeader.ValidateBasic(); err != nil { + return fmt.Errorf("wrong BlockPartSetHeader: %v", err) } if m.BlockParts.Size() == 0 { return errors.New("empty blockParts") } - if m.BlockParts.Size() != m.BlockPartsHeader.Total { - return fmt.Errorf("blockParts bit array size %d not equal to BlockPartsHeader.Total %d", + if m.BlockParts.Size() != int(m.BlockPartSetHeader.Total) { + return fmt.Errorf("blockParts bit array size %d not equal to BlockPartSetHeader.Total %d", m.BlockParts.Size(), - m.BlockPartsHeader.Total) + m.BlockPartSetHeader.Total) } - if m.BlockParts.Size() > types.MaxBlockPartsCount { - return errors.Errorf("blockParts bit array is too big: %d, max: %d", m.BlockParts.Size(), types.MaxBlockPartsCount) + if m.BlockParts.Size() > int(types.MaxBlockPartsCount) { + return fmt.Errorf("blockParts bit array is too big: %d, max: %d", m.BlockParts.Size(), types.MaxBlockPartsCount) } return nil } @@ -1491,7 +1529,7 @@ func (m *NewValidBlockMessage) ValidateBasic() error { // String returns a string representation. 
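The ValidateBasic/ValidateHeight split above keeps stateless checks in ValidateBasic, while the initial-height rule, which depends on chain state, moves to ValidateHeight. A sketch of how a receiver combines the two, assuming the initial height is read from state (illustrative wrapper, not in the diff):

func validateNewRoundStep(msg *NewRoundStepMessage, initialHeight int64) error {
	// Stateless structural checks first.
	if err := msg.ValidateBasic(); err != nil {
		return err
	}
	// Then the rule that needs the chain's configured initial height.
	return msg.ValidateHeight(initialHeight)
}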
func (m *NewValidBlockMessage) String() string { return fmt.Sprintf("[ValidBlockMessage H:%v R:%v BP:%v BA:%v IsCommit:%v]", - m.Height, m.Round, m.BlockPartsHeader, m.BlockParts, m.IsCommit) + m.Height, m.Round, m.BlockPartSetHeader, m.BlockParts, m.IsCommit) } //------------------------------------- @@ -1516,7 +1554,7 @@ func (m *ProposalMessage) String() string { // ProposalPOLMessage is sent when a previous proposal is re-proposed. type ProposalPOLMessage struct { Height int64 - ProposalPOLRound int + ProposalPOLRound int32 ProposalPOL *bits.BitArray } @@ -1532,7 +1570,7 @@ func (m *ProposalPOLMessage) ValidateBasic() error { return errors.New("empty ProposalPOL bit array") } if m.ProposalPOL.Size() > types.MaxVotesCount { - return errors.Errorf("ProposalPOL bit array is too big: %d, max: %d", m.ProposalPOL.Size(), types.MaxVotesCount) + return fmt.Errorf("proposalPOL bit array is too big: %d, max: %d", m.ProposalPOL.Size(), types.MaxVotesCount) } return nil } @@ -1547,7 +1585,7 @@ func (m *ProposalPOLMessage) String() string { // BlockPartMessage is sent when gossiping a piece of the proposed block. type BlockPartMessage struct { Height int64 - Round int + Round int32 Part *types.Part } @@ -1592,9 +1630,9 @@ func (m *VoteMessage) String() string { // HasVoteMessage is sent to indicate that a particular vote has been received. type HasVoteMessage struct { Height int64 - Round int - Type types.SignedMsgType - Index int + Round int32 + Type tmproto.SignedMsgType + Index int32 } // ValidateBasic performs basic validation. @@ -1624,8 +1662,8 @@ func (m *HasVoteMessage) String() string { // VoteSetMaj23Message is sent to indicate that a given BlockID has seen +2/3 votes. type VoteSetMaj23Message struct { Height int64 - Round int - Type types.SignedMsgType + Round int32 + Type tmproto.SignedMsgType BlockID types.BlockID } @@ -1656,8 +1694,8 @@ func (m *VoteSetMaj23Message) String() string { // VoteSetBitsMessage is sent to communicate the bit-array of votes seen for the BlockID.
type VoteSetBitsMessage struct { Height int64 - Round int - Type types.SignedMsgType + Round int32 + Type tmproto.SignedMsgType BlockID types.BlockID Votes *bits.BitArray } @@ -1667,9 +1705,6 @@ func (m *VoteSetBitsMessage) ValidateBasic() error { if m.Height < 0 { return errors.New("negative Height") } - if m.Round < 0 { - return errors.New("negative Round") - } if !types.IsVoteTypeValid(m.Type) { return errors.New("invalid Type") } diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index a0a8c9732..f23ec727d 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -12,6 +12,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -21,14 +22,18 @@ import ( abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" cstypes "github.com/tendermint/tendermint/consensus/types" + cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/bits" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" + tmsync "github.com/tendermint/tendermint/libs/sync" mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/p2p/mock" + p2pmock "github.com/tendermint/tendermint/p2p/mock" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" + statemocks "github.com/tendermint/tendermint/state/mocks" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) @@ -36,6 +41,8 @@ import ( //---------------------------------------------- // in-process testnets +var defaultTestTime = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) + func startConsensusNet(t *testing.T, css []*State, n int) ( []*Reactor, []types.Subscription, @@ -58,8 +65,11 @@ func startConsensusNet(t *testing.T, css []*State, n int) ( require.NoError(t, err) blocksSubs = append(blocksSubs, blocksSub) - if css[i].state.LastBlockHeight == 0 { //simulate handle initChain in handshake - sm.SaveState(css[i].blockExec.DB(), css[i].state) + if css[i].state.LastBlockHeight == 0 { // simulate handle initChain in handshake + if err := css[i].blockExec.Store().Save(css[i].state); err != nil { + t.Error(err) + } + } } // make connected switches and start all reactors @@ -75,7 +85,7 @@ func startConsensusNet(t *testing.T, css []*State, n int) ( // TODO: is this still true with new pubsub? 
for i := 0; i < n; i++ { s := reactors[i].conS.GetState() - reactors[i].SwitchToConsensus(s, 0) + reactors[i].SwitchToConsensus(s, false) } return reactors, blocksSubs, eventBuses } @@ -84,11 +94,15 @@ func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*type logger.Info("stopConsensusNet", "n", len(reactors)) for i, r := range reactors { logger.Info("stopConsensusNet: Stopping Reactor", "i", i) - r.Switch.Stop() + if err := r.Switch.Stop(); err != nil { + logger.Error("error trying to stop switch", "error", err) + } } for i, b := range eventBuses { logger.Info("stopConsensusNet: Stopping eventBus", "i", i) - b.Stop() + if err := b.Stop(); err != nil { + logger.Error("error trying to stop eventbus", "error", err) + } } logger.Info("stopConsensusNet: DONE", "n", len(reactors)) } @@ -108,9 +122,6 @@ func TestReactorBasic(t *testing.T) { // Ensure we can process blocks with evidence func TestReactorWithEvidence(t *testing.T) { - types.RegisterMockEvidences(cdc) - types.RegisterMockEvidences(types.GetCodec()) - nValidators := 4 testName := "consensus_reactor_test" tickerFunc := newMockTickerFunc(true) @@ -125,7 +136,8 @@ func TestReactorWithEvidence(t *testing.T) { logger := consensusLogger() for i := 0; i < nValidators; i++ { stateDB := dbm.NewMemDB() // each state needs its own db - state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + stateStore := sm.NewStore(stateDB) + state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) defer os.RemoveAll(thisConfig.RootDir) ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal @@ -141,7 +153,7 @@ func TestReactorWithEvidence(t *testing.T) { blockStore := store.NewBlockStore(blockDB) // one for mempool, one for consensus - mtx := new(sync.Mutex) + mtx := new(tmsync.Mutex) proxyAppConnMem := abcicli.NewLocalClient(mtx, app) proxyAppConnCon := abcicli.NewLocalClient(mtx, app) @@ -155,19 +167,25 @@ func TestReactorWithEvidence(t *testing.T) { // mock the evidence pool // everyone includes evidence of another double signing vIdx := (i + 1) % nValidators - pubKey, err := privVals[vIdx].GetPubKey() - require.NoError(t, err) - evpool := newMockEvidencePool(pubKey.Address()) + ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID()) + evpool := &statemocks.EvidencePool{} + evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil) + evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{ + ev}, int64(len(ev.Bytes()))) + evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return() + + evpool2 := sm.EmptyEvidencePool{} // Make State - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool) - cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool) + cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2) cs.SetLogger(log.TestingLogger().With("module", "consensus")) cs.SetPrivValidator(pv) eventBus := types.NewEventBus() eventBus.SetLogger(log.TestingLogger().With("module", "events")) - eventBus.Start() + err := eventBus.Start() + require.NoError(t, err) cs.SetEventBus(eventBus) cs.SetTimeoutTicker(tickerFunc()) @@ -179,52 +197,15 @@ func TestReactorWithEvidence(t *testing.T) { reactors, blocksSubs, eventBuses := 
startConsensusNet(t, css, nValidators) defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) - // wait till everyone makes the first new block with no evidence - timeoutWaitGroup(t, nValidators, func(j int) { - msg := <-blocksSubs[j].Out() - block := msg.Data().(types.EventDataNewBlock).Block - assert.True(t, len(block.Evidence.Evidence) == 0) - }, css) - - // second block should have evidence - timeoutWaitGroup(t, nValidators, func(j int) { - msg := <-blocksSubs[j].Out() - block := msg.Data().(types.EventDataNewBlock).Block - assert.True(t, len(block.Evidence.Evidence) > 0) - }, css) -} - -// mock evidence pool returns no evidence for block 1, -// and returnes one piece for all higher blocks. The one piece -// is for a given validator at block 1. -type mockEvidencePool struct { - height int - ev []types.Evidence -} - -func newMockEvidencePool(val []byte) *mockEvidencePool { - return &mockEvidencePool{ - ev: []types.Evidence{types.NewMockEvidence(1, time.Now().UTC(), 1, val)}, - } -} - -// NOTE: maxBytes is ignored -func (m *mockEvidencePool) PendingEvidence(maxBytes int64) []types.Evidence { - if m.height > 0 { - return m.ev - } - return nil -} -func (m *mockEvidencePool) AddEvidence(types.Evidence) error { return nil } -func (m *mockEvidencePool) Update(block *types.Block, state sm.State) { - if m.height > 0 { - if len(block.Evidence.Evidence) == 0 { - panic("block has no evidence") - } + // we expect each validator that is the proposer to propose one piece of evidence. + for i := 0; i < nValidators; i++ { + timeoutWaitGroup(t, nValidators, func(j int) { + msg := <-blocksSubs[j].Out() + block := msg.Data().(types.EventDataNewBlock).Block + assert.Len(t, block.Evidence.Evidence, 1) + }, css) + } - m.height++ } -func (m *mockEvidencePool) IsCommitted(types.Evidence) bool { return false } //------------------------------------ @@ -259,8 +240,9 @@ func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) { var ( reactor = reactors[0] - peer = mock.NewPeer(nil) - msg = cdc.MustMarshalBinaryBare(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType}) + peer = p2pmock.NewPeer(nil) + msg = MustEncode(&HasVoteMessage{Height: 1, + Round: 1, Index: 1, Type: tmproto.PrevoteType}) ) reactor.InitPeer(peer) @@ -281,8 +263,9 @@ func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) { var ( reactor = reactors[0] - peer = mock.NewPeer(nil) - msg = cdc.MustMarshalBinaryBare(&HasVoteMessage{Height: 1, Round: 1, Index: 1, Type: types.PrevoteType}) + peer = p2pmock.NewPeer(nil) + msg = MustEncode(&HasVoteMessage{Height: 1, + Round: 1, Index: 1, Type: tmproto.PrevoteType}) ) // we should call InitPeer here @@ -349,7 +332,9 @@ func TestReactorVotingPowerChange(t *testing.T) { val1PubKey, err := css[0].privValidator.GetPubKey() require.NoError(t, err) - val1PubKeyABCI := types.TM2PB.PubKey(val1PubKey) + + val1PubKeyABCI, err := cryptoenc.PubKeyToProto(val1PubKey) + require.NoError(t, err) updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25) previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower() @@ -429,8 +414,9 @@ func TestReactorValidatorSetChanges(t *testing.T) { logger.Info("---------------------------- Testing adding one validator") newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() - require.NoError(t, err) - valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1) + assert.NoError(t, err) + valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1) + assert.NoError(t, err)
newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) // wait till everyone makes block 2 @@ -458,7 +444,8 @@ func TestReactorValidatorSetChanges(t *testing.T) { updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() require.NoError(t, err) - updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1) + updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1) + require.NoError(t, err) updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower() @@ -479,12 +466,14 @@ func TestReactorValidatorSetChanges(t *testing.T) { newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey() require.NoError(t, err) - newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2) + newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2) + require.NoError(t, err) newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey() require.NoError(t, err) - newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3) + newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3) + require.NoError(t, err) newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3) @@ -650,7 +639,7 @@ func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*State) { // we're running many nodes in-process, possibly in a virtual machine, // and spewing debug messages - making a block could take a while, - timeout := time.Second * 300 + timeout := time.Second * 120 select { case <-done: @@ -662,7 +651,8 @@ func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*State) { t.Log("") } os.Stdout.Write([]byte("pprof.Lookup('goroutine'):\n")) - pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + require.NoError(t, err) capture() panic("Timed out waiting for all validators to commit a block") } @@ -680,18 +670,19 @@ func capture() { func TestNewRoundStepMessageValidateBasic(t *testing.T) { testCases := []struct { // nolint: maligned expectErr bool - messageRound int - messageLastCommitRound int + messageRound int32 + messageLastCommitRound int32 messageHeight int64 testName string messageStep cstypes.RoundStepType }{ - {false, 0, 0, 0, "Valid Message", 0x01}, - {true, -1, 0, 0, "Invalid Message", 0x01}, - {true, 0, 0, -1, "Invalid Message", 0x01}, - {true, 0, 0, 1, "Invalid Message", 0x00}, - {true, 0, 0, 1, "Invalid Message", 0x00}, - {true, 0, -2, 2, "Invalid Message", 0x01}, + {false, 0, 0, 0, "Valid Message", cstypes.RoundStepNewHeight}, + {true, -1, 0, 0, "Negative round", cstypes.RoundStepNewHeight}, + {true, 0, 0, -1, "Negative height", cstypes.RoundStepNewHeight}, + {true, 0, 0, 0, "Invalid Step", cstypes.RoundStepCommit + 1}, + // The following cases will be handled by ValidateHeight + {false, 0, 0, 1, "H == 1 but LCR != -1 ", cstypes.RoundStepNewHeight}, + {false, 0, -1, 2, "H > 1 but LCR < 0", cstypes.RoundStepNewHeight}, } for _, tc := range testCases { @@ -704,7 +695,47 @@ func TestNewRoundStepMessageValidateBasic(t *testing.T) { LastCommitRound: tc.messageLastCommitRound, } - assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result") + err := message.ValidateBasic() + if tc.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } }) } } + +func
TestNewRoundStepMessageValidateHeight(t *testing.T) { + initialHeight := int64(10) + testCases := []struct { // nolint: maligned + expectErr bool + messageLastCommitRound int32 + messageHeight int64 + testName string + }{ + {false, 0, 11, "Valid Message"}, + {true, 0, -1, "Negative height"}, + {true, 0, 0, "Zero height"}, + {true, 0, 10, "Initial height but LCR != -1 "}, + {true, -1, 11, "Normal height but LCR < 0"}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + message := NewRoundStepMessage{ + Height: tc.messageHeight, + Round: 0, + Step: cstypes.RoundStepNewHeight, + LastCommitRound: tc.messageLastCommitRound, + } + + err := message.ValidateHeight(initialHeight) + if tc.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } }) } } @@ -718,16 +749,19 @@ func TestNewValidBlockMessageValidateBasic(t *testing.T) { {func(msg *NewValidBlockMessage) { msg.Height = -1 }, "negative Height"}, {func(msg *NewValidBlockMessage) { msg.Round = -1 }, "negative Round"}, { - func(msg *NewValidBlockMessage) { msg.BlockPartsHeader.Total = 2 }, - "blockParts bit array size 1 not equal to BlockPartsHeader.Total 2", + func(msg *NewValidBlockMessage) { msg.BlockPartSetHeader.Total = 2 }, + "blockParts bit array size 1 not equal to BlockPartSetHeader.Total 2", }, { - func(msg *NewValidBlockMessage) { msg.BlockPartsHeader.Total = 0; msg.BlockParts = bits.NewBitArray(0) }, + func(msg *NewValidBlockMessage) { + msg.BlockPartSetHeader.Total = 0 + msg.BlockParts = bits.NewBitArray(0) + }, "empty blockParts", }, { - func(msg *NewValidBlockMessage) { msg.BlockParts = bits.NewBitArray(types.MaxBlockPartsCount + 1) }, - "blockParts bit array size 1602 not equal to BlockPartsHeader.Total 1", + func(msg *NewValidBlockMessage) { msg.BlockParts = bits.NewBitArray(int(types.MaxBlockPartsCount) + 1) }, + "blockParts bit array size 1602 not equal to BlockPartSetHeader.Total 1", }, } @@ -737,7 +771,7 @@ func TestNewValidBlockMessageValidateBasic(t *testing.T) { msg := &NewValidBlockMessage{ Height: 1, Round: 0, - BlockPartsHeader: types.PartSetHeader{ + BlockPartSetHeader: types.PartSetHeader{ Total: 1, }, BlockParts: bits.NewBitArray(1), @@ -762,7 +796,7 @@ func TestProposalPOLMessageValidateBasic(t *testing.T) { {func(msg *ProposalPOLMessage) { msg.ProposalPOLRound = -1 }, "negative ProposalPOLRound"}, {func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(0) }, "empty ProposalPOL bit array"}, {func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(types.MaxVotesCount + 1) }, - "ProposalPOL bit array is too big: 10001, max: 10000"}, + "proposalPOL bit array is too big: 10001, max: 10000"}, } for i, tc := range testCases { @@ -789,7 +823,7 @@ func TestBlockPartMessageValidateBasic(t *testing.T) { testCases := []struct { testName string messageHeight int64 - messageRound int + messageRound int32 messagePart *types.Part expectErr bool }{ @@ -812,24 +846,24 @@ func TestBlockPartMessageValidateBasic(t *testing.T) { } message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)} - message.Part.Index = -1 + message.Part.Index = 1 assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result") } func TestHasVoteMessageValidateBasic(t *testing.T) { const ( - validSignedMsgType types.SignedMsgType = 0x01 - invalidSignedMsgType types.SignedMsgType = 0x03 + validSignedMsgType tmproto.SignedMsgType = 0x01 + invalidSignedMsgType tmproto.SignedMsgType = 0x03 ) testCases := []struct { // nolint: maligned 
expectErr bool - messageRound int - messageIndex int + messageRound int32 + messageIndex int32 messageHeight int64 testName string - messageType types.SignedMsgType + messageType tmproto.SignedMsgType }{ {false, 0, 0, 0, "Valid Message", validSignedMsgType}, {true, -1, 0, 0, "Invalid Message", validSignedMsgType}, @@ -855,25 +889,25 @@ func TestHasVoteMessageValidateBasic(t *testing.T) { func TestVoteSetMaj23MessageValidateBasic(t *testing.T) { const ( - validSignedMsgType types.SignedMsgType = 0x01 - invalidSignedMsgType types.SignedMsgType = 0x03 + validSignedMsgType tmproto.SignedMsgType = 0x01 + invalidSignedMsgType tmproto.SignedMsgType = 0x03 ) validBlockID := types.BlockID{} invalidBlockID := types.BlockID{ Hash: bytes.HexBytes{}, - PartsHeader: types.PartSetHeader{ - Total: -1, - Hash: bytes.HexBytes{}, + PartSetHeader: types.PartSetHeader{ + Total: 1, + Hash: []byte{0}, }, } testCases := []struct { // nolint: maligned expectErr bool - messageRound int + messageRound int32 messageHeight int64 testName string - messageType types.SignedMsgType + messageType tmproto.SignedMsgType messageBlockID types.BlockID }{ {false, 0, 0, "Valid Message", validSignedMsgType, validBlockID}, @@ -899,23 +933,22 @@ func TestVoteSetMaj23MessageValidateBasic(t *testing.T) { } func TestVoteSetBitsMessageValidateBasic(t *testing.T) { - testCases := []struct { // nolint: maligned + testCases := []struct { malleateFn func(*VoteSetBitsMessage) expErr string }{ {func(msg *VoteSetBitsMessage) {}, ""}, {func(msg *VoteSetBitsMessage) { msg.Height = -1 }, "negative Height"}, - {func(msg *VoteSetBitsMessage) { msg.Round = -1 }, "negative Round"}, {func(msg *VoteSetBitsMessage) { msg.Type = 0x03 }, "invalid Type"}, {func(msg *VoteSetBitsMessage) { msg.BlockID = types.BlockID{ Hash: bytes.HexBytes{}, - PartsHeader: types.PartSetHeader{ - Total: -1, - Hash: bytes.HexBytes{}, + PartSetHeader: types.PartSetHeader{ + Total: 1, + Hash: []byte{0}, }, } - }, "wrong BlockID: wrong PartsHeader: negative Total"}, + }, "wrong BlockID: wrong PartSetHeader: wrong Hash:"}, {func(msg *VoteSetBitsMessage) { msg.Votes = bits.NewBitArray(types.MaxVotesCount + 1) }, "votes bit array is too big: 10001, max: 10000"}, } diff --git a/consensus/replay.go b/consensus/replay.go index 3d9d6614b..bfec9e96d 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -6,21 +6,14 @@ import ( "hash/crc32" "io" "reflect" - - //"strconv" - //"strings" "time" abci "github.com/tendermint/tendermint/abci/types" - //auto "github.com/tendermint/tendermint/libs/autofile" - dbm "github.com/tendermint/tm-db" - + "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" ) var crc32c = crc32.MakeTable(crc32.Castagnoli) @@ -76,7 +69,7 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr case *ProposalMessage: p := msg.Proposal cs.Logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header", - p.BlockID.PartsHeader, "pol", p.POLRound, "peer", peerID) + p.BlockID.PartSetHeader, "pol", p.POLRound, "peer", peerID) case *BlockPartMessage: cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID) case *VoteMessage: @@ -125,16 +118,23 @@ func (cs *State) catchupReplay(csHeight int64) error { // Search for last height marker. 
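The hunk that follows reworks the WAL end-height marker so that replay works when genesis configures an initial height above 1. A worked example under the new rule, with illustrative numbers:

// With InitialHeight = 5, replaying csHeight = 5 must search for the
// #ENDHEIGHT 0 marker written when the WAL was created, since no
// #ENDHEIGHT 4 ever existed; replaying csHeight = 6 searches for
// #ENDHEIGHT 5 as before.
endHeight := csHeight - 1
if csHeight == initialHeight { // initialHeight = cs.state.InitialHeight
	endHeight = 0
}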
// // Ignore data corruption errors in previous heights because we only care about last height - gr, found, err = cs.wal.SearchForEndHeight(csHeight-1, &WALSearchOptions{IgnoreDataCorruptionErrors: true}) + if csHeight < cs.state.InitialHeight { + return fmt.Errorf("cannot replay height %v, below initial height %v", csHeight, cs.state.InitialHeight) + } + endHeight := csHeight - 1 + if csHeight == cs.state.InitialHeight { + endHeight = 0 + } + gr, found, err = cs.wal.SearchForEndHeight(endHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true}) if err == io.EOF { - cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", csHeight-1) + cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", endHeight) } else if err != nil { return err } if !found { - return fmt.Errorf("cannot replay height %d. WAL does not contain #ENDHEIGHT for %d", csHeight, csHeight-1) + return fmt.Errorf("cannot replay height %d. WAL does not contain #ENDHEIGHT for %d", csHeight, endHeight) } - defer gr.Close() // nolint: errcheck + defer gr.Close() cs.Logger.Info("Catchup by replaying consensus messages", "height", csHeight) @@ -198,7 +198,7 @@ func makeHeightSearchFunc(height int64) auto.SearchFunc { //--------------------------------------------------- type Handshaker struct { - stateDB dbm.DB + stateStore sm.Store initialState sm.State store sm.BlockStore eventBus types.BlockEventPublisher @@ -208,11 +208,11 @@ type Handshaker struct { nBlocks int // number of blocks applied to the state } -func NewHandshaker(stateDB dbm.DB, state sm.State, +func NewHandshaker(stateStore sm.Store, state sm.State, store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker { return &Handshaker{ - stateDB: stateDB, + stateStore: stateStore, initialState: state, store: store, eventBus: types.NopEventBus{}, @@ -259,10 +259,9 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { "protocol-version", res.AppVersion, ) - // Set AppVersion on the state. - if h.initialState.Version.Consensus.App != version.Protocol(res.AppVersion) { - h.initialState.Version.Consensus.App = version.Protocol(res.AppVersion) - sm.SaveState(h.stateDB, h.initialState) + // Only set the version if there is no existing state. + if h.initialState.LastBlockHeight == 0 { + h.initialState.Version.Consensus.App = res.AppVersion } // Replay blocks up to the latest in the blockstore. @@ -312,6 +311,7 @@ func (h *Handshaker) ReplayBlocks( req := abci.RequestInitChain{ Time: h.genDoc.GenesisTime, ChainId: h.genDoc.ChainID, + InitialHeight: h.genDoc.InitialHeight, ConsensusParams: csParams, Validators: nextVals, AppStateBytes: h.genDoc.AppState, @@ -321,7 +321,15 @@ func (h *Handshaker) ReplayBlocks( return nil, err } - if stateBlockHeight == 0 { //we only update state when we are in initial state + appHash = res.AppHash + + if stateBlockHeight == 0 { // we only update state when we are in initial state + // If the app did not return an app hash, we keep the one set from the genesis doc in + // the state. We don't set appHash since we don't want the genesis doc app hash + // recorded in the genesis block. We should probably just remove GenesisDoc.AppHash. + if len(res.AppHash) > 0 { + state.AppHash = res.AppHash + } // If the app returned validators or consensus params, update the state. 
if len(res.Validators) > 0 { vals, err := types.PB2TM.ValidatorUpdates(res.Validators) @@ -329,16 +337,21 @@ func (h *Handshaker) ReplayBlocks( return nil, err } state.Validators = types.NewValidatorSet(vals) - state.NextValidators = types.NewValidatorSet(vals) + state.NextValidators = types.NewValidatorSet(vals).CopyIncrementProposerPriority(1) } else if len(h.genDoc.Validators) == 0 { // If validator set is not set in genesis and still empty after InitChain, exit. return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain") } if res.ConsensusParams != nil { - state.ConsensusParams = state.ConsensusParams.Update(res.ConsensusParams) + state.ConsensusParams = types.UpdateConsensusParams(state.ConsensusParams, res.ConsensusParams) + state.Version.Consensus.App = state.ConsensusParams.Version.AppVersion + } + // We update the last results hash with the empty hash, to conform with RFC-6962. + state.LastResultsHash = merkle.HashFromByteSlices(nil) + if err := h.stateStore.Save(state); err != nil { + return nil, err } - sm.SaveState(h.stateDB, state) } } @@ -348,7 +361,11 @@ func (h *Handshaker) ReplayBlocks( assertAppHashEqualsOneFromState(appHash, state) return appHash, nil - case appBlockHeight < storeBlockBase-1: + case appBlockHeight == 0 && state.InitialHeight < storeBlockBase: + // the app has no state, and the block store is truncated above the initial height + return appHash, sm.ErrAppBlockHeightTooLow{AppHeight: appBlockHeight, StoreBase: storeBlockBase} + + case appBlockHeight > 0 && appBlockHeight < storeBlockBase-1: // the app is too far behind truncated store (can be 1 behind since we replay the next) return appHash, sm.ErrAppBlockHeightTooLow{AppHeight: appBlockHeight, StoreBase: storeBlockBase} @@ -401,7 +418,7 @@ func (h *Handshaker) ReplayBlocks( case appBlockHeight == storeBlockHeight: // We ran Commit, but didn't save the state, so replayBlock with mock app. - abciResponses, err := sm.LoadABCIResponses(h.stateDB, storeBlockHeight) + abciResponses, err := h.stateStore.LoadABCIResponses(storeBlockHeight) if err != nil { return nil, err } @@ -439,7 +456,11 @@ func (h *Handshaker) replayBlocks( if mutateState { finalBlock-- } - for i := appBlockHeight + 1; i <= finalBlock; i++ { + firstBlock := appBlockHeight + 1 + if firstBlock == 1 { + firstBlock = state.InitialHeight + } + for i := firstBlock; i <= finalBlock; i++ { h.logger.Info("Applying block", "height", i) block := h.store.LoadBlock(i) // Extra check to ensure the app was not changed in a way it shouldn't have. if appBlockHeight < storeBlockHeight { assertAppHashEqualsOneFromBlock(appHash, block) } - appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateDB) + appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight) if err != nil { return nil, err } @@ -473,7 +494,9 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap block := h.store.LoadBlock(height) meta := h.store.LoadBlockMeta(height) - blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, mock.Mempool{}, sm.MockEvidencePool{}) + // Use stubs for both mempool and evidence pool since neither transactions nor + // evidence are needed here - the block already exists.
+ blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{}) blockExec.SetEventBus(h.eventBus) var err error @@ -508,46 +531,3 @@ Did you reset Tendermint without resetting your application's data?`, appHash, state.AppHash, state)) } } - -//-------------------------------------------------------------------------------- -// mockProxyApp uses ABCIResponses to give the right results -// Useful because we don't want to call Commit() twice for the same block on the real app. - -func newMockProxyApp(appHash []byte, abciResponses *sm.ABCIResponses) proxy.AppConnConsensus { - clientCreator := proxy.NewLocalClientCreator(&mockProxyApp{ - appHash: appHash, - abciResponses: abciResponses, - }) - cli, _ := clientCreator.NewABCIClient() - err := cli.Start() - if err != nil { - panic(err) - } - return proxy.NewAppConnConsensus(cli) -} - -type mockProxyApp struct { - abci.BaseApplication - - appHash []byte - txCount int - abciResponses *sm.ABCIResponses -} - -func (mock *mockProxyApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { - r := mock.abciResponses.DeliverTxs[mock.txCount] - mock.txCount++ - if r == nil { //it could be nil because of amino unMarshall, it will cause an empty ResponseDeliverTx to become nil - return abci.ResponseDeliverTx{} - } - return *r -} - -func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { - mock.txCount = 0 - return *mock.abciResponses.EndBlock -} - -func (mock *mockProxyApp) Commit() abci.ResponseCommit { - return abci.ResponseCommit{Data: mock.appHash} -} diff --git a/consensus/replay_file.go b/consensus/replay_file.go index b8b8c51da..4bf7466ab 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -3,19 +3,18 @@ package consensus import ( "bufio" "context" + "errors" "fmt" "io" "os" "strconv" "strings" - "github.com/pkg/errors" dbm "github.com/tendermint/tm-db" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" - "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" @@ -56,9 +55,13 @@ func (cs *State) ReplayFile(file string, console bool) error { ctx := context.Background() newStepSub, err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep) if err != nil { - return errors.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep) + return fmt.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep) } - defer cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep) + defer func() { + if err := cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep); err != nil { + cs.Logger.Error("Error unsubscribing from event bus", "err", err) + } + }() // just open the file for reading, no need to use wal fp, err := os.OpenFile(file, os.O_RDONLY, 0600) @@ -67,7 +70,7 @@ func (cs *State) ReplayFile(file string, console bool) error { } pb := newPlayback(file, fp, cs, cs.state.Copy()) - defer pb.fp.Close() // nolint: errcheck + defer pb.fp.Close() var nextN int // apply N msgs in a row var msg *TimedWALMessage @@ -121,7 +124,9 @@ func newPlayback(fileName string, fp *os.File, cs *State, genState sm.State) *pl // go back count steps by resetting the state and running (pb.count - count) steps func (pb *playback) replayReset(count int, newStepSub types.Subscription) error { - pb.cs.Stop() +
if err := pb.cs.Stop(); err != nil { + return err + } pb.cs.Wait() newCS := NewState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec, @@ -219,7 +224,11 @@ func (pb *playback) replayConsoleLoop() int { if err != nil { tmos.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)) } - defer pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep) + defer func() { + if err := pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep); err != nil { + pb.cs.Logger.Error("Error unsubscribing from eventBus", "err", err) + } + }() if len(tokens) == 1 { if err := pb.replayReset(1, newStepSub); err != nil { @@ -277,11 +286,18 @@ func (pb *playback) replayConsoleLoop() int { func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *State { dbType := dbm.BackendType(config.DBBackend) // Get BlockStore - blockStoreDB := dbm.NewDB("blockstore", dbType, config.DBDir()) + blockStoreDB, err := dbm.NewDB("blockstore", dbType, config.DBDir()) + if err != nil { + tmos.Exit(err.Error()) + } blockStore := store.NewBlockStore(blockStoreDB) // Get State - stateDB := dbm.NewDB("state", dbType, config.DBDir()) + stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + if err != nil { + tmos.Exit(err.Error()) + } + stateStore := sm.NewStore(stateDB) gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile()) if err != nil { tmos.Exit(err.Error()) @@ -304,15 +320,15 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo tmos.Exit(fmt.Sprintf("Failed to start event bus: %v", err)) } - handshaker := NewHandshaker(stateDB, state, blockStore, gdoc) + handshaker := NewHandshaker(stateStore, state, blockStore, gdoc) handshaker.SetEventBus(eventBus) err = handshaker.Handshake(proxyApp) if err != nil { tmos.Exit(fmt.Sprintf("Error on handshake: %v", err)) } - mempool, evpool := mock.Mempool{}, sm.MockEvidencePool{} - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) + mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{} + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) consensusState := NewState(csConfig, state.Copy(), blockExec, blockStore, mempool, evpool) diff --git a/consensus/replay_stubs.go b/consensus/replay_stubs.go new file mode 100644 index 000000000..08974a67e --- /dev/null +++ b/consensus/replay_stubs.go @@ -0,0 +1,90 @@ +package consensus + +import ( + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/clist" + mempl "github.com/tendermint/tendermint/mempool" + tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + "github.com/tendermint/tendermint/proxy" + "github.com/tendermint/tendermint/types" +) + +//----------------------------------------------------------------------------- + +type emptyMempool struct{} + +var _ mempl.Mempool = emptyMempool{} + +func (emptyMempool) Lock() {} +func (emptyMempool) Unlock() {} +func (emptyMempool) Size() int { return 0 } +func (emptyMempool) CheckTx(_ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error { + return nil +} +func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } +func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } +func (emptyMempool) Update( + _ int64, + _ types.Txs, + _ []*abci.ResponseDeliverTx, + _ mempl.PreCheckFunc, + _ mempl.PostCheckFunc, +) error { + return nil +} +func (emptyMempool) Flush() {} +func 
(emptyMempool) FlushAppConn() error { return nil } +func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } +func (emptyMempool) EnableTxsAvailable() {} +func (emptyMempool) TxsBytes() int64 { return 0 } + +func (emptyMempool) TxsFront() *clist.CElement { return nil } +func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } + +func (emptyMempool) InitWAL() error { return nil } +func (emptyMempool) CloseWAL() {} + +//----------------------------------------------------------------------------- +// mockProxyApp uses ABCIResponses to give the right results. +// +// Useful because we don't want to call Commit() twice for the same block on +// the real app. + +func newMockProxyApp(appHash []byte, abciResponses *tmstate.ABCIResponses) proxy.AppConnConsensus { + clientCreator := proxy.NewLocalClientCreator(&mockProxyApp{ + appHash: appHash, + abciResponses: abciResponses, + }) + cli, _ := clientCreator.NewABCIClient() + err := cli.Start() + if err != nil { + panic(err) + } + return proxy.NewAppConnConsensus(cli) +} + +type mockProxyApp struct { + abci.BaseApplication + + appHash []byte + txCount int + abciResponses *tmstate.ABCIResponses +} + +func (mock *mockProxyApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { + r := mock.abciResponses.DeliverTxs[mock.txCount] + mock.txCount++ + if r == nil { + return abci.ResponseDeliverTx{} + } + return *r +} + +func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { + mock.txCount = 0 + return *mock.abciResponses.EndBlock +} + +func (mock *mockProxyApp) Commit() abci.ResponseCommit { + return abci.ResponseCommit{Data: mock.appHash} +} diff --git a/consensus/replay_test.go b/consensus/replay_test.go index f886cdeeb..2970f15ed 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -9,29 +9,29 @@ import ( "os" "path/filepath" "runtime" + "sort" "testing" "time" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "sort" - dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" + cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" mempl "github.com/tendermint/tendermint/mempool" - "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/privval" + tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" ) func TestMain(m *testing.M) { @@ -65,9 +65,9 @@ func TestMain(m *testing.M) { // wal writer when we need to, instead of with every message. 
func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config, - lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) { + lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) { logger := log.TestingLogger() - state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile()) + state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile()) privValidator := loadPrivValidator(consensusReplayConfig) cs := newStateWithConfigAndBlockStore( consensusReplayConfig, @@ -83,7 +83,11 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi err := cs.Start() require.NoError(t, err) - defer cs.Stop() + defer func() { + if err := cs.Stop(); err != nil { + t.Error(err) + } + }() // This is just a signal that we haven't halted; its not something contained // in the WAL itself. Assuming the consensus state is running, replay of any @@ -107,7 +111,9 @@ func sendTxs(ctx context.Context, cs *State) { return default: tx := []byte{byte(i)} - assertMempool(cs.txNotifier).CheckTx(tx, nil, mempl.TxInfo{}) + if err := assertMempool(cs.txNotifier).CheckTx(tx, nil, mempl.TxInfo{}); err != nil { + panic(err) + } i++ } } @@ -153,7 +159,9 @@ LOOP: logger := log.NewNopLogger() blockDB := dbm.NewMemDB() stateDB := blockDB - state, _ := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) + stateStore := sm.NewStore(stateDB) + state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) + require.NoError(t, err) privValidator := loadPrivValidator(consensusReplayConfig) cs := newStateWithConfigAndBlockStore( consensusReplayConfig, @@ -176,6 +184,7 @@ LOOP: csWal, err := cs.OpenWAL(walFile) require.NoError(t, err) crashingWal.next = csWal + // reset the message counter crashingWal.msgIndex = 1 cs.wal = crashingWal @@ -191,10 +200,10 @@ LOOP: t.Logf("WAL panicked: %v", err) // make sure we can make blocks after a crash - startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB) + startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateStore) // stop consensus state and transactions sender (initFn) - cs.Stop() + cs.Stop() //nolint:errcheck // Logging this error causes failure cancel() // if we reached the required height, exit @@ -295,8 +304,8 @@ const ( ) var ( - mempool = mock.Mempool{} - evpool = sm.MockEvidencePool{} + mempool = emptyMempool{} + evpool = sm.EmptyEvidencePool{} sim testSim ) @@ -331,34 +340,39 @@ func TestSimulateValidatorsChange(t *testing.T) { vss := make([]*validatorStub, nPeers) for i := 0; i < nPeers; i++ { - vss[i] = newValidatorStub(css[i].privValidator, i) + vss[i] = newValidatorStub(css[i].privValidator, int32(i)) } height, round := css[0].Height, css[0].Round + // start the machine startTestRound(css[0], height, round) incrementHeight(vss...) ensureNewRound(newRoundCh, height, 0) ensureNewProposal(proposalCh, height, round) rs := css[0].GetRoundState() - signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...) + signAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...) ensureNewRound(newRoundCh, height+1, 0) - //height 2 + // HEIGHT 2 height++ incrementHeight(vss...) 
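// The proposal-signing flow used below is the new pattern introduced by this
// diff: PrivValidator.SignProposal now takes the protobuf form of the
// proposal, so the signature must be copied back onto the domain type.
// A sketch of the pattern (identifiers as used in this test):
//
//	p := proposal.ToProto()
//	if err := vs.SignProposal(chainID, p); err != nil {
//		// handle the signing error
//	}
//	proposal.Signature = p.Signature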
newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() require.NoError(t, err) - valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1) + valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1) + require.NoError(t, err) newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, nil, mempl.TxInfo{}) assert.Nil(t, err) - propBlock, _ := css[0].createProposalBlock() //changeProposer(t, cs1, vs2) + propBlock, _ := css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts := propBlock.MakePartSet(partSize) - blockID := types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} + blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + proposal := types.NewProposal(vss[1].Height, round, -1, blockID) - if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil { + p := proposal.ToProto() + if err := vss[1].SignProposal(config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } + proposal.Signature = p.Signature // set the proposal block if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { @@ -366,25 +380,29 @@ func TestSimulateValidatorsChange(t *testing.T) { } ensureNewProposal(proposalCh, height, round) rs = css[0].GetRoundState() - signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...) + signAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...) ensureNewRound(newRoundCh, height+1, 0) - //height 3 + // HEIGHT 3 height++ incrementHeight(vss...) updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() require.NoError(t, err) - updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1) + updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1) + require.NoError(t, err) updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) err = assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1, nil, mempl.TxInfo{}) assert.Nil(t, err) - propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2) + propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) - blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} + blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + proposal = types.NewProposal(vss[2].Height, round, -1, blockID) - if err := vss[2].SignProposal(config.ChainID(), proposal); err != nil { + p = proposal.ToProto() + if err := vss[2].SignProposal(config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } + proposal.Signature = p.Signature // set the proposal block if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { @@ -392,48 +410,56 @@ func TestSimulateValidatorsChange(t *testing.T) { } ensureNewProposal(proposalCh, height, round) rs = css[0].GetRoundState() - signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...) + signAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:nVals]...) ensureNewRound(newRoundCh, height+1, 0) - //height 4 + // HEIGHT 4 height++ incrementHeight(vss...) 
newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey() require.NoError(t, err) - newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2) + newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2) + require.NoError(t, err) newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2, nil, mempl.TxInfo{}) assert.Nil(t, err) newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey() require.NoError(t, err) - newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3) + newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3) + require.NoError(t, err) newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx3, nil, mempl.TxInfo{}) assert.Nil(t, err) - propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2) + propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) - blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} + blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} newVss := make([]*validatorStub, nVals+1) copy(newVss, vss[:nVals+1]) - sort.Sort(ValidatorStubsByAddress(newVss)) - selfIndex := 0 - for i, vs := range newVss { - vsPubKey, err := vs.GetPubKey() - require.NoError(t, err) + sort.Sort(ValidatorStubsByPower(newVss)) - css0PubKey, err := css[0].privValidator.GetPubKey() - require.NoError(t, err) + valIndexFn := func(cssIdx int) int { + for i, vs := range newVss { + vsPubKey, err := vs.GetPubKey() + require.NoError(t, err) - if vsPubKey.Equals(css0PubKey) { - selfIndex = i - break + cssPubKey, err := css[cssIdx].privValidator.GetPubKey() + require.NoError(t, err) + + if vsPubKey.Equals(cssPubKey) { + return i + } } + panic(fmt.Sprintf("validator css[%d] not found in newVss", cssIdx)) } + selfIndex := valIndexFn(0) + proposal = types.NewProposal(vss[3].Height, round, -1, blockID) - if err := vss[3].SignProposal(config.ChainID(), proposal); err != nil { + p = proposal.ToProto() + if err := vss[3].SignProposal(config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } + proposal.Signature = p.Signature // set the proposal block if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { @@ -450,52 +476,49 @@ func TestSimulateValidatorsChange(t *testing.T) { if i == selfIndex { continue } - signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i]) + signAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i]) } ensureNewRound(newRoundCh, height+1, 0) - //height 5 + // HEIGHT 5 height++ incrementHeight(vss...) + // Reflect the changes to vss[nVals] at height 3 and resort newVss. + newVssIdx := valIndexFn(nVals) + newVss[newVssIdx].VotingPower = 25 + sort.Sort(ValidatorStubsByPower(newVss)) + selfIndex = valIndexFn(0) ensureNewProposal(proposalCh, height, round) rs = css[0].GetRoundState() for i := 0; i < nVals+1; i++ { if i == selfIndex { continue } - signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i]) + signAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i]) } ensureNewRound(newRoundCh, height+1, 0) - //height 6 + // HEIGHT 6 height++ incrementHeight(vss...) 
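// A validator update carrying power 0 removes that validator from the set
// (standard ABCI semantics, which the kvstore test app follows); the tx
// below drops one of the validators introduced at height 4: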
removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx3, nil, mempl.TxInfo{}) assert.Nil(t, err) - propBlock, _ = css[0].createProposalBlock() //changeProposer(t, cs1, vs2) + propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) - blockID = types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} + blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} newVss = make([]*validatorStub, nVals+3) copy(newVss, vss[:nVals+3]) - sort.Sort(ValidatorStubsByAddress(newVss)) - for i, vs := range newVss { - vsKeyKey, err := vs.GetPubKey() - require.NoError(t, err) + sort.Sort(ValidatorStubsByPower(newVss)) - css0PubKey, err := css[0].privValidator.GetPubKey() - require.NoError(t, err) - - if vsKeyKey.Equals(css0PubKey) { - selfIndex = i - break - } - } + selfIndex = valIndexFn(0) proposal = types.NewProposal(vss[1].Height, round, -1, blockID) - if err := vss[1].SignProposal(config.ChainID(), proposal); err != nil { + p = proposal.ToProto() + if err := vss[1].SignProposal(config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } + proposal.Signature = p.Signature // set the proposal block if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { @@ -507,7 +530,7 @@ func TestSimulateValidatorsChange(t *testing.T) { if i == selfIndex { continue } - signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i]) + signAddVotes(css[0], tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), newVss[i]) } ensureNewRound(newRoundCh, height+1, 0) @@ -561,27 +584,28 @@ func TestHandshakeReplayNone(t *testing.T) { // Test mockProxyApp should not panic when app return ABCIResponses with some empty ResponseDeliverTx func TestMockProxyApp(t *testing.T) { - sim.CleanupFunc() //clean the test env created in TestSimulateValidatorsChange + sim.CleanupFunc() // clean the test env created in TestSimulateValidatorsChange logger := log.TestingLogger() var validTxs, invalidTxs = 0, 0 txIndex := 0 assert.NotPanics(t, func() { - abciResWithEmptyDeliverTx := new(sm.ABCIResponses) + abciResWithEmptyDeliverTx := new(tmstate.ABCIResponses) abciResWithEmptyDeliverTx.DeliverTxs = make([]*abci.ResponseDeliverTx, 0) abciResWithEmptyDeliverTx.DeliverTxs = append(abciResWithEmptyDeliverTx.DeliverTxs, &abci.ResponseDeliverTx{}) // called when saveABCIResponses: - bytes := cdc.MustMarshalBinaryBare(abciResWithEmptyDeliverTx) - loadedAbciRes := new(sm.ABCIResponses) + bytes, err := proto.Marshal(abciResWithEmptyDeliverTx) + require.NoError(t, err) + loadedAbciRes := new(tmstate.ABCIResponses) // this also happens sm.LoadABCIResponses - err := cdc.UnmarshalBinaryBare(bytes, loadedAbciRes) + err = proto.Unmarshal(bytes, loadedAbciRes) require.NoError(t, err) mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes) - abciRes := new(sm.ABCIResponses) + abciRes := new(tmstate.ABCIResponses) abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs)) // Execute transactions and get hash. 
proxyCb := func(req *abci.Request, res *abci.Response) { @@ -631,17 +655,18 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin var commits []*types.Commit var store *mockBlockStore var stateDB dbm.DB - var genisisState sm.State + var genesisState sm.State if testValidatorsChange { testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode)) defer os.RemoveAll(testConfig.RootDir) stateDB = dbm.NewMemDB() - genisisState = sim.GenesisState + + genesisState = sim.GenesisState config = sim.Config chain = append([]*types.Block{}, sim.Chain...) // copy chain commits = sim.Commits - store = newMockBlockStore(config, genisisState.ConsensusParams) - } else { //test single node + store = newMockBlockStore(config, genesisState.ConsensusParams) + } else { // test single node testConfig := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode)) defer os.RemoveAll(testConfig.RootDir) walBody, err := WALWithNBlocks(t, numBlocks) @@ -656,20 +681,25 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin wal.SetLogger(log.TestingLogger()) err = wal.Start() require.NoError(t, err) - defer wal.Stop() - + t.Cleanup(func() { + if err := wal.Stop(); err != nil { + t.Error(err) + } + }) chain, commits, err = makeBlockchainFromWAL(wal) require.NoError(t, err) pubKey, err := privVal.GetPubKey() require.NoError(t, err) - stateDB, genisisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion) + stateDB, genesisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion) + } + stateStore := sm.NewStore(stateDB) store.chain = chain store.commits = commits - state := genisisState.Copy() + state := genesisState.Copy() // run the chain through state.ApplyBlock to build up the tendermint state - state = buildTMStateFromChain(config, stateDB, state, chain, nBlocks, mode) + state = buildTMStateFromChain(config, stateStore, state, chain, nBlocks, mode) latestAppHash := state.AppHash // make a new client creator @@ -682,8 +712,10 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin // use a throwaway tendermint state proxyApp := proxy.NewAppConns(clientCreator2) stateDB1 := dbm.NewMemDB() - sm.SaveState(stateDB1, genisisState) - buildAppStateFromChain(proxyApp, stateDB1, genisisState, chain, nBlocks, mode) + stateStore := sm.NewStore(stateDB1) + err := stateStore.Save(genesisState) + require.NoError(t, err) + buildAppStateFromChain(proxyApp, stateStore, genesisState, chain, nBlocks, mode) } // Prune block store if requested @@ -697,12 +729,18 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin // now start the app using the handshake - it should sync genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) - handshaker := NewHandshaker(stateDB, state, store, genDoc) + handshaker := NewHandshaker(stateStore, state, store, genDoc) proxyApp := proxy.NewAppConns(clientCreator2) if err := proxyApp.Start(); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) } - defer proxyApp.Stop() + + t.Cleanup(func() { + if err := proxyApp.Stop(); err != nil { + t.Error(err) + } + }) + err := handshaker.Handshake(proxyApp) if expectError { require.Error(t, err) @@ -737,11 +775,11 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin } } -func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.AppConns) sm.State { +func applyBlock(stateStore sm.Store, st sm.State, blk *types.Block, proxyApp proxy.AppConns) sm.State { 
testPartSize := types.BlockPartSizeBytes - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) - blkID := types.BlockID{Hash: blk.Hash(), PartsHeader: blk.MakePartSet(testPartSize).Header()} + blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: blk.MakePartSet(testPartSize).Header()} newState, _, err := blockExec.ApplyBlock(st, blkID, blk) if err != nil { panic(err) @@ -749,39 +787,40 @@ func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.Ap return newState } -func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, +func buildAppStateFromChain(proxyApp proxy.AppConns, stateStore sm.Store, state sm.State, chain []*types.Block, nBlocks int, mode uint) { // start a new app without handshake, play nBlocks blocks if err := proxyApp.Start(); err != nil { panic(err) } - defer proxyApp.Stop() + defer proxyApp.Stop() //nolint:errcheck // ignore - state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version + state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ Validators: validators, }); err != nil { panic(err) } - sm.SaveState(stateDB, state) //save height 1's validatorsInfo - + if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo + panic(err) + } switch mode { case 0: for i := 0; i < nBlocks; i++ { block := chain[i] - state = applyBlock(stateDB, state, block, proxyApp) + state = applyBlock(stateStore, state, block, proxyApp) } case 1, 2, 3: for i := 0; i < nBlocks-1; i++ { block := chain[i] - state = applyBlock(stateDB, state, block, proxyApp) + state = applyBlock(stateStore, state, block, proxyApp) } if mode == 2 || mode == 3 { // update the kvstore height and apphash // as if we ran commit but not - state = applyBlock(stateDB, state, chain[nBlocks-1], proxyApp) + state = applyBlock(stateStore, state, chain[nBlocks-1], proxyApp) } default: panic(fmt.Sprintf("unknown mode %v", mode)) @@ -791,7 +830,7 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, func buildTMStateFromChain( config *cfg.Config, - stateDB dbm.DB, + stateStore sm.Store, state sm.State, chain []*types.Block, nBlocks int, @@ -804,34 +843,35 @@ func buildTMStateFromChain( if err := proxyApp.Start(); err != nil { panic(err) } - defer proxyApp.Stop() + defer proxyApp.Stop() //nolint:errcheck - state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version + state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ Validators: validators, }); err != nil { panic(err) } - sm.SaveState(stateDB, state) //save height 1's validatorsInfo - + if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo + panic(err) + } switch mode { case 0: // sync right up for _, block := range chain { - state = applyBlock(stateDB, state, block, proxyApp) + state = applyBlock(stateStore, state, block, proxyApp) } case 1, 2, 3: // sync up to the penultimate as if we stored the block. 
// whether we commit or not depends on the appHash for _, block := range chain[:len(chain)-1] { - state = applyBlock(stateDB, state, block, proxyApp) + state = applyBlock(stateStore, state, block, proxyApp) } // apply the final block to a state copy so we can // get the right next appHash but keep the state back - applyBlock(stateDB, state, chain[len(chain)-1], proxyApp) + applyBlock(stateStore, state, chain[len(chain)-1], proxyApp) default: panic(fmt.Sprintf("unknown mode %v", mode)) } @@ -851,6 +891,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { pubKey, err := privVal.GetPubKey() require.NoError(t, err) stateDB, state, store := stateAndStore(config, pubKey, appVersion) + stateStore := sm.NewStore(stateDB) genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) state.LastValidators = state.Validators.Copy() // mode = 0 for committing all the blocks @@ -867,11 +908,17 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { proxyApp := proxy.NewAppConns(clientCreator) err := proxyApp.Start() require.NoError(t, err) - defer proxyApp.Stop() + t.Cleanup(func() { + if err := proxyApp.Stop(); err != nil { + t.Error(err) + } + }) assert.Panics(t, func() { - h := NewHandshaker(stateDB, state, store, genDoc) - h.Handshake(proxyApp) + h := NewHandshaker(stateStore, state, store, genDoc) + if err = h.Handshake(proxyApp); err != nil { + t.Log(err) + } }) } @@ -885,11 +932,17 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { proxyApp := proxy.NewAppConns(clientCreator) err := proxyApp.Start() require.NoError(t, err) - defer proxyApp.Stop() + t.Cleanup(func() { + if err := proxyApp.Stop(); err != nil { + t.Error(err) + } + }) assert.Panics(t, func() { - h := NewHandshaker(stateDB, state, store, genDoc) - h.Handshake(proxyApp) + h := NewHandshaker(stateStore, state, store, genDoc) + if err = h.Handshake(proxyApp); err != nil { + t.Log(err) + } }) } } @@ -976,7 +1029,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { if !found { return nil, nil, fmt.Errorf("wal does not contain height %d", height) } - defer gr.Close() // nolint: errcheck + defer gr.Close() // log.Notice("Build a blockchain by reading from the WAL") @@ -1005,11 +1058,20 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { case EndHeightMessage: // if its not the first one, we have a full block if thisBlockParts != nil { - var block = new(types.Block) - _, err = cdc.UnmarshalBinaryLengthPrefixedReader(thisBlockParts.GetReader(), block, 0) + var pbb = new(tmproto.Block) + bz, err := ioutil.ReadAll(thisBlockParts.GetReader()) + if err != nil { + panic(err) + } + err = proto.Unmarshal(bz, pbb) if err != nil { panic(err) } + block, err := types.BlockFromProto(pbb) + if err != nil { + panic(err) + } + if block.Height != height+1 { panic(fmt.Sprintf("read bad block from wal. 
got height %d, expected %d", block.Height, height+1)) } @@ -1029,15 +1091,23 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { return nil, nil, err } case *types.Vote: - if p.Type == types.PrecommitType { + if p.Type == tmproto.PrecommitType { thisBlockCommit = types.NewCommit(p.Height, p.Round, p.BlockID, []types.CommitSig{p.CommitSig()}) } } } // grab the last block too - var block = new(types.Block) - _, err = cdc.UnmarshalBinaryLengthPrefixedReader(thisBlockParts.GetReader(), block, 0) + bz, err := ioutil.ReadAll(thisBlockParts.GetReader()) + if err != nil { + panic(err) + } + var pbb = new(tmproto.Block) + err = proto.Unmarshal(bz, pbb) + if err != nil { + panic(err) + } + block, err := types.BlockFromProto(pbb) if err != nil { panic(err) } @@ -1059,7 +1129,7 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { case msgInfo: switch msg := m.Msg.(type) { case *ProposalMessage: - return &msg.Proposal.BlockID.PartsHeader + return &msg.Proposal.BlockID.PartSetHeader case *BlockPartMessage: return msg.Part case *VoteMessage: @@ -1076,12 +1146,15 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { func stateAndStore( config *cfg.Config, pubKey crypto.PubKey, - appVersion version.Protocol) (dbm.DB, sm.State, *mockBlockStore) { + appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) { stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB) state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) state.Version.Consensus.App = appVersion store := newMockBlockStore(config, state.ConsensusParams) - sm.SaveState(stateDB, state) + if err := stateStore.Save(state); err != nil { + panic(err) + } return stateDB, state, store } @@ -1090,20 +1163,21 @@ func stateAndStore( type mockBlockStore struct { config *cfg.Config - params types.ConsensusParams + params tmproto.ConsensusParams chain []*types.Block commits []*types.Commit base int64 } // TODO: NewBlockStore(db.NewMemDB) ... 
-func newMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore { +func newMockBlockStore(config *cfg.Config, params tmproto.ConsensusParams) *mockBlockStore { return &mockBlockStore{config, params, nil, nil, 0} } func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } func (bs *mockBlockStore) Base() int64 { return bs.base } func (bs *mockBlockStore) Size() int64 { return bs.Height() - bs.Base() + 1 } +func (bs *mockBlockStore) LoadBaseMeta() *types.BlockMeta { return bs.LoadBlockMeta(bs.base) } func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] } func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { return bs.chain[int64(len(bs.chain))-1] @@ -1111,7 +1185,7 @@ func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { block := bs.chain[height-1] return &types.BlockMeta{ - BlockID: types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()}, + BlockID: types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()}, Header: block.Header, } } @@ -1151,23 +1225,28 @@ func TestHandshakeUpdatesValidators(t *testing.T) { pubKey, err := privVal.GetPubKey() require.NoError(t, err) stateDB, state, store := stateAndStore(config, pubKey, 0x0) + stateStore := sm.NewStore(stateDB) oldValAddr := state.Validators.Validators[0].Address // now start the app using the handshake - it should sync genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) - handshaker := NewHandshaker(stateDB, state, store, genDoc) + handshaker := NewHandshaker(stateStore, state, store, genDoc) proxyApp := proxy.NewAppConns(clientCreator) if err := proxyApp.Start(); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) } - defer proxyApp.Stop() + t.Cleanup(func() { + if err := proxyApp.Stop(); err != nil { + t.Error(err) + } + }) if err := handshaker.Handshake(proxyApp); err != nil { t.Fatalf("Error on abci handshake: %v", err) } - // reload the state, check the validator set was updated - state = sm.LoadState(stateDB) + state, err = stateStore.Load() + require.NoError(t, err) newValAddr := state.Validators.Validators[0].Address expectValAddr := val.Address diff --git a/consensus/state.go b/consensus/state.go index b58bb3050..40399370c 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -2,36 +2,44 @@ package consensus import ( "bytes" + "errors" "fmt" + "io/ioutil" + "os" "reflect" "runtime/debug" - "sync" "time" - "github.com/pkg/errors" + "github.com/gogo/protobuf/proto" + cfg "github.com/tendermint/tendermint/config" + cstypes "github.com/tendermint/tendermint/consensus/types" + "github.com/tendermint/tendermint/crypto" + tmevents "github.com/tendermint/tendermint/libs/events" "github.com/tendermint/tendermint/libs/fail" + tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" + tmmath "github.com/tendermint/tendermint/libs/math" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/libs/service" - tmtime "github.com/tendermint/tendermint/types/time" - - cfg "github.com/tendermint/tendermint/config" - cstypes "github.com/tendermint/tendermint/consensus/types" - tmevents "github.com/tendermint/tendermint/libs/events" + tmsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/p2p" + tmproto 
"github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) //----------------------------------------------------------------------------- // Errors var ( - ErrInvalidProposalSignature = errors.New("error invalid proposal signature") - ErrInvalidProposalPOLRound = errors.New("error invalid proposal POL round") - ErrAddingVote = errors.New("error adding vote") - ErrVoteHeightMismatch = errors.New("error vote height mismatch") + ErrInvalidProposalSignature = errors.New("error invalid proposal signature") + ErrInvalidProposalPOLRound = errors.New("error invalid proposal POL round") + ErrAddingVote = errors.New("error adding vote") + ErrSignatureFoundInPastBlocks = errors.New("found signature from the same key") + + errPubKeyIsNotSet = errors.New("pubkey is not set. Look for \"Can't get private validator pubkey\" errors") ) //----------------------------------------------------------------------------- @@ -50,7 +58,7 @@ type msgInfo struct { type timeoutInfo struct { Duration time.Duration `json:"duration"` Height int64 `json:"height"` - Round int `json:"round"` + Round int32 `json:"round"` Step cstypes.RoundStepType `json:"step"` } @@ -65,7 +73,9 @@ type txNotifier interface { // interface to the evidence pool type evidencePool interface { - AddEvidence(types.Evidence) error + // Adds consensus based evidence to the evidence pool. This function differs to + // AddEvidence by bypassing verification and adding it immediately to the pool + AddEvidenceFromConsensus(types.Evidence) error } // State handles execution of the consensus algorithm. @@ -93,9 +103,12 @@ type State struct { evpool evidencePool // internal state - mtx sync.RWMutex + mtx tmsync.RWMutex cstypes.RoundState state sm.State // State until height-1. + // privValidator pubkey, memoized for the duration of one block + // to avoid extra requests to HSM + privValidatorPubKey crypto.PubKey // state changes may be triggered by: msgs from peers, // msgs from ourself, or by timeouts @@ -121,8 +134,8 @@ type State struct { nSteps int // some functions can be overwritten for testing - decideProposal func(height int64, round int) - doPrevote func(height int64, round int) + decideProposal func(height int64, round int32) + doPrevote func(height int64, round int32) setProposal func(proposal *types.Proposal) error // closed when we finish shutting down @@ -170,11 +183,16 @@ func NewState( cs.doPrevote = cs.defaultDoPrevote cs.setProposal = cs.defaultSetProposal + // We have no votes, so reconstruct LastCommit from SeenCommit. + if state.LastBlockHeight > 0 { + cs.reconstructLastCommit(state) + } + cs.updateToState(state) // Don't call scheduleRound0 yet. // We do that upon Start(). - cs.reconstructLastCommit(state) + cs.BaseService = *service.NewBaseService(nil, "State", cs) for _, option := range options { option(cs) @@ -205,7 +223,7 @@ func StateMetrics(metrics *Metrics) StateOption { // String returns a string. func (cs *State) String() string { // better not to access shared variables - return fmt.Sprintf("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step) + return "ConsensusState" } // GetState returns a copy of the chain state. @@ -231,18 +249,18 @@ func (cs *State) GetRoundState() *cstypes.RoundState { return &rs } -// GetRoundStateJSON returns a json of RoundState, marshalled using go-amino. +// GetRoundStateJSON returns a json of RoundState. 
func (cs *State) GetRoundStateJSON() ([]byte, error) { cs.mtx.RLock() defer cs.mtx.RUnlock() - return cdc.MarshalJSON(cs.RoundState) + return tmjson.Marshal(cs.RoundState) } -// GetRoundStateSimpleJSON returns a json of RoundStateSimple, marshalled using go-amino. +// GetRoundStateSimpleJSON returns a json of RoundStateSimple. func (cs *State) GetRoundStateSimpleJSON() ([]byte, error) { cs.mtx.RLock() defer cs.mtx.RUnlock() - return cdc.MarshalJSON(cs.RoundState.RoundStateSimple()) + return tmjson.Marshal(cs.RoundState.RoundStateSimple()) } // GetValidators returns a copy of the current validators. @@ -252,11 +270,17 @@ func (cs *State) GetValidators() (int64, []*types.Validator) { return cs.state.LastBlockHeight, cs.state.Validators.Copy().Validators } -// SetPrivValidator sets the private validator account for signing votes. +// SetPrivValidator sets the private validator account for signing votes. It +// immediately requests pubkey and caches it. func (cs *State) SetPrivValidator(priv types.PrivValidator) { cs.mtx.Lock() + defer cs.mtx.Unlock() + cs.privValidator = priv - cs.mtx.Unlock() + + if err := cs.updatePrivValidatorPubKey(); err != nil { + cs.Logger.Error("Can't get private validator pubkey", "err", err) + } } // SetTimeoutTicker sets the local timer. It may be useful to overwrite for testing. @@ -276,23 +300,65 @@ func (cs *State) LoadCommit(height int64) *types.Commit { return cs.blockStore.LoadBlockCommit(height) } -// OnStart implements service.Service. -// It loads the latest state via the WAL, and starts the timeout and receive routines. +// OnStart loads the latest state via the WAL, and starts the timeout and +// receive routines. func (cs *State) OnStart() error { - if err := cs.evsw.Start(); err != nil { - return err - } - - // we may set the WAL in testing before calling Start, - // so only OpenWAL if its still the nilWAL + // We may set the WAL in testing before calling Start, so only OpenWAL if it's + // still the nilWAL. if _, ok := cs.wal.(nilWAL); ok { - walFile := cs.config.WalFile() - wal, err := cs.OpenWAL(walFile) - if err != nil { - cs.Logger.Error("Error loading State wal", "err", err.Error()) + if err := cs.loadWalFile(); err != nil { return err } - cs.wal = wal + } + + // We may have lost some votes if the process crashed; reload from consensus + // log to catch up. + if cs.doWALCatchup { + repairAttempted := false + LOOP: + for { + err := cs.catchupReplay(cs.Height) + switch { + case err == nil: + break LOOP + case !IsDataCorruptionError(err): + cs.Logger.Error("Error on catchup replay. Proceeding to start State anyway", "err", err) + break LOOP + case repairAttempted: + return err + } + + cs.Logger.Error("WAL file is corrupted, attempting repair", "err", err) + + // 1) prep work + if err := cs.wal.Stop(); err != nil { + return err + } + repairAttempted = true + + // 2) backup original WAL file + corruptedFile := fmt.Sprintf("%s.CORRUPTED", cs.config.WalFile()) + if err := tmos.CopyFile(cs.config.WalFile(), corruptedFile); err != nil { + return err + } + cs.Logger.Info("Backed up WAL file", "src", cs.config.WalFile(), "dst", corruptedFile) + + // 3) try to repair (WAL file will be overwritten!)
+ if err := repairWalFile(corruptedFile, cs.config.WalFile()); err != nil { + cs.Logger.Error("WAL repair failed", "err", err) + return err + } + cs.Logger.Info("Successful repair") + + // reload WAL file + if err := cs.loadWalFile(); err != nil { + return err + } + } + } + + if err := cs.evsw.Start(); err != nil { + return err } // we need the timeoutRoutine for replay so @@ -304,31 +370,9 @@ func (cs *State) OnStart() error { return err } - // we may have lost some votes if the process crashed - // reload from consensus log to catchup - if cs.doWALCatchup { - if err := cs.catchupReplay(cs.Height); err != nil { - // don't try to recover from data corruption error - if IsDataCorruptionError(err) { - cs.Logger.Error("Encountered corrupt WAL file", "err", err.Error()) - cs.Logger.Error("Please repair the WAL file before restarting") - fmt.Println(`You can attempt to repair the WAL as follows: - ----- -WALFILE=~/.tendermint/data/cs.wal/wal -cp $WALFILE ${WALFILE}.bak # backup the file -go run scripts/wal2json/main.go $WALFILE > wal.json # this will panic, but can be ignored -rm $WALFILE # remove the corrupt file -go run scripts/json2wal/main.go wal.json $WALFILE # rebuild the file without corruption -----`) - - return err - } - - cs.Logger.Error("Error on catchup replay. Proceeding to start State anyway", "err", err.Error()) - // NOTE: if we ever do return an error here, - // make sure to stop the timeoutTicker - } + // Double Signing Risk Reduction + if err := cs.checkDoubleSigningRisk(cs.Height); err != nil { + return err } // now start the receiveRoutine @@ -352,10 +396,25 @@ func (cs *State) startRoutines(maxSteps int) { go cs.receiveRoutine(maxSteps) } +// loadWalFile loads WAL data from file. It overwrites cs.wal. +func (cs *State) loadWalFile() error { + wal, err := cs.OpenWAL(cs.config.WalFile()) + if err != nil { + cs.Logger.Error("Error loading State wal", "err", err) + return err + } + cs.wal = wal + return nil +} + // OnStop implements service.Service. func (cs *State) OnStop() { - cs.evsw.Stop() - cs.timeoutTicker.Stop() + if err := cs.evsw.Stop(); err != nil { + cs.Logger.Error("error trying to stop eventSwitch", "error", err) + } + if err := cs.timeoutTicker.Stop(); err != nil { + cs.Logger.Error("error trying to stop timeoutTicker", "error", err) + } // WAL is stopped in receiveRoutine. } @@ -366,15 +425,17 @@ func (cs *State) Wait() { <-cs.done } -// OpenWAL opens a file to log all consensus messages and timeouts for deterministic accountability +// OpenWAL opens a file to log all consensus messages and timeouts for +// deterministic accountability. func (cs *State) OpenWAL(walFile string) (WAL, error) { wal, err := NewWAL(walFile) if err != nil { - cs.Logger.Error("Failed to open WAL for consensus state", "wal", walFile, "err", err) + cs.Logger.Error("Failed to open WAL", "file", walFile, "err", err) return nil, err } wal.SetLogger(cs.Logger.With("wal", walFile)) if err := wal.Start(); err != nil { + cs.Logger.Error("Failed to start WAL", "err", err) return nil, err } return wal, nil } @@ -413,7 +474,7 @@ func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error { } // AddProposalBlockPart inputs a part of the proposal block.
-func (cs *State) AddProposalBlockPart(height int64, round int, part *types.Part, peerID p2p.ID) error { +func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID p2p.ID) error { if peerID == "" { cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""} @@ -435,7 +496,7 @@ func (cs *State) SetProposalAndBlock( if err := cs.SetProposal(proposal, peerID); err != nil { return err } - for i := 0; i < parts.Total(); i++ { + for i := 0; i < int(parts.Total()); i++ { part := parts.GetPart(i) if err := cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerID); err != nil { return err @@ -452,20 +513,20 @@ func (cs *State) updateHeight(height int64) { cs.Height = height } -func (cs *State) updateRoundStep(round int, step cstypes.RoundStepType) { +func (cs *State) updateRoundStep(round int32, step cstypes.RoundStepType) { cs.Round = round cs.Step = step } // enterNewRound(height, 0) at cs.StartTime. func (cs *State) scheduleRound0(rs *cstypes.RoundState) { - //cs.Logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime) + // cs.Logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime) sleepDuration := rs.StartTime.Sub(tmtime.Now()) cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight) } // Attempt to schedule a timeout (by sending timeoutInfo on the tickChan) -func (cs *State) scheduleTimeout(duration time.Duration, height int64, round int, step cstypes.RoundStepType) { +func (cs *State) scheduleTimeout(duration time.Duration, height int64, round int32, step cstypes.RoundStepType) { cs.timeoutTicker.ScheduleTimeout(timeoutInfo{duration, height, round, step}) } @@ -486,18 +547,17 @@ func (cs *State) sendInternalMessage(mi msgInfo) { // Reconstruct LastCommit from SeenCommit, which we saved along with the block, // (which happens even before saving the state) func (cs *State) reconstructLastCommit(state sm.State) { - if state.LastBlockHeight == 0 { - return - } seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight) if seenCommit == nil { panic(fmt.Sprintf("Failed to reconstruct LastCommit: seen commit for height %v not found", state.LastBlockHeight)) } + lastPrecommits := types.CommitToVoteSet(state.ChainID, seenCommit, state.LastValidators) if !lastPrecommits.HasTwoThirdsMajority() { panic("Failed to reconstruct LastCommit: Does not have +2/3 maj") } + cs.LastCommit = lastPrecommits } @@ -508,41 +568,62 @@ func (cs *State) updateToState(state sm.State) { panic(fmt.Sprintf("updateToState() expected state height of %v but found %v", cs.Height, state.LastBlockHeight)) } - if !cs.state.IsEmpty() && cs.state.LastBlockHeight+1 != cs.Height { - // This might happen when someone else is mutating cs.state. - // Someone forgot to pass in state.Copy() somewhere?! - panic(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v", - cs.state.LastBlockHeight+1, cs.Height)) - } - - // If state isn't further out than cs.state, just ignore. - // This happens when SwitchToConsensus() is called in the reactor. - // We don't want to reset e.g. the Votes, but we still want to - // signal the new round step, because other services (eg. txNotifier) - // depend on having an up-to-date peer state! 
- if !cs.state.IsEmpty() && (state.LastBlockHeight <= cs.state.LastBlockHeight) { - cs.Logger.Info( - "Ignoring updateToState()", - "newHeight", - state.LastBlockHeight+1, - "oldHeight", - cs.state.LastBlockHeight+1) - cs.newStep() - return + if !cs.state.IsEmpty() { + if cs.state.LastBlockHeight > 0 && cs.state.LastBlockHeight+1 != cs.Height { + // This might happen when someone else is mutating cs.state. + // Someone forgot to pass in state.Copy() somewhere?! + panic(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v", + cs.state.LastBlockHeight+1, cs.Height)) + } + if cs.state.LastBlockHeight > 0 && cs.Height == cs.state.InitialHeight { + panic(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight %v, expected 0 for initial height %v", + cs.state.LastBlockHeight, cs.state.InitialHeight)) + } + + // If state isn't further out than cs.state, just ignore. + // This happens when SwitchToConsensus() is called in the reactor. + // We don't want to reset e.g. the Votes, but we still want to + // signal the new round step, because other services (eg. txNotifier) + // depend on having an up-to-date peer state! + if state.LastBlockHeight <= cs.state.LastBlockHeight { + cs.Logger.Info( + "Ignoring updateToState()", + "newHeight", + state.LastBlockHeight+1, + "oldHeight", + cs.state.LastBlockHeight+1) + cs.newStep() + return + } } // Reset fields based on state. validators := state.Validators - lastPrecommits := (*types.VoteSet)(nil) - if cs.CommitRound > -1 && cs.Votes != nil { + + switch { + case state.LastBlockHeight == 0: // Very first commit should be empty. + cs.LastCommit = (*types.VoteSet)(nil) + case cs.CommitRound > -1 && cs.Votes != nil: // Otherwise, use cs.Votes if !cs.Votes.Precommits(cs.CommitRound).HasTwoThirdsMajority() { - panic("updateToState(state) called but last Precommit round didn't have +2/3") + panic(fmt.Sprintf("Wanted to form a Commit, but Precommits (H/R: %d/%d) didn't have 2/3+: %v", + state.LastBlockHeight, + cs.CommitRound, + cs.Votes.Precommits(cs.CommitRound))) } - lastPrecommits = cs.Votes.Precommits(cs.CommitRound) + cs.LastCommit = cs.Votes.Precommits(cs.CommitRound) + case cs.LastCommit == nil: + // NOTE: when Tendermint starts, it has no votes. reconstructLastCommit + // must be called to reconstruct LastCommit from SeenCommit. + panic(fmt.Sprintf("LastCommit cannot be empty after initial block (H:%d)", + state.LastBlockHeight+1, + )) } // Next desired block height height := state.LastBlockHeight + 1 + if height == 1 { + height = state.InitialHeight + } // RoundState fields cs.updateHeight(height) @@ -570,7 +651,6 @@ func (cs *State) updateToState(state sm.State) { cs.ValidBlockParts = nil cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators) cs.CommitRound = -1 - cs.LastCommit = lastPrecommits cs.LastValidators = state.LastValidators cs.TriggeredTimeoutPrecommit = false @@ -582,11 +662,15 @@ func (cs *State) updateToState(state sm.State) { func (cs *State) newStep() { rs := cs.RoundStateEvent() - cs.wal.Write(rs) + if err := cs.wal.Write(rs); err != nil { + cs.Logger.Error("Error writing to wal", "err", err) + } cs.nSteps++ // newStep is called by updateToState in NewState before the eventBus is set! 
if cs.eventBus != nil { - cs.eventBus.PublishEventNewRoundStep(rs) + if err := cs.eventBus.PublishEventNewRoundStep(rs); err != nil { + cs.Logger.Error("Error publishing new round step", "err", err) + } cs.evsw.FireEvent(types.EventNewRoundStep, &cs.RoundState) } } @@ -606,7 +690,9 @@ func (cs *State) receiveRoutine(maxSteps int) { // priv_val tracks LastSig // close wal now that we're done writing to it - cs.wal.Stop() + if err := cs.wal.Stop(); err != nil { + cs.Logger.Error("error trying to stop wal", "error", err) + } cs.wal.Wait() close(cs.done) @@ -642,7 +728,9 @@ func (cs *State) receiveRoutine(maxSteps int) { case <-cs.txNotifier.TxsAvailable(): cs.handleTxsAvailable() case mi = <-cs.peerMsgQueue: - cs.wal.Write(mi) + if err := cs.wal.Write(mi); err != nil { + cs.Logger.Error("Error writing to wal", "err", err) + } // handles proposals, block parts, votes // may generate internal events (votes, complete proposals, 2/3 majorities) cs.handleMsg(mi) @@ -663,7 +751,9 @@ func (cs *State) receiveRoutine(maxSteps int) { // handles proposals, block parts, votes cs.handleMsg(mi) case ti := <-cs.timeoutTicker.Chan(): // tockChan: - cs.wal.Write(ti) + if err := cs.wal.Write(ti); err != nil { + cs.Logger.Error("Error writing to wal", "err", err) + } // if the timeout is relevant to the rs // go to the next step cs.handleTimeout(ti, rs) @@ -734,11 +824,9 @@ func (cs *State) handleMsg(mi msgInfo) { return } - if err != nil { // nolint:staticcheck - // Causes TestReactorValidatorSetChanges to timeout - // https://github.com/tendermint/tendermint/issues/3406 - // cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round, - // "peer", peerID, "err", err, "msg", msg) + if err != nil { + cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round, + "peer", peerID, "err", err, "msg", msg) } } @@ -763,13 +851,19 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { case cstypes.RoundStepNewRound: cs.enterPropose(ti.Height, 0) case cstypes.RoundStepPropose: - cs.eventBus.PublishEventTimeoutPropose(cs.RoundStateEvent()) + if err := cs.eventBus.PublishEventTimeoutPropose(cs.RoundStateEvent()); err != nil { + cs.Logger.Error("Error publishing timeout propose", "err", err) + } cs.enterPrevote(ti.Height, ti.Round) case cstypes.RoundStepPrevoteWait: - cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()) + if err := cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()); err != nil { + cs.Logger.Error("Error publishing timeout wait", "err", err) + } cs.enterPrecommit(ti.Height, ti.Round) case cstypes.RoundStepPrecommitWait: - cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()) + if err := cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()); err != nil { + cs.Logger.Error("Error publishing timeout wait", "err", err) + } cs.enterPrecommit(ti.Height, ti.Round) cs.enterNewRound(ti.Height, ti.Round+1) default: @@ -812,7 +906,7 @@ func (cs *State) handleTxsAvailable() { // Enter: +2/3 precommits for nil at (height,round-1) // Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round) // NOTE: cs.StartTime was already set for height. 
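// Rounds are int32 from this diff onward, and round arithmetic goes through
// the tmmath helpers so that overflow fails loudly instead of wrapping.
// A sketch of the pattern enterNewRound uses below (tmmath behavior per
// libs/math; assumed to panic on int32 overflow):
//
//	next := tmmath.SafeAddInt32(round, 1)        // round+1
//	skip := tmmath.SafeSubInt32(round, cs.Round) // round-cs.Round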
-func (cs *State) enterNewRound(height int64, round int) { +func (cs *State) enterNewRound(height int64, round int32) { logger := cs.Logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) { @@ -836,7 +930,7 @@ func (cs *State) enterNewRound(height int64, round int) { validators := cs.Validators if cs.Round < round { validators = validators.Copy() - validators.IncrementProposerPriority(round - cs.Round) + validators.IncrementProposerPriority(tmmath.SafeSubInt32(round, cs.Round)) } // Setup new round @@ -854,10 +948,12 @@ func (cs *State) enterNewRound(height int64, round int) { cs.ProposalBlock = nil cs.ProposalBlockParts = nil } - cs.Votes.SetRound(round + 1) // also track next round (round+1) to allow round-skipping + cs.Votes.SetRound(tmmath.SafeAddInt32(round, 1)) // also track next round (round+1) to allow round-skipping cs.TriggeredTimeoutPrecommit = false - cs.eventBus.PublishEventNewRound(cs.NewRoundEvent()) + if err := cs.eventBus.PublishEventNewRound(cs.NewRoundEvent()); err != nil { + cs.Logger.Error("Error publishing new round", "err", err) + } cs.metrics.Rounds.Set(float64(round)) // Wait for txs to be available in the mempool @@ -877,7 +973,7 @@ func (cs *State) enterNewRound(height int64, round int) { // needProofBlock returns true on the first height (so the genesis app hash is signed right away) // and where the last block (height-1) caused the app hash to change func (cs *State) needProofBlock(height int64) bool { - if height == 1 { + if height == cs.state.InitialHeight { return true } @@ -892,7 +988,7 @@ func (cs *State) needProofBlock(height int64) bool { // Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): // after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval // Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool -func (cs *State) enterPropose(height int64, round int) { +func (cs *State) enterPropose(height int64, round int32) { logger := cs.Logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) { @@ -930,14 +1026,13 @@ func (cs *State) enterPropose(height int64, round int) { } logger.Debug("This node is a validator") - pubKey, err := cs.privValidator.GetPubKey() - if err != nil { + if cs.privValidatorPubKey == nil { // If this node is a validator & proposer in the current round, it will // miss the opportunity to create a block. - logger.Error("Error on retrival of pubkey", "err", err) + logger.Error(fmt.Sprintf("enterPropose: %v", errPubKeyIsNotSet)) return } - address := pubKey.Address() + address := cs.privValidatorPubKey.Address() // if not a validator, we're done if !cs.Validators.HasAddress(address) { @@ -965,7 +1060,7 @@ func (cs *State) isProposer(address []byte) bool { return bytes.Equal(cs.Validators.GetProposer().Address, address) } -func (cs *State) defaultDecideProposal(height int64, round int) { +func (cs *State) defaultDecideProposal(height int64, round int32) { var block *types.Block var blockParts *types.PartSet @@ -983,16 +1078,20 @@ func (cs *State) defaultDecideProposal(height int64, round int) { // Flush the WAL. Otherwise, we may not recompute the same proposal to sign, // and the privValidator will refuse to sign anything. 
- cs.wal.FlushAndSync() + if err := cs.wal.FlushAndSync(); err != nil { + cs.Logger.Error("Error flushing to disk", "err", err) + } // Make proposal - propBlockID := types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()} + propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID) - if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal); err == nil { + p := proposal.ToProto() + if err := cs.privValidator.SignProposal(cs.state.ChainID, p); err == nil { + proposal.Signature = p.Signature // send proposal and block parts on internal msg queue cs.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""}) - for i := 0; i < blockParts.Total(); i++ { + for i := 0; i < int(blockParts.Total()); i++ { part := blockParts.GetPart(i) cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, ""}) } @@ -1027,9 +1126,13 @@ func (cs *State) isProposalComplete() bool { // NOTE: keep it side-effect free for clarity. // CONTRACT: cs.privValidator is not nil. func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.PartSet) { + if cs.privValidator == nil { + panic("entered createProposalBlock with privValidator being nil") + } + var commit *types.Commit switch { - case cs.Height == 1: + case cs.Height == cs.state.InitialHeight: // We're creating a proposal for the first block. // The commit is empty, but not nil. commit = types.NewCommit(0, 0, types.BlockID{}, nil) @@ -1041,17 +1144,13 @@ func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.Pa return } - if cs.privValidator == nil { - panic("entered createProposalBlock with privValidator being nil") - } - pubKey, err := cs.privValidator.GetPubKey() - if err != nil { + if cs.privValidatorPubKey == nil { // If this node is a validator & proposer in the current round, it will // miss the opportunity to create a block. - cs.Logger.Error("Error on retrival of pubkey", "err", err) + cs.Logger.Error(fmt.Sprintf("createProposalBlock: %v", errPubKeyIsNotSet)) return } - proposerAddr := pubKey.Address() + proposerAddr := cs.privValidatorPubKey.Address() return cs.blockExec.CreateProposalBlock(cs.Height, cs.state, commit, proposerAddr) } @@ -1060,7 +1159,7 @@ func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.Pa // Enter: proposal block and POL is ready. // Prevote for LockedBlock if we're locked, or ProposalBlock if valid. // Otherwise vote nil. -func (cs *State) enterPrevote(height int64, round int) { +func (cs *State) enterPrevote(height int64, round int32) { if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) { cs.Logger.Debug(fmt.Sprintf( "enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", @@ -1087,20 +1186,20 @@ func (cs *State) enterPrevote(height int64, round int) { // (so we have more time to try and collect +2/3 prevotes for a single block) } -func (cs *State) defaultDoPrevote(height int64, round int) { +func (cs *State) defaultDoPrevote(height int64, round int32) { logger := cs.Logger.With("height", height, "round", round) // If a block is locked, prevote that.
if cs.LockedBlock != nil { - logger.Info("enterPrevote: Block was locked") - cs.signAddVote(types.PrevoteType, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header()) + logger.Info("enterPrevote: Already locked on a block, prevoting locked block") + cs.signAddVote(tmproto.PrevoteType, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header()) return } // If ProposalBlock is nil, prevote nil. if cs.ProposalBlock == nil { logger.Info("enterPrevote: ProposalBlock is nil") - cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{}) + cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) return } @@ -1109,7 +1208,7 @@ func (cs *State) defaultDoPrevote(height int64, round int) { if err != nil { // ProposalBlock is invalid, prevote nil. logger.Error("enterPrevote: ProposalBlock is invalid", "err", err) - cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{}) + cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) return } @@ -1117,11 +1216,11 @@ func (cs *State) defaultDoPrevote(height int64, round int) { // NOTE: the proposal signature is validated when it is received, // and the proposal block parts are validated as they are received (against the merkle hash in the proposal) logger.Info("enterPrevote: ProposalBlock is valid") - cs.signAddVote(types.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) + cs.signAddVote(tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) } // Enter: any +2/3 prevotes at next round. -func (cs *State) enterPrevoteWait(height int64, round int) { +func (cs *State) enterPrevoteWait(height int64, round int32) { logger := cs.Logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) { @@ -1155,7 +1254,7 @@ func (cs *State) enterPrevoteWait(height int64, round int) { // Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round) // else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil, // else, precommit nil otherwise. -func (cs *State) enterPrecommit(height int64, round int) { +func (cs *State) enterPrecommit(height int64, round int32) { logger := cs.Logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) { @@ -1187,12 +1286,14 @@ func (cs *State) enterPrecommit(height int64, round int) { } else { logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.") } - cs.signAddVote(types.PrecommitType, nil, types.PartSetHeader{}) + cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) return } // At this point +2/3 prevoted for a particular block or nil. - cs.eventBus.PublishEventPolka(cs.RoundStateEvent()) + if err := cs.eventBus.PublishEventPolka(cs.RoundStateEvent()); err != nil { + cs.Logger.Error("Error publishing polka", "err", err) + } // the latest POLRound should be this round. 
polRound, _ := cs.Votes.POLInfo() @@ -1209,9 +1310,11 @@ func (cs *State) enterPrecommit(height int64, round int) { cs.LockedRound = -1 cs.LockedBlock = nil cs.LockedBlockParts = nil - cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) + if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil { + cs.Logger.Error("Error publishing event unlock", "err", err) + } } - cs.signAddVote(types.PrecommitType, nil, types.PartSetHeader{}) + cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) return } @@ -1221,8 +1324,10 @@ func (cs *State) enterPrecommit(height int64, round int) { if cs.LockedBlock.HashesTo(blockID.Hash) { logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking") cs.LockedRound = round - cs.eventBus.PublishEventRelock(cs.RoundStateEvent()) - cs.signAddVote(types.PrecommitType, blockID.Hash, blockID.PartsHeader) + if err := cs.eventBus.PublishEventRelock(cs.RoundStateEvent()); err != nil { + cs.Logger.Error("Error publishing event relock", "err", err) + } + cs.signAddVote(tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) return } @@ -1236,28 +1341,32 @@ func (cs *State) enterPrecommit(height int64, round int) { cs.LockedRound = round cs.LockedBlock = cs.ProposalBlock cs.LockedBlockParts = cs.ProposalBlockParts - cs.eventBus.PublishEventLock(cs.RoundStateEvent()) - cs.signAddVote(types.PrecommitType, blockID.Hash, blockID.PartsHeader) + if err := cs.eventBus.PublishEventLock(cs.RoundStateEvent()); err != nil { + cs.Logger.Error("Error publishing event lock", "err", err) + } + cs.signAddVote(tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) return } // There was a polka in this round for a block we don't have. // Fetch that block, unlock, and precommit nil. // The +2/3 prevotes for this round is the POL for our unlock. - // TODO: In the future save the POL prevotes for justification. + logger.Info("enterPrecommit: +2/3 prevotes for a block we don't have. Voting nil", "blockID", blockID) cs.LockedRound = -1 cs.LockedBlock = nil cs.LockedBlockParts = nil - if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) { + if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) { cs.ProposalBlock = nil - cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader) + cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) + } + if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil { + cs.Logger.Error("Error publishing event unlock", "err", err) } - cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) - cs.signAddVote(types.PrecommitType, nil, types.PartSetHeader{}) + cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) } // Enter: any +2/3 precommits for next round. 
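// ----------------------------------------------------------------------------
// Illustrative sketch, not part of the diff: enterPrecommit above chooses among
// five outcomes depending on the polka (+2/3 prevotes) seen this round. The
// inputs are hypothetical simplifications of the real round state.
package main

import "fmt"

func precommitAction(havePolka, polkaIsNil, polkaIsLockedBlock, polkaIsProposal bool) string {
	switch {
	case !havePolka:
		return "precommit nil (no +2/3 prevotes)"
	case polkaIsNil:
		return "unlock (if locked) and precommit nil"
	case polkaIsLockedBlock:
		return "relock and precommit the locked block"
	case polkaIsProposal:
		return "lock and precommit the proposal block"
	default:
		// polka for a block we don't have: unlock, fetch its parts, precommit nil
		return "unlock, wait for the polka block, precommit nil"
	}
}

func main() {
	fmt.Println(precommitAction(true, false, false, true)) // lock and precommit the proposal block
}
// ----------------------------------------------------------------------------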
-func (cs *State) enterPrecommitWait(height int64, round int) { +func (cs *State) enterPrecommitWait(height int64, round int32) { logger := cs.Logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cs.TriggeredTimeoutPrecommit) { @@ -1281,11 +1390,10 @@ func (cs *State) enterPrecommitWait(height int64, round int) { // Wait for some more precommits; enterNewRound cs.scheduleTimeout(cs.config.Precommit(round), height, round, cstypes.RoundStepPrecommitWait) - } // Enter: +2/3 precommits for block -func (cs *State) enterCommit(height int64, commitRound int) { +func (cs *State) enterCommit(height int64, commitRound int32) { logger := cs.Logger.With("height", height, "commitRound", commitRound) if cs.Height != height || cstypes.RoundStepCommit <= cs.Step { @@ -1328,7 +1436,7 @@ func (cs *State) enterCommit(height int64, commitRound int) { // If we don't have the block being committed, set up to get it. if !cs.ProposalBlock.HashesTo(blockID.Hash) { - if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) { + if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) { logger.Info( "Commit is for a block we don't know about. Set ProposalBlock=nil", "proposal", @@ -1338,8 +1446,10 @@ func (cs *State) enterCommit(height int64, commitRound int) { // We're getting the wrong block. // Set up ProposalBlockParts and keep waiting. cs.ProposalBlock = nil - cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader) - cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()) + cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) + if err := cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()); err != nil { + cs.Logger.Error("Error publishing valid block", "err", err) + } cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState) } // else { @@ -1393,16 +1503,16 @@ func (cs *State) finalizeCommit(height int64) { block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts if !ok { - panic(fmt.Sprintf("Cannot finalizeCommit, commit does not have two thirds majority")) + panic("Cannot finalizeCommit, commit does not have two thirds majority") } - if !blockParts.HasHeader(blockID.PartsHeader) { - panic(fmt.Sprintf("Expected ProposalBlockParts header to be commit header")) + if !blockParts.HasHeader(blockID.PartSetHeader) { + panic("Expected ProposalBlockParts header to be commit header") } if !block.HashesTo(blockID.Hash) { - panic(fmt.Sprintf("Cannot finalizeCommit, ProposalBlock does not hash to commit hash")) + panic("Cannot finalizeCommit, ProposalBlock does not hash to commit hash") } if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil { - panic(fmt.Sprintf("+2/3 committed an invalid block: %v", err)) + panic(fmt.Errorf("+2/3 committed an invalid block: %w", err)) } cs.Logger.Info("Finalizing commit of block with N txs", @@ -1458,14 +1568,10 @@ func (cs *State) finalizeCommit(height int64) { var retainHeight int64 stateCopy, retainHeight, err = cs.blockExec.ApplyBlock( stateCopy, - types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}, + types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}, block) if err != nil { - cs.Logger.Error("Error on ApplyBlock. Did the application crash? 
Please restart tendermint", "err", err)
-		err := tmos.Kill()
-		if err != nil {
-			cs.Logger.Error("Failed to kill this process - please do so manually", "err", err)
-		}
+		cs.Logger.Error("Error on ApplyBlock", "err", err)
 		return
 	}
@@ -1489,6 +1595,11 @@ func (cs *State) finalizeCommit(height int64) {
 	fail.Fail() // XXX

+	// Private validator might have changed its key pair => refetch pubkey.
+	if err := cs.updatePrivValidatorPubKey(); err != nil {
+		cs.Logger.Error("Can't get private validator pubkey", "err", err)
+	}
+
 	// cs.StartTime is already set.
 	// Schedule Round0 to start soon.
 	cs.scheduleRound0(&cs.RoundState)
@@ -1508,7 +1619,7 @@ func (cs *State) pruneBlocks(retainHeight int64) (uint64, error) {
 	if err != nil {
 		return 0, fmt.Errorf("failed to prune block store: %w", err)
 	}
-	err = sm.PruneStates(cs.blockExec.DB(), base, retainHeight)
+	err = cs.blockExec.Store().PruneStates(base, retainHeight)
 	if err != nil {
 		return 0, fmt.Errorf("failed to prune state database: %w", err)
 	}
@@ -1526,18 +1637,28 @@ func (cs *State) recordMetrics(height int64, block *types.Block) {
 	// height=0 -> MissingValidators and MissingValidatorsPower are both 0.
 	// Remember that the first LastCommit is intentionally empty, so it's not
 	// fair to increment missing validators number.
-	if height > 1 {
+	if height > cs.state.InitialHeight {
 		// Sanity check that commit size matches validator set size - only applies
 		// after first block.
 		var (
 			commitSize = block.LastCommit.Size()
 			valSetLen  = len(cs.LastValidators.Validators)
+			address    types.Address
 		)
 		if commitSize != valSetLen {
 			panic(fmt.Sprintf("commit size (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v",
 				commitSize, valSetLen, block.Height, block.LastCommit.Signatures, cs.LastValidators.Validators))
 		}
+
+		if cs.privValidator != nil {
+			if cs.privValidatorPubKey == nil {
+				// Metrics won't be updated, but it's not critical.
+				cs.Logger.Error(fmt.Sprintf("recordMetrics: %v", errPubKeyIsNotSet))
+			} else {
+				address = cs.privValidatorPubKey.Address()
+			}
+		}
+
 		for i, val := range cs.LastValidators.Validators {
 			commitSig := block.LastCommit.Signatures[i]
 			if commitSig.Absent() {
@@ -1545,44 +1666,43 @@ func (cs *State) recordMetrics(height int64, block *types.Block) {
 				missingValidatorsPower += val.VotingPower
 			}

-			if cs.privValidator != nil {
-				pubKey, err := cs.privValidator.GetPubKey()
-				if err != nil {
-					// Metrics won't be updated, but it's not critical.
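// ----------------------------------------------------------------------------
// Illustrative sketch, not part of the diff: pruneBlocks above wraps failures
// with fmt.Errorf("...: %w", err), which keeps the underlying error
// inspectable via errors.Is/errors.As. The store function here is a
// hypothetical stand-in.
package main

import (
	"errors"
	"fmt"
)

var errNothingToPrune = errors.New("nothing to prune")

func pruneBlockStore(retainHeight int64) (int64, error) {
	if retainHeight <= 1 {
		return 0, errNothingToPrune
	}
	return retainHeight - 1, nil // pretend everything below retainHeight was pruned
}

func prune(retainHeight int64) (int64, error) {
	pruned, err := pruneBlockStore(retainHeight)
	if err != nil {
		return 0, fmt.Errorf("failed to prune block store: %w", err)
	}
	return pruned, nil
}

func main() {
	_, err := prune(1)
	fmt.Println(errors.Is(err, errNothingToPrune)) // true: %w preserved the cause
}
// ----------------------------------------------------------------------------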
- cs.Logger.Error("Error on retrival of pubkey", "err", err) - continue + if bytes.Equal(val.Address, address) { + label := []string{ + "validator_address", val.Address.String(), } - - if bytes.Equal(val.Address, pubKey.Address()) { - label := []string{ - "validator_address", val.Address.String(), - } - cs.metrics.ValidatorPower.With(label...).Set(float64(val.VotingPower)) - if commitSig.ForBlock() { - cs.metrics.ValidatorLastSignedHeight.With(label...).Set(float64(height)) - } else { - cs.metrics.ValidatorMissedBlocks.With(label...).Add(float64(1)) - } + cs.metrics.ValidatorPower.With(label...).Set(float64(val.VotingPower)) + if commitSig.ForBlock() { + cs.metrics.ValidatorLastSignedHeight.With(label...).Set(float64(height)) + } else { + cs.metrics.ValidatorMissedBlocks.With(label...).Add(float64(1)) } } + } } cs.metrics.MissingValidators.Set(float64(missingValidators)) cs.metrics.MissingValidatorsPower.Set(float64(missingValidatorsPower)) - cs.metrics.ByzantineValidators.Set(float64(len(block.Evidence.Evidence))) - byzantineValidatorsPower := int64(0) + // NOTE: byzantine validators power and count is only for consensus evidence i.e. duplicate vote + var ( + byzantineValidatorsPower = int64(0) + byzantineValidatorsCount = int64(0) + ) for _, ev := range block.Evidence.Evidence { - if _, val := cs.Validators.GetByAddress(ev.Address()); val != nil { - byzantineValidatorsPower += val.VotingPower + if dve, ok := ev.(*types.DuplicateVoteEvidence); ok { + if _, val := cs.Validators.GetByAddress(dve.VoteA.ValidatorAddress); val != nil { + byzantineValidatorsCount++ + byzantineValidatorsPower += val.VotingPower + } } } + cs.metrics.ByzantineValidators.Set(float64(byzantineValidatorsCount)) cs.metrics.ByzantineValidatorsPower.Set(float64(byzantineValidatorsPower)) if height > 1 { lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) if lastBlockMeta != nil { - cs.metrics.BlockIntervalSeconds.Set( + cs.metrics.BlockIntervalSeconds.Observe( block.Time.Sub(lastBlockMeta.Header.Time).Seconds(), ) } @@ -1614,17 +1734,21 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { return ErrInvalidProposalPOLRound } + p := proposal.ToProto() // Verify signature - if !cs.Validators.GetProposer().PubKey.VerifyBytes(proposal.SignBytes(cs.state.ChainID), proposal.Signature) { + if !cs.Validators.GetProposer().PubKey.VerifySignature( + types.ProposalSignBytes(cs.state.ChainID, p), proposal.Signature, + ) { return ErrInvalidProposalSignature } + proposal.Signature = p.Signature cs.Proposal = proposal // We don't update cs.ProposalBlockParts if it is already set. // This happens if we're already in cstypes.RoundStepCommit or if there is a valid block in the current round. // TODO: We can check if Proposal is for a different block as this is a sign of misbehavior! if cs.ProposalBlockParts == nil { - cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartsHeader) + cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartSetHeader) } cs.Logger.Info("Received proposal", "proposal", proposal) return nil @@ -1655,19 +1779,34 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add if err != nil { return added, err } - if added && cs.ProposalBlockParts.IsComplete() { - // Added and completed! 
- _, err = cdc.UnmarshalBinaryLengthPrefixedReader( - cs.ProposalBlockParts.GetReader(), - &cs.ProposalBlock, - cs.state.ConsensusParams.Block.MaxBytes, + if cs.ProposalBlockParts.ByteSize() > cs.state.ConsensusParams.Block.MaxBytes { + return added, fmt.Errorf("total size of proposal block parts exceeds maximum block bytes (%d > %d)", + cs.ProposalBlockParts.ByteSize(), cs.state.ConsensusParams.Block.MaxBytes, ) + } + if added && cs.ProposalBlockParts.IsComplete() { + bz, err := ioutil.ReadAll(cs.ProposalBlockParts.GetReader()) if err != nil { return added, err } + + var pbb = new(tmproto.Block) + err = proto.Unmarshal(bz, pbb) + if err != nil { + return added, err + } + + block, err := types.BlockFromProto(pbb) + if err != nil { + return added, err + } + + cs.ProposalBlock = block // NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash()) - cs.eventBus.PublishEventCompleteProposal(cs.CompleteProposalEvent()) + if err := cs.eventBus.PublishEventCompleteProposal(cs.CompleteProposalEvent()); err != nil { + cs.Logger.Error("Error publishing event complete proposal", "err", err) + } // Update Valid* if we can. prevotes := cs.Votes.Prevotes(cs.Round) @@ -1710,15 +1849,12 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) { // But if it's a conflicting sig, add it to the cs.evpool. // If it's otherwise invalid, punish peer. // nolint: gocritic - if err == ErrVoteHeightMismatch { - return added, err - } else if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok { - pubKey, err := cs.privValidator.GetPubKey() - if err != nil { - return false, errors.Wrap(err, "can't get pubkey") + if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok { + if cs.privValidatorPubKey == nil { + return false, errPubKeyIsNotSet } - if bytes.Equal(vote.ValidatorAddress, pubKey.Address()) { + if bytes.Equal(vote.ValidatorAddress, cs.privValidatorPubKey.Address()) { cs.Logger.Error( "Found conflicting vote from ourselves. Did you unsafe_reset a validator?", "height", @@ -1729,7 +1865,21 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) { vote.Type) return added, err } - cs.evpool.AddEvidence(voteErr.DuplicateVoteEvidence) + var timestamp time.Time + if voteErr.VoteA.Height == cs.state.InitialHeight { + timestamp = cs.state.LastBlockTime // genesis time + } else { + timestamp = sm.MedianTime(cs.LastCommit.MakeCommit(), cs.LastValidators) + } + // form duplicate vote evidence from the conflicting votes and send it across to the + // evidence pool + ev := types.NewDuplicateVoteEvidence(voteErr.VoteA, voteErr.VoteB, timestamp, cs.Validators) + evidenceErr := cs.evpool.AddEvidenceFromConsensus(ev) + if evidenceErr != nil { + cs.Logger.Error("Failed to add evidence to the evidence pool", "err", evidenceErr) + } else { + cs.Logger.Debug("Added evidence to the evidence pool", "ev", ev) + } return added, err } else if err == types.ErrVoteNonDeterministicSignature { cs.Logger.Debug("Vote has non-deterministic signature", "err", err) @@ -1765,19 +1915,21 @@ func (cs *State) addVote( // A precommit for the previous height? // These come in while we wait timeoutCommit - if vote.Height+1 == cs.Height { - if !(cs.Step == cstypes.RoundStepNewHeight && vote.Type == types.PrecommitType) { - // TODO: give the reason .. 
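// ----------------------------------------------------------------------------
// Illustrative sketch, not part of the diff: once the part set is complete,
// addProposalBlockPart above reads the concatenated parts and decodes the
// block, rejecting part sets larger than MaxBytes first. json stands in for
// proto.Unmarshal; the types are hypothetical.
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

type block struct {
	Height int64
}

func decodeBlock(r io.Reader, byteSize, maxBytes int64) (*block, error) {
	if byteSize > maxBytes {
		return nil, fmt.Errorf("total size of proposal block parts exceeds maximum block bytes (%d > %d)",
			byteSize, maxBytes)
	}
	bz, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	var b block
	if err := json.Unmarshal(bz, &b); err != nil { // stand-in for proto.Unmarshal + BlockFromProto
		return nil, err
	}
	return &b, nil
}

func main() {
	wire := `{"Height":5}`
	b, err := decodeBlock(strings.NewReader(wire), int64(len(wire)), 1024)
	fmt.Println(b.Height, err) // 5 <nil>
}
// ----------------------------------------------------------------------------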
- // fmt.Errorf("tryAddVote: Wrong height, not a LastCommit straggler commit.") - return added, ErrVoteHeightMismatch + if vote.Height+1 == cs.Height && vote.Type == tmproto.PrecommitType { + if cs.Step != cstypes.RoundStepNewHeight { + // Late precommit at prior height is ignored + cs.Logger.Debug("Precommit vote came in after commit timeout and has been ignored", "vote", vote) + return } added, err = cs.LastCommit.AddVote(vote) if !added { - return added, err + return } cs.Logger.Info(fmt.Sprintf("Added to lastPrecommits: %v", cs.LastCommit.StringShort())) - cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}) + if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil { + return added, err + } cs.evsw.FireEvent(types.EventVote, vote) // if we can skip timeoutCommit and have all the votes now, @@ -1793,7 +1945,6 @@ func (cs *State) addVote( // Height mismatch is ignored. // Not necessarily a bad peer, but not favourable behaviour. if vote.Height != cs.Height { - err = ErrVoteHeightMismatch cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "peerID", peerID) return } @@ -1805,11 +1956,13 @@ func (cs *State) addVote( return } - cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}) + if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil { + return added, err + } cs.evsw.FireEvent(types.EventVote, vote) switch vote.Type { - case types.PrevoteType: + case tmproto.PrevoteType: prevotes := cs.Votes.Prevotes(vote.Round) cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort()) @@ -1831,7 +1984,9 @@ func (cs *State) addVote( cs.LockedRound = -1 cs.LockedBlock = nil cs.LockedBlockParts = nil - cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) + if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil { + return added, err + } } // Update Valid* if we can. @@ -1851,11 +2006,13 @@ func (cs *State) addVote( // We're getting the wrong block. cs.ProposalBlock = nil } - if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) { - cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader) + if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) { + cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) } cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState) - cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()) + if err := cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()); err != nil { + return added, err + } } } @@ -1878,7 +2035,7 @@ func (cs *State) addVote( } } - case types.PrecommitType: + case tmproto.PrecommitType: precommits := cs.Votes.Precommits(vote.Round) cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort()) @@ -1901,7 +2058,7 @@ func (cs *State) addVote( } default: - panic(fmt.Sprintf("Unexpected vote type %X", vote.Type)) // go-amino should prevent this. + panic(fmt.Sprintf("Unexpected vote type %v", vote.Type)) } return added, err @@ -1909,19 +2066,20 @@ func (cs *State) addVote( // CONTRACT: cs.privValidator is not nil. func (cs *State) signVote( - msgType types.SignedMsgType, + msgType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader, ) (*types.Vote, error) { // Flush the WAL. Otherwise, we may not recompute the same vote to sign, // and the privValidator will refuse to sign anything. 
-	cs.wal.FlushAndSync()
+	if err := cs.wal.FlushAndSync(); err != nil {
+		return nil, err
+	}
-	pubKey, err := cs.privValidator.GetPubKey()
-	if err != nil {
-		return nil, errors.Wrap(err, "can't get pubkey")
+	if cs.privValidatorPubKey == nil {
+		return nil, errPubKeyIsNotSet
 	}
-	addr := pubKey.Address()
+	addr := cs.privValidatorPubKey.Address()
 	valIdx, _ := cs.Validators.GetByAddress(addr)
 	vote := &types.Vote{
@@ -1931,10 +2089,12 @@ func (cs *State) signVote(
 		Round:     cs.Round,
 		Timestamp: cs.voteTime(),
 		Type:      msgType,
-		BlockID:   types.BlockID{Hash: hash, PartsHeader: header},
+		BlockID:   types.BlockID{Hash: hash, PartSetHeader: header},
 	}
+	v := vote.ToProto()
+	err := cs.privValidator.SignVote(cs.state.ChainID, v)
+	vote.Signature = v.Signature
-	err = cs.privValidator.SignVote(cs.state.ChainID, vote)
 	return vote, err
 }
@@ -1943,12 +2103,12 @@ func (cs *State) voteTime() time.Time {
 	minVoteTime := now
 	// TODO: We should remove next line in case we don't vote for v in case cs.ProposalBlock == nil,
 	// even if cs.LockedBlock != nil. See https://docs.tendermint.com/master/spec/.
-	timeIotaMs := time.Duration(cs.state.ConsensusParams.Block.TimeIotaMs) * time.Millisecond
+	timeIota := time.Duration(cs.state.ConsensusParams.Block.TimeIotaMs) * time.Millisecond
 	if cs.LockedBlock != nil {
 		// See the BFT time spec https://docs.tendermint.com/master/spec/consensus/bft-time.html
-		minVoteTime = cs.LockedBlock.Time.Add(timeIotaMs)
+		minVoteTime = cs.LockedBlock.Time.Add(timeIota)
 	} else if cs.ProposalBlock != nil {
-		minVoteTime = cs.ProposalBlock.Time.Add(timeIotaMs)
+		minVoteTime = cs.ProposalBlock.Time.Add(timeIota)
 	}
 	if now.After(minVoteTime) {
@@ -1958,20 +2118,19 @@ func (cs *State) voteTime() time.Time {
 }
 // sign the vote and publish on internalMsgQueue
-func (cs *State) signAddVote(msgType types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote {
+func (cs *State) signAddVote(msgType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote {
 	if cs.privValidator == nil { // the node does not have a key
 		return nil
 	}
-	pubKey, err := cs.privValidator.GetPubKey()
-	if err != nil {
+	if cs.privValidatorPubKey == nil {
 		// Vote won't be signed, but it's not critical.
-		cs.Logger.Error("Error on retrival of pubkey", "err", err)
+		cs.Logger.Error(fmt.Sprintf("signAddVote: %v", errPubKeyIsNotSet))
 		return nil
 	}
 	// If the node not in the validator set, do nothing.
-	if !cs.Validators.HasAddress(pubKey.Address()) {
+	if !cs.Validators.HasAddress(cs.privValidatorPubKey.Address()) {
 		return nil
 	}
@@ -1979,18 +2138,57 @@ func (cs *State) signAddVote(msgType types.SignedMsgType, hash []byte, header ty
 	vote, err := cs.signVote(msgType, hash, header)
 	if err == nil {
 		cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
-		cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
+		cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote)
 		return vote
 	}
-	//if !cs.replayMode {
 	cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
-	//}
+	// if !cs.replayMode {
+	// }
+	return nil
+}
+
+// updatePrivValidatorPubKey gets the private validator public key and
+// memoizes it. This func returns an error if the private validator is not
+// responding or responds with an error.
+func (cs *State) updatePrivValidatorPubKey() error { + if cs.privValidator == nil { + return nil + } + + pubKey, err := cs.privValidator.GetPubKey() + if err != nil { + return err + } + cs.privValidatorPubKey = pubKey + return nil +} + +// look back to check existence of the node's consensus votes before joining consensus +func (cs *State) checkDoubleSigningRisk(height int64) error { + if cs.privValidator != nil && cs.privValidatorPubKey != nil && cs.config.DoubleSignCheckHeight > 0 && height > 0 { + valAddr := cs.privValidatorPubKey.Address() + doubleSignCheckHeight := cs.config.DoubleSignCheckHeight + if doubleSignCheckHeight > height { + doubleSignCheckHeight = height + } + for i := int64(1); i < doubleSignCheckHeight; i++ { + lastCommit := cs.blockStore.LoadSeenCommit(height - i) + if lastCommit != nil { + for sigIdx, s := range lastCommit.Signatures { + if s.BlockIDFlag == types.BlockIDFlagCommit && bytes.Equal(s.ValidatorAddress, valAddr) { + cs.Logger.Info("Found signature from the same key", "sig", s, "idx", sigIdx, "height", height-i) + return ErrSignatureFoundInPastBlocks + } + } + } + } + } return nil } //--------------------------------------------------------- -func CompareHRS(h1 int64, r1 int, s1 cstypes.RoundStepType, h2 int64, r2 int, s2 cstypes.RoundStepType) int { +func CompareHRS(h1 int64, r1 int32, s1 cstypes.RoundStepType, h2 int64, r2 int32, s2 cstypes.RoundStepType) int { if h1 < h2 { return -1 } else if h1 > h2 { @@ -2008,3 +2206,39 @@ func CompareHRS(h1 int64, r1 int, s1 cstypes.RoundStepType, h2 int64, r2 int, s2 } return 0 } + +// repairWalFile decodes messages from src (until the decoder errors) and +// writes them to dst. +func repairWalFile(src, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + + var ( + dec = NewWALDecoder(in) + enc = NewWALEncoder(out) + ) + + // best-case repair (until first error is encountered) + for { + msg, err := dec.Decode() + if err != nil { + break + } + + err = enc.Encode(msg) + if err != nil { + return fmt.Errorf("failed to encode msg: %w", err) + } + } + + return nil +} diff --git a/consensus/state_test.go b/consensus/state_test.go index 3de5a37d1..fa2aafb56 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -10,11 +10,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/example/counter" cstypes "github.com/tendermint/tendermint/consensus/types" + "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" p2pmock "github.com/tendermint/tendermint/p2p/mock" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -26,6 +29,7 @@ x * TestProposerSelection2 - round robin ordering, round 2++ x * TestEnterProposeNoValidator - timeout into prevote round x * TestEnterPropose - finish propose without timing out (we have the proposal) x * TestBadProposal - 2 vals, bad proposal (bad block state hash), should prevote and precommit nil +x * TestOversizedBlock - block with too many txs should be rejected FullRoundSuite x * TestFullRound1 - 1 val, full successful round x * TestFullRoundNil - 1 val, full round of nil @@ -76,7 +80,7 @@ func TestStateProposerSelection0(t *testing.T) { ensureNewProposal(proposalCh, 
height, round) rs := cs1.GetRoundState() - signAddVotes(cs1, types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...) + signAddVotes(cs1, tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...) // Wait for new round so next validator is set. ensureNewRound(newRoundCh, height+1, 0) @@ -100,27 +104,27 @@ func TestStateProposerSelection2(t *testing.T) { incrementRound(vss[1:]...) incrementRound(vss[1:]...) - round := 2 + var round int32 = 2 startTestRound(cs1, height, round) ensureNewRound(newRoundCh, height, round) // wait for the new round // everyone just votes nil. we get a new proposer each round - for i := 0; i < len(vss); i++ { + for i := int32(0); int(i) < len(vss); i++ { prop := cs1.GetRoundState().Validators.GetProposer() - pvk, err := vss[(i+round)%len(vss)].GetPubKey() + pvk, err := vss[int(i+round)%len(vss)].GetPubKey() require.NoError(t, err) addr := pvk.Address() correctProposer := addr if !bytes.Equal(prop.Address, correctProposer) { panic(fmt.Sprintf( "expected RoundState.Validators.GetProposer() to be validator %d. Got %X", - (i+2)%len(vss), + int(i+2)%len(vss), prop.Address)) } rs := cs1.GetRoundState() - signAddVotes(cs1, types.PrecommitType, nil, rs.ProposalBlockParts.Header(), vss[1:]...) + signAddVotes(cs1, tmproto.PrecommitType, nil, rs.ProposalBlockParts.Header(), vss[1:]...) ensureNewRound(newRoundCh, height, i+round+1) // wait for the new round event each round incrementRound(vss[1:]...) } @@ -187,7 +191,7 @@ func TestStateBadProposal(t *testing.T) { proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) voteCh := subscribe(cs1.eventBus, types.EventQueryVote) - propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, vs2) + propBlock, _ := cs1.createProposalBlock() // changeProposer(t, cs1, vs2) // make the second validator the proposer by incrementing round round++ @@ -201,12 +205,15 @@ func TestStateBadProposal(t *testing.T) { stateHash[0] = (stateHash[0] + 1) % 255 propBlock.AppHash = stateHash propBlockParts := propBlock.MakePartSet(partSize) - blockID := types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} + blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} proposal := types.NewProposal(vs2.Height, round, -1, blockID) - if err := vs2.SignProposal(config.ChainID(), proposal); err != nil { + p := proposal.ToProto() + if err := vs2.SignProposal(config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } + proposal.Signature = p.Signature + // set the proposal block if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) @@ -223,13 +230,71 @@ func TestStateBadProposal(t *testing.T) { validatePrevote(t, cs1, round, vss[0], nil) // add bad prevote from vs2 and wait for it - signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + signAddVotes(cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) ensurePrevote(voteCh, height, round) // wait for precommit ensurePrecommit(voteCh, height, round) validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(cs1, types.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + signAddVotes(cs1, tmproto.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) +} + +func TestStateOversizedBlock(t *testing.T) { + cs1, vss := randState(2) + 
cs1.state.ConsensusParams.Block.MaxBytes = 2000
+	height, round := cs1.Height, cs1.Round
+	vs2 := vss[1]
+
+	partSize := types.BlockPartSizeBytes
+
+	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
+	voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
+
+	propBlock, _ := cs1.createProposalBlock()
+	propBlock.Data.Txs = []types.Tx{tmrand.Bytes(2001)}
+	propBlock.Header.DataHash = propBlock.Data.Hash()
+
+	// make the second validator the proposer by incrementing round
+	round++
+	incrementRound(vss[1:]...)
+
+	propBlockParts := propBlock.MakePartSet(partSize)
+	blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
+	proposal := types.NewProposal(height, round, -1, blockID)
+	p := proposal.ToProto()
+	if err := vs2.SignProposal(config.ChainID(), p); err != nil {
+		t.Fatal("failed to sign bad proposal", err)
+	}
+	proposal.Signature = p.Signature
+
+	totalBytes := 0
+	for i := 0; i < int(propBlockParts.Total()); i++ {
+		part := propBlockParts.GetPart(i)
+		totalBytes += len(part.Bytes)
+	}
+
+	if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
+		t.Fatal(err)
+	}
+
+	// start the machine
+	startTestRound(cs1, height, round)
+
+	t.Log("Block Sizes", "Limit", cs1.state.ConsensusParams.Block.MaxBytes, "Current", totalBytes)
+
+	// cs1 should log an error with the block part message as it exceeds the consensus params. The
+	// block is not added to cs.ProposalBlock so the node times out.
+	ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
+
+	// and then should send nil prevote and precommit regardless of whether other validators prevote and
+	// precommit on it
+	ensurePrevote(voteCh, height, round)
+	validatePrevote(t, cs1, round, vss[0], nil)
+	signAddVotes(cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
+	ensurePrevote(voteCh, height, round)
+	ensurePrecommit(voteCh, height, round)
+	validatePrecommit(t, cs1, round, -1, vss[0], nil, nil)
+	signAddVotes(cs1, tmproto.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
+}

 //----------------------------------------------------------------------------------------------------
@@ -242,11 +307,15 @@ func TestStateFullRound1(t *testing.T) {
 	// NOTE: buffer capacity of 0 ensures we can validate prevote and last commit
 	// before consensus can move to the next height (and cause a race condition)
-	cs.eventBus.Stop()
+	if err := cs.eventBus.Stop(); err != nil {
+		t.Error(err)
+	}
 	eventBus := types.NewEventBusWithBufferCapacity(0)
 	eventBus.SetLogger(log.TestingLogger().With("module", "events"))
 	cs.SetEventBus(eventBus)
-	eventBus.Start()
+	if err := eventBus.Start(); err != nil {
+		t.Error(err)
+	}

 	voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote)
 	propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)
@@ -305,20 +374,20 @@ func TestStateFullRound2(t *testing.T) {
 	// we should be stuck in limbo waiting for more prevotes
 	rs := cs1.GetRoundState()
-	propBlockHash, propPartsHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header()
+	propBlockHash, propPartSetHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header()

 	// prevote arrives from vs2:
-	signAddVotes(cs1, types.PrevoteType, propBlockHash, propPartsHeader, vs2)
+	signAddVotes(cs1, tmproto.PrevoteType, propBlockHash, propPartSetHeader, vs2)
 	ensurePrevote(voteCh, height, round) // prevote

-	ensurePrecommit(voteCh, height, round) //precommit
+
ensurePrecommit(voteCh, height, round) // precommit // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash) // we should be stuck in limbo waiting for more precommits // precommit arrives from vs2: - signAddVotes(cs1, types.PrecommitType, propBlockHash, propPartsHeader, vs2) + signAddVotes(cs1, tmproto.PrecommitType, propBlockHash, propPartSetHeader, vs2) ensurePrecommit(voteCh, height, round) // wait to finish commit, propose in next height @@ -362,7 +431,7 @@ func TestStateLockNoPOL(t *testing.T) { // we should now be stuck in limbo forever, waiting for more prevotes // prevote arrives from vs2: - signAddVotes(cs1, types.PrevoteType, theBlockHash, thePartSetHeader, vs2) + signAddVotes(cs1, tmproto.PrevoteType, theBlockHash, thePartSetHeader, vs2) ensurePrevote(voteCh, height, round) // prevote ensurePrecommit(voteCh, height, round) // precommit @@ -374,7 +443,7 @@ func TestStateLockNoPOL(t *testing.T) { hash := make([]byte, len(theBlockHash)) copy(hash, theBlockHash) hash[0] = (hash[0] + 1) % 255 - signAddVotes(cs1, types.PrecommitType, hash, thePartSetHeader, vs2) + signAddVotes(cs1, tmproto.PrecommitType, hash, thePartSetHeader, vs2) ensurePrecommit(voteCh, height, round) // precommit // (note we're entering precommit for a second time this round) @@ -407,7 +476,7 @@ func TestStateLockNoPOL(t *testing.T) { validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) // add a conflicting prevote from the other validator - signAddVotes(cs1, types.PrevoteType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) + signAddVotes(cs1, tmproto.PrevoteType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) ensurePrevote(voteCh, height, round) // now we're going to enter prevote again, but with invalid args @@ -420,7 +489,7 @@ func TestStateLockNoPOL(t *testing.T) { validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // add conflicting precommit from vs2 - signAddVotes(cs1, types.PrecommitType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) + signAddVotes(cs1, tmproto.PrecommitType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) ensurePrecommit(voteCh, height, round) // (note we're entering precommit for a second time this round, but with invalid args @@ -450,7 +519,7 @@ func TestStateLockNoPOL(t *testing.T) { ensurePrevote(voteCh, height, round) // prevote validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) - signAddVotes(cs1, types.PrevoteType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) + signAddVotes(cs1, tmproto.PrevoteType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) ensurePrevote(voteCh, height, round) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) @@ -460,7 +529,7 @@ func TestStateLockNoPOL(t *testing.T) { signAddVotes( cs1, - types.PrecommitType, + tmproto.PrecommitType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height @@ -496,7 +565,7 @@ func TestStateLockNoPOL(t *testing.T) { validatePrevote(t, cs1, 3, vss[0], cs1.LockedBlock.Hash()) // prevote for proposed block - signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + signAddVotes(cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) ensurePrevote(voteCh, height, round) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) @@ -505,14 +574,17 @@ func 
TestStateLockNoPOL(t *testing.T) {
 	signAddVotes(
 		cs1,
-		types.PrecommitType,
+		tmproto.PrecommitType,
 		propBlock.Hash(),
 		propBlock.MakePartSet(partSize).Header(),
 		vs2) // NOTE: conflicting precommits at same height
 	ensurePrecommit(voteCh, height, round)
 }

-// 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka
+// 4 vals in two rounds:
+// in round one, v1 precommits while the other 3 only prevote, so the block isn't committed;
+// in round two, v1 prevotes the same block it is locked on,
+// the others prevote a new block, hence v1 changes lock and precommits the new block with the others
 func TestStateLockPOLRelock(t *testing.T) {
 	cs1, vss := randState(4)
 	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
@@ -548,20 +620,24 @@ func TestStateLockPOLRelock(t *testing.T) {
 	ensurePrevote(voteCh, height, round) // prevote

-	signAddVotes(cs1, types.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)
+	signAddVotes(cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4)

 	ensurePrecommit(voteCh, height, round) // our precommit
 	// the proposed block should now be locked and our precommit added
 	validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)

 	// add precommits from the rest
-	signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4)
-	signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3)
+	signAddVotes(cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)

 	// before we timeout to the new round set the new proposal
-	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
+	cs2 := newState(cs1.state, vs2, counter.NewApplication(true))
+	prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1)
+	if prop == nil || propBlock == nil {
+		t.Fatal("Failed to create proposal block with vs2")
+	}
 	propBlockParts := propBlock.MakePartSet(partSize)
 	propBlockHash := propBlock.Hash()
+	require.NotEqual(t, propBlockHash, theBlockHash)

 	incrementRound(vs2, vs3, vs4)
@@ -587,18 +663,19 @@ func TestStateLockPOLRelock(t *testing.T) {
 	// but we should receive the proposal
 	ensureNewProposal(proposalCh, height, round)

-	// go to prevote, prevote for locked block (not proposal), move on
+	// go to prevote, node should still prevote its locked block (not the new proposal)
 	ensurePrevote(voteCh, height, round)
 	validatePrevote(t, cs1, round, vss[0], theBlockHash)

 	// now lets add prevotes from everyone else for the new block
-	signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
+	signAddVotes(cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)

 	ensurePrecommit(voteCh, height, round)
-	// we should have unlocked and locked on the new block
+	// we should have unlocked and locked on the new block, sending a precommit for this new block
 	validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash)

-	signAddVotes(cs1, types.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3)
+	// more precommits, creating a majority on the new block, which is then committed
+	signAddVotes(cs1, tmproto.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3)

 	ensureNewBlockHeader(newBlockCh, height, propBlockHash)

 	ensureNewRound(newRoundCh, height+1, 0)
@@ -625,7 +702,6 @@ func TestStateLockPOLUnlock(t *testing.T) {
 	/*
 		Round1 (cs1, B) // B B B B // B nil B nil
-		eg.
didn't see the 2/3 prevotes */ @@ -641,15 +717,15 @@ func TestStateLockPOLUnlock(t *testing.T) { ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], theBlockHash) - signAddVotes(cs1, types.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) // add precommits from the rest - signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) - signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3) + signAddVotes(cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) + signAddVotes(cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3) // before we time out into new round, set next proposal block prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) @@ -667,7 +743,6 @@ func TestStateLockPOLUnlock(t *testing.T) { t.Log("#### ONTO ROUND 1") /* Round2 (vs2, C) // B nil nil nil // nil nil nil _ - cs1 unlocks! */ //XXX: this isnt guaranteed to get there before the timeoutPropose ... @@ -681,7 +756,7 @@ func TestStateLockPOLUnlock(t *testing.T) { ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], lockedBlockHash) // now lets add prevotes from everyone else for nil (a polka!) - signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) // the polka makes us unlock and precommit nil ensureNewUnlock(unlockCh, height, round) @@ -691,10 +766,136 @@ func TestStateLockPOLUnlock(t *testing.T) { // NOTE: since we don't relock on nil, the lock round is -1 validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3) + signAddVotes(cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3) ensureNewRound(newRoundCh, height, round+1) } +// 4 vals, v1 locks on proposed block in the first round but the other validators only prevote +// In the second round, v1 misses the proposal but sees a majority prevote an unknown block so +// v1 should unlock and precommit nil. 
In the third round another block is proposed, all vals
+// prevote and now v1 can lock onto the third block and precommit that
+func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
+	cs1, vss := randState(4)
+	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
+	height, round := cs1.Height, cs1.Round
+
+	partSize := types.BlockPartSizeBytes
+
+	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
+	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
+	pv1, err := cs1.privValidator.GetPubKey()
+	require.NoError(t, err)
+	addr := pv1.Address()
+	voteCh := subscribeToVoter(cs1, addr)
+	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
+	// everything done from perspective of cs1
+
+	/*
+		Round0 (cs1, A) // A A A A// A nil nil nil
+	*/
+
+	// start round and wait for propose and prevote
+	startTestRound(cs1, height, round)
+
+	ensureNewRound(newRoundCh, height, round)
+	ensureNewProposal(proposalCh, height, round)
+	rs := cs1.GetRoundState()
+	firstBlockHash := rs.ProposalBlock.Hash()
+	firstBlockParts := rs.ProposalBlockParts.Header()
+
+	ensurePrevote(voteCh, height, round) // prevote
+
+	signAddVotes(cs1, tmproto.PrevoteType, firstBlockHash, firstBlockParts, vs2, vs3, vs4)
+
+	ensurePrecommit(voteCh, height, round) // our precommit
+	// the proposed block should now be locked and our precommit added
+	validatePrecommit(t, cs1, round, round, vss[0], firstBlockHash, firstBlockHash)
+
+	// add precommits from the rest
+	signAddVotes(cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
+
+	// before we timeout to the new round set the new proposal
+	cs2 := newState(cs1.state, vs2, counter.NewApplication(true))
+	prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1)
+	if prop == nil || propBlock == nil {
+		t.Fatal("Failed to create proposal block with vs2")
+	}
+	secondBlockParts := propBlock.MakePartSet(partSize)
+	secondBlockHash := propBlock.Hash()
+	require.NotEqual(t, secondBlockHash, firstBlockHash)
+
+	incrementRound(vs2, vs3, vs4)
+
+	// timeout to new round
+	ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
+
+	round++ // moving to the next round
+
+	ensureNewRound(newRoundCh, height, round)
+	t.Log("### ONTO ROUND 1")
+
+	/*
+		Round1 (vs2, B) // A B B B // nil nil nil nil)
+	*/
+
+	// now we're on a new round but v1 misses the proposal
+
+	// go to prevote, node should prevote for its locked block (not the new proposal it never saw)
+	ensurePrevote(voteCh, height, round)
+	validatePrevote(t, cs1, round, vss[0], firstBlockHash)
+
+	// now lets add prevotes from everyone else for the new block
+	signAddVotes(cs1, tmproto.PrevoteType, secondBlockHash, secondBlockParts.Header(), vs2, vs3, vs4)
+
+	ensurePrecommit(voteCh, height, round)
+	// we should have unlocked and precommitted nil, since we never saw the block the polka is for
+	validatePrecommit(t, cs1, round, -1, vss[0], nil, nil)
+
+	if err := cs1.SetProposalAndBlock(prop, propBlock, secondBlockParts, "some peer"); err != nil {
+		t.Fatal(err)
+	}
+
+	// the others precommit nil as well, so this round produces no commit
+	signAddVotes(cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
+
+	// before we timeout to the new round set the new proposal
+	cs3 := newState(cs1.state, vs3, counter.NewApplication(true))
+	prop, propBlock = decideProposal(cs3, vs3, vs3.Height, vs3.Round+1)
+	if prop == nil || propBlock == nil {
+		t.Fatal("Failed to create proposal block with vs3")
+	}
+
thirdPropBlockParts := propBlock.MakePartSet(partSize) + thirdPropBlockHash := propBlock.Hash() + require.NotEqual(t, secondBlockHash, thirdPropBlockHash) + + incrementRound(vs2, vs3, vs4) + + // timeout to new round + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + + round++ // moving to the next round + ensureNewRound(newRoundCh, height, round) + t.Log("### ONTO ROUND 2") + + /* + Round2 (vs3, C) // C C C C // C nil nil nil) + */ + + if err := cs1.SetProposalAndBlock(prop, propBlock, thirdPropBlockParts, "some peer"); err != nil { + t.Fatal(err) + } + + ensurePrevote(voteCh, height, round) + // we are no longer locked to the first block so we should be able to prevote + validatePrevote(t, cs1, round, vss[0], thirdPropBlockHash) + + signAddVotes(cs1, tmproto.PrevoteType, thirdPropBlockHash, thirdPropBlockParts.Header(), vs2, vs3, vs4) + + ensurePrecommit(voteCh, height, round) + // we have a majority, now vs1 can change lock to the third block + validatePrecommit(t, cs1, round, round, vss[0], thirdPropBlockHash, thirdPropBlockHash) +} + // 4 vals // a polka at round 1 but we miss it // then a polka at round 2 that we lock on @@ -727,12 +928,12 @@ func TestStateLockPOLSafety1(t *testing.T) { validatePrevote(t, cs1, round, vss[0], propBlock.Hash()) // the others sign a polka but we don't see it - prevotes := signVotes(types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) + prevotes := signVotes(tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) t.Logf("old prop hash %v", fmt.Sprintf("%X", propBlock.Hash())) // we do see them precommit nil - signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) // cs1 precommit nil ensurePrecommit(voteCh, height, round) @@ -772,13 +973,13 @@ func TestStateLockPOLSafety1(t *testing.T) { validatePrevote(t, cs1, round, vss[0], propBlockHash) // now we see the others prevote for it, so we should lock on it - signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) // we should have precommitted validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) - signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -839,10 +1040,10 @@ func TestStateLockPOLSafety2(t *testing.T) { _, propBlock0 := decideProposal(cs1, vss[0], height, round) propBlockHash0 := propBlock0.Hash() propBlockParts0 := propBlock0.MakePartSet(partSize) - propBlockID0 := types.BlockID{Hash: propBlockHash0, PartsHeader: propBlockParts0.Header()} + propBlockID0 := types.BlockID{Hash: propBlockHash0, PartSetHeader: propBlockParts0.Header()} // the others sign a polka but we don't see it - prevotes := signVotes(types.PrevoteType, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4) + prevotes := signVotes(tmproto.PrevoteType, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4) // the block for round 1 prop1, propBlock1 := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) @@ -865,15 +1066,15 @@ func TestStateLockPOLSafety2(t *testing.T) { ensurePrevote(voteCh, height, 
round) validatePrevote(t, cs1, round, vss[0], propBlockHash1) - signAddVotes(cs1, types.PrevoteType, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrevoteType, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], propBlockHash1, propBlockHash1) // add precommits from the rest - signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) - signAddVotes(cs1, types.PrecommitType, propBlockHash1, propBlockParts1.Header(), vs3) + signAddVotes(cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) + signAddVotes(cs1, tmproto.PrecommitType, propBlockHash1, propBlockParts1.Header(), vs3) incrementRound(vs2, vs3, vs4) @@ -883,9 +1084,13 @@ func TestStateLockPOLSafety2(t *testing.T) { round++ // moving to the next round // in round 2 we see the polkad block from round 0 newProp := types.NewProposal(height, round, 0, propBlockID0) - if err := vs3.SignProposal(config.ChainID(), newProp); err != nil { + p := newProp.ToProto() + if err := vs3.SignProposal(config.ChainID(), p); err != nil { t.Fatal(err) } + + newProp.Signature = p.Signature + if err := cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer"); err != nil { t.Fatal(err) } @@ -941,13 +1146,13 @@ func TestProposeValidBlock(t *testing.T) { validatePrevote(t, cs1, round, vss[0], propBlockHash) // the others sign a polka - signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrevoteType, propBlockHash, propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) // we should have precommitted validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) - signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -964,7 +1169,7 @@ func TestProposeValidBlock(t *testing.T) { ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], propBlockHash) - signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) ensureNewUnlock(unlockCh, height, round) @@ -975,7 +1180,7 @@ func TestProposeValidBlock(t *testing.T) { incrementRound(vs2, vs3, vs4) incrementRound(vs2, vs3, vs4) - signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) round += 2 // moving to the next round @@ -1031,10 +1236,10 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { validatePrevote(t, cs1, round, vss[0], propBlockHash) // vs2 send prevote for propBlock - signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2) + signAddVotes(cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2) // vs3 send prevote nil - signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs3) + signAddVotes(cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs3) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) @@ -1049,7 +1254,7 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { assert.True(t, 
rs.ValidRound == -1) // vs2 send (delayed) prevote for propBlock - signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs4) + signAddVotes(cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs4) ensureNewValidBlock(validBlockCh, height, round) @@ -1096,7 +1301,7 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { propBlockParts := propBlock.MakePartSet(partSize) // vs2, vs3 and vs4 send prevote for propBlock - signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) ensureNewValidBlock(validBlockCh, height, round) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) @@ -1131,7 +1336,7 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) { startTestRound(cs1, height, round) ensureNewRound(newRoundCh, height, round) - signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) ensureNewRound(newRoundCh, height, round+1) @@ -1159,7 +1364,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { ensurePrevote(voteCh, height, round) incrementRound(vss[1:]...) - signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) round++ // moving to the next round ensureNewRound(newRoundCh, height, round) @@ -1195,7 +1400,7 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { ensurePrevote(voteCh, height, round) incrementRound(vss[1:]...) - signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) round++ // moving to the next round ensureNewRound(newRoundCh, height, round) @@ -1215,7 +1420,7 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, 1 + height, round := cs1.Height, int32(1) timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) @@ -1229,7 +1434,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { ensureNewRound(newRoundCh, height, round) incrementRound(vss[1:]...) 
- signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) @@ -1242,7 +1447,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, 1 + height, round := cs1.Height, int32(1) incrementRound(vs2, vs3, vs4) @@ -1260,7 +1465,7 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { ensureNewRound(newRoundCh, height, round) // vs2, vs3 and vs4 send precommit for propBlock - signAddVotes(cs1, types.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) ensureNewValidBlock(validBlockCh, height, round) rs := cs1.GetRoundState() @@ -1276,7 +1481,7 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { func TestCommitFromPreviousRound(t *testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - height, round := cs1.Height, 1 + height, round := cs1.Height, int32(1) partSize := types.BlockPartSizeBytes @@ -1293,7 +1498,7 @@ func TestCommitFromPreviousRound(t *testing.T) { ensureNewRound(newRoundCh, height, round) // vs2, vs3 and vs4 send precommit for propBlock for the previous round - signAddVotes(cs1, types.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) ensureNewValidBlock(validBlockCh, height, round) @@ -1323,7 +1528,10 @@ func (n *fakeTxNotifier) Notify() { n.ch <- struct{}{} } -func TestStartNextHeightCorrectly(t *testing.T) { +// 2 vals precommit votes for a block but node times out waiting for the third. 
Move to next round +// and third precommit arrives which leads to the commit of that header and the correct +// start of the next round +func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { config.Consensus.SkipTimeoutCommit = false cs1, vss := randState(4) cs1.txNotifier = &fakeTxNotifier{ch: make(chan struct{})} @@ -1333,6 +1541,7 @@ func TestStartNextHeightCorrectly(t *testing.T) { proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + precommitTimeoutCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) @@ -1353,20 +1562,23 @@ func TestStartNextHeightCorrectly(t *testing.T) { ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], theBlockHash) - signAddVotes(cs1, types.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) // add precommits - signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2) - signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3) - time.Sleep(5 * time.Millisecond) - signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs4) + signAddVotes(cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2) + signAddVotes(cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3) - rs = cs1.GetRoundState() - assert.True(t, rs.TriggeredTimeoutPrecommit) + // wait till timeout occurs + ensurePrecommitTimeout(precommitTimeoutCh) + + ensureNewRound(newRoundCh, height, round+1) + + // majority is now reached + signAddVotes(cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs4) ensureNewBlockHeader(newBlockHeader, height, theBlockHash) @@ -1410,15 +1622,15 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], theBlockHash) - signAddVotes(cs1, types.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) // add precommits - signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2) - signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3) - signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs4) + signAddVotes(cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2) + signAddVotes(cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3) + signAddVotes(cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs4) ensureNewBlockHeader(newBlockHeader, height, theBlockHash) @@ -1464,7 +1676,7 @@ func TestStateSlashingPrevotes(t *testing.T) { // add one for a different block should cause us to go into prevote wait hash := rs.ProposalBlock.Hash() hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, types.PrevoteType, hash, rs.ProposalBlockParts.Header(), vs2) + signAddVotes(cs1, tmproto.PrevoteType, hash, rs.ProposalBlockParts.Header(), vs2) <-timeoutWaitCh @@ -1472,7 +1684,7 @@ func TestStateSlashingPrevotes(t *testing.T) { // away and ignore more prevotes (and 
thus fail to slash!) // add the conflicting vote - signAddVotes(cs1, types.PrevoteType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) + signAddVotes(cs1, tmproto.PrevoteType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) // XXX: Check for existence of Dupeout info } @@ -1494,7 +1706,7 @@ func TestStateSlashingPrecommits(t *testing.T) { <-voteCh // prevote // add prevote from vs2 - signAddVotes(cs1, types.PrevoteType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) + signAddVotes(cs1, tmproto.PrevoteType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) <-voteCh // precommit @@ -1502,13 +1714,13 @@ func TestStateSlashingPrecommits(t *testing.T) { // add one for a different block should cause us to go into prevote wait hash := rs.ProposalBlock.Hash() hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, types.PrecommitType, hash, rs.ProposalBlockParts.Header(), vs2) + signAddVotes(cs1, tmproto.PrecommitType, hash, rs.ProposalBlockParts.Header(), vs2) // NOTE: we have to send the vote for different block first so we don't just go into precommit round right // away and ignore more prevotes (and thus fail to slash!) // add precommit from vs2 - signAddVotes(cs1, types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) + signAddVotes(cs1, tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) // XXX: Check for existence of Dupeout info } @@ -1548,17 +1760,17 @@ func TestStateHalt1(t *testing.T) { ensurePrevote(voteCh, height, round) - signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlockParts.Header(), vs2, vs3, vs4) + signAddVotes(cs1, tmproto.PrevoteType, propBlock.Hash(), propBlockParts.Header(), vs2, vs3, vs4) ensurePrecommit(voteCh, height, round) // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash()) // add precommits from the rest - signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2) // didnt receive proposal - signAddVotes(cs1, types.PrecommitType, propBlock.Hash(), propBlockParts.Header(), vs3) + signAddVotes(cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2) // didnt receive proposal + signAddVotes(cs1, tmproto.PrecommitType, propBlock.Hash(), propBlockParts.Header(), vs3) // we receive this later, but vs3 might receive it earlier and with ours will go to commit! 
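Both slashing tests above follow the same recipe: a conflicting vote is sent first, precisely so the node lingers in the wait step long enough to observe the equivocation (the "fail to slash!" notes). A self-contained sketch of the duplicate-vote condition they probe, with trimmed stand-in types rather than the real types.Vote:

```go
package main

import (
	"bytes"
	"fmt"
)

// vote is a stand-in for types.Vote, reduced to the fields that matter
// for duplicate-vote (equivocation) detection.
type vote struct {
	Validator []byte
	Height    int64
	Round     int32
	Type      int8 // prevote or precommit
	BlockHash []byte
}

// conflicting reports whether two votes from the same validator for the
// same height/round/type name different blocks - the evidence these
// tests want the node to stay around long enough to collect.
func conflicting(a, b vote) bool {
	return bytes.Equal(a.Validator, b.Validator) &&
		a.Height == b.Height &&
		a.Round == b.Round &&
		a.Type == b.Type &&
		!bytes.Equal(a.BlockHash, b.BlockHash)
}

func main() {
	v1 := vote{Validator: []byte{1}, Height: 10, Round: 0, Type: 1, BlockHash: []byte{0xaa}}
	v2 := v1
	v2.BlockHash = []byte{0xab} // same slot, different block
	fmt.Println(conflicting(v1, v2)) // true
}
```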
- precommit4 := signVote(vs4, types.PrecommitType, propBlock.Hash(), propBlockParts.Header()) + precommit4 := signVote(vs4, tmproto.PrecommitType, propBlock.Hash(), propBlockParts.Header()) incrementRound(vs2, vs3, vs4) @@ -1637,7 +1849,9 @@ func TestStateOutputVoteStats(t *testing.T) { // create dummy peer peer := p2pmock.NewPeer(nil) - vote := signVote(vss[1], types.PrecommitType, []byte("test"), types.PartSetHeader{}) + randBytes := tmrand.Bytes(tmhash.Size) + + vote := signVote(vss[1], tmproto.PrecommitType, randBytes, types.PartSetHeader{}) voteMessage := &VoteMessage{vote} cs.handleMsg(msgInfo{voteMessage, peer.ID()}) @@ -1651,7 +1865,7 @@ func TestStateOutputVoteStats(t *testing.T) { // sending the vote for the bigger height incrementHeight(vss[1]) - vote = signVote(vss[1], types.PrecommitType, []byte("test"), types.PartSetHeader{}) + vote = signVote(vss[1], tmproto.PrecommitType, randBytes, types.PartSetHeader{}) cs.handleMsg(msgInfo{&VoteMessage{vote}, peer.ID()}) diff --git a/consensus/types/codec.go b/consensus/types/codec.go deleted file mode 100644 index 69ac8c4a5..000000000 --- a/consensus/types/codec.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -import ( - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/types" -) - -var cdc = amino.NewCodec() - -func init() { - types.RegisterBlockAmino(cdc) -} diff --git a/consensus/types/height_vote_set.go b/consensus/types/height_vote_set.go index 14cd04bf8..6a5c0b495 100644 --- a/consensus/types/height_vote_set.go +++ b/consensus/types/height_vote_set.go @@ -6,7 +6,10 @@ import ( "strings" "sync" + tmjson "github.com/tendermint/tendermint/libs/json" + tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/p2p" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -41,9 +44,9 @@ type HeightVoteSet struct { valSet *types.ValidatorSet mtx sync.Mutex - round int // max tracked round - roundVoteSets map[int]RoundVoteSet // keys: [0...round] - peerCatchupRounds map[p2p.ID][]int // keys: peer.ID; values: at most 2 rounds + round int32 // max tracked round + roundVoteSets map[int32]RoundVoteSet // keys: [0...round] + peerCatchupRounds map[p2p.ID][]int32 // keys: peer.ID; values: at most 2 rounds } func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet { @@ -60,8 +63,8 @@ func (hvs *HeightVoteSet) Reset(height int64, valSet *types.ValidatorSet) { hvs.height = height hvs.valSet = valSet - hvs.roundVoteSets = make(map[int]RoundVoteSet) - hvs.peerCatchupRounds = make(map[p2p.ID][]int) + hvs.roundVoteSets = make(map[int32]RoundVoteSet) + hvs.peerCatchupRounds = make(map[p2p.ID][]int32) hvs.addRound(0) hvs.round = 0 @@ -73,20 +76,21 @@ func (hvs *HeightVoteSet) Height() int64 { return hvs.height } -func (hvs *HeightVoteSet) Round() int { +func (hvs *HeightVoteSet) Round() int32 { hvs.mtx.Lock() defer hvs.mtx.Unlock() return hvs.round } // Create more RoundVoteSets up to round. -func (hvs *HeightVoteSet) SetRound(round int) { +func (hvs *HeightVoteSet) SetRound(round int32) { hvs.mtx.Lock() defer hvs.mtx.Unlock() - if hvs.round != 0 && (round < hvs.round+1) { + newRound := tmmath.SafeSubInt32(hvs.round, 1) + if hvs.round != 0 && (round < newRound) { panic("SetRound() must increment hvs.round") } - for r := hvs.round + 1; r <= round; r++ { + for r := newRound; r <= round; r++ { if _, ok := hvs.roundVoteSets[r]; ok { continue // Already exists because peerCatchupRounds. 
} @@ -95,13 +99,13 @@ func (hvs *HeightVoteSet) SetRound(round int) { hvs.round = round } -func (hvs *HeightVoteSet) addRound(round int) { +func (hvs *HeightVoteSet) addRound(round int32) { if _, ok := hvs.roundVoteSets[round]; ok { panic("addRound() for an existing round") } // log.Debug("addRound(round)", "round", round) - prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrevoteType, hvs.valSet) - precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrecommitType, hvs.valSet) + prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrevoteType, hvs.valSet) + precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrecommitType, hvs.valSet) hvs.roundVoteSets[round] = RoundVoteSet{ Prevotes: prevotes, Precommits: precommits, @@ -132,25 +136,25 @@ func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, return } -func (hvs *HeightVoteSet) Prevotes(round int) *types.VoteSet { +func (hvs *HeightVoteSet) Prevotes(round int32) *types.VoteSet { hvs.mtx.Lock() defer hvs.mtx.Unlock() - return hvs.getVoteSet(round, types.PrevoteType) + return hvs.getVoteSet(round, tmproto.PrevoteType) } -func (hvs *HeightVoteSet) Precommits(round int) *types.VoteSet { +func (hvs *HeightVoteSet) Precommits(round int32) *types.VoteSet { hvs.mtx.Lock() defer hvs.mtx.Unlock() - return hvs.getVoteSet(round, types.PrecommitType) + return hvs.getVoteSet(round, tmproto.PrecommitType) } // Last round and blockID that has +2/3 prevotes for a particular block or nil. // Returns -1 if no such round exists. -func (hvs *HeightVoteSet) POLInfo() (polRound int, polBlockID types.BlockID) { +func (hvs *HeightVoteSet) POLInfo() (polRound int32, polBlockID types.BlockID) { hvs.mtx.Lock() defer hvs.mtx.Unlock() for r := hvs.round; r >= 0; r-- { - rvs := hvs.getVoteSet(r, types.PrevoteType) + rvs := hvs.getVoteSet(r, tmproto.PrevoteType) polBlockID, ok := rvs.TwoThirdsMajority() if ok { return r, polBlockID @@ -159,15 +163,15 @@ func (hvs *HeightVoteSet) POLInfo() (polRound int, polBlockID types.BlockID) { return -1, types.BlockID{} } -func (hvs *HeightVoteSet) getVoteSet(round int, voteType types.SignedMsgType) *types.VoteSet { +func (hvs *HeightVoteSet) getVoteSet(round int32, voteType tmproto.SignedMsgType) *types.VoteSet { rvs, ok := hvs.roundVoteSets[round] if !ok { return nil } switch voteType { - case types.PrevoteType: + case tmproto.PrevoteType: return rvs.Prevotes - case types.PrecommitType: + case tmproto.PrecommitType: return rvs.Precommits default: panic(fmt.Sprintf("Unexpected vote type %X", voteType)) @@ -179,8 +183,8 @@ func (hvs *HeightVoteSet) getVoteSet(round int, voteType types.SignedMsgType) *t // this can cause memory issues. 
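POLInfo above scans rounds from the newest downward and reports the most recent round holding a +2/3 prevote majority, or -1 when none exists. A toy version of that scan over a simplified structure (not the real HeightVoteSet, which guards the map with a mutex and stores full VoteSets per round):

```go
package main

import "fmt"

// prevoteRound is a stand-in for one round's prevote set: just whether a
// two-thirds majority exists and for which block.
type prevoteRound struct {
	hasTwoThirds bool
	blockID      string
}

// polInfo walks from the highest tracked round downward and returns the
// most recent round with a +2/3 prevote majority, or -1 if none exists -
// the same contract as HeightVoteSet.POLInfo.
func polInfo(rounds map[int32]prevoteRound, maxRound int32) (int32, string) {
	for r := maxRound; r >= 0; r-- {
		if rv, ok := rounds[r]; ok && rv.hasTwoThirds {
			return r, rv.blockID
		}
	}
	return -1, ""
}

func main() {
	rounds := map[int32]prevoteRound{
		0: {hasTwoThirds: true, blockID: "A"},
		1: {hasTwoThirds: false},
		2: {hasTwoThirds: true, blockID: "B"},
	}
	fmt.Println(polInfo(rounds, 3)) // 2 B
}
```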
// TODO: implement ability to remove peers too func (hvs *HeightVoteSet) SetPeerMaj23( - round int, - voteType types.SignedMsgType, + round int32, + voteType tmproto.SignedMsgType, peerID p2p.ID, blockID types.BlockID) error { hvs.mtx.Lock() @@ -207,7 +211,7 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string { defer hvs.mtx.Unlock() vsStrings := make([]string, 0, (len(hvs.roundVoteSets)+1)*2) // rounds 0 ~ hvs.round inclusive - for round := 0; round <= hvs.round; round++ { + for round := int32(0); round <= hvs.round; round++ { voteSetString := hvs.roundVoteSets[round].Prevotes.StringShort() vsStrings = append(vsStrings, voteSetString) voteSetString = hvs.roundVoteSets[round].Precommits.StringShort() @@ -234,16 +238,14 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string { func (hvs *HeightVoteSet) MarshalJSON() ([]byte, error) { hvs.mtx.Lock() defer hvs.mtx.Unlock() - - allVotes := hvs.toAllRoundVotes() - return cdc.MarshalJSON(allVotes) + return tmjson.Marshal(hvs.toAllRoundVotes()) } func (hvs *HeightVoteSet) toAllRoundVotes() []roundVotes { totalRounds := hvs.round + 1 allVotes := make([]roundVotes, totalRounds) // rounds 0 ~ hvs.round inclusive - for round := 0; round < totalRounds; round++ { + for round := int32(0); round < totalRounds; round++ { allVotes[round] = roundVotes{ Round: round, Prevotes: hvs.roundVoteSets[round].Prevotes.VoteStrings(), @@ -257,7 +259,7 @@ func (hvs *HeightVoteSet) toAllRoundVotes() []roundVotes { } type roundVotes struct { - Round int `json:"round"` + Round int32 `json:"round"` Prevotes []string `json:"prevotes"` PrevotesBitArray string `json:"prevotes_bit_array"` Precommits []string `json:"precommits"` diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go index 654880d27..68c4d98c0 100644 --- a/consensus/types/height_vote_set_test.go +++ b/consensus/types/height_vote_set_test.go @@ -6,6 +6,9 @@ import ( "testing" cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/crypto/tmhash" + tmrand "github.com/tendermint/tendermint/libs/rand" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" ) @@ -24,19 +27,19 @@ func TestPeerCatchupRounds(t *testing.T) { hvs := NewHeightVoteSet(config.ChainID(), 1, valSet) - vote999_0 := makeVoteHR(t, 1, 999, privVals, 0) + vote999_0 := makeVoteHR(t, 1, 0, 999, privVals) added, err := hvs.AddVote(vote999_0, "peer1") if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1000_0 := makeVoteHR(t, 1, 1000, privVals, 0) + vote1000_0 := makeVoteHR(t, 1, 0, 1000, privVals) added, err = hvs.AddVote(vote1000_0, "peer1") if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1001_0 := makeVoteHR(t, 1, 1001, privVals, 0) + vote1001_0 := makeVoteHR(t, 1, 0, 1001, privVals) added, err = hvs.AddVote(vote1001_0, "peer1") if err != ErrGotVoteFromUnwantedRound { t.Errorf("expected GotVoteFromUnwantedRoundError, but got %v", err) @@ -52,26 +55,33 @@ func TestPeerCatchupRounds(t *testing.T) { } -func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivValidator, valIndex int) *types.Vote { +func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []types.PrivValidator) *types.Vote { privVal := privVals[valIndex] pubKey, err := privVal.GetPubKey() if err != nil { panic(err) } + randBytes := tmrand.Bytes(tmhash.Size) 
+ vote := &types.Vote{ ValidatorAddress: pubKey.Address(), ValidatorIndex: valIndex, Height: height, Round: round, Timestamp: tmtime.Now(), - Type: types.PrecommitType, - BlockID: types.BlockID{Hash: []byte("fakehash"), PartsHeader: types.PartSetHeader{}}, + Type: tmproto.PrecommitType, + BlockID: types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}}, } chainID := config.ChainID() - err = privVal.SignVote(chainID, vote) + + v := vote.ToProto() + err = privVal.SignVote(chainID, v) if err != nil { panic(fmt.Sprintf("Error signing vote: %v", err)) } + + vote.Signature = v.Signature + return vote } diff --git a/consensus/types/peer_round_state.go b/consensus/types/peer_round_state.go index 937f597fe..07283c5b4 100644 --- a/consensus/types/peer_round_state.go +++ b/consensus/types/peer_round_state.go @@ -14,27 +14,28 @@ import ( // NOTE: Read-only when returned by PeerState.GetRoundState(). type PeerRoundState struct { Height int64 `json:"height"` // Height peer is at - Round int `json:"round"` // Round peer is at, -1 if unknown. + Round int32 `json:"round"` // Round peer is at, -1 if unknown. Step RoundStepType `json:"step"` // Step peer is at // Estimated start of round 0 at this height StartTime time.Time `json:"start_time"` // True if peer has proposal for this round - Proposal bool `json:"proposal"` - ProposalBlockPartsHeader types.PartSetHeader `json:"proposal_block_parts_header"` // - ProposalBlockParts *bits.BitArray `json:"proposal_block_parts"` // - ProposalPOLRound int `json:"proposal_pol_round"` // Proposal's POL round. -1 if none. + Proposal bool `json:"proposal"` + ProposalBlockPartSetHeader types.PartSetHeader `json:"proposal_block_part_set_header"` + ProposalBlockParts *bits.BitArray `json:"proposal_block_parts"` + // Proposal's POL round. -1 if none. + ProposalPOLRound int32 `json:"proposal_pol_round"` // nil until ProposalPOLMessage received. ProposalPOL *bits.BitArray `json:"proposal_pol"` Prevotes *bits.BitArray `json:"prevotes"` // All votes peer has for this round Precommits *bits.BitArray `json:"precommits"` // All precommits peer has for this round - LastCommitRound int `json:"last_commit_round"` // Round of commit for last height. -1 if none. + LastCommitRound int32 `json:"last_commit_round"` // Round of commit for last height. -1 if none. LastCommit *bits.BitArray `json:"last_commit"` // All commit precommits of commit for last height. // Round that we have commit for. Not necessarily unique. -1 if none. - CatchupCommitRound int `json:"catchup_commit_round"` + CatchupCommitRound int32 `json:"catchup_commit_round"` // All commit precommits peer has for this height & CatchupCommitRound CatchupCommit *bits.BitArray `json:"catchup_commit"` @@ -57,7 +58,7 @@ func (prs PeerRoundState) StringIndented(indent string) string { %s Catchup %v (round %v) %s}`, indent, prs.Height, prs.Round, prs.Step, prs.StartTime, - indent, prs.ProposalBlockPartsHeader, prs.ProposalBlockParts, + indent, prs.ProposalBlockPartSetHeader, prs.ProposalBlockParts, indent, prs.ProposalPOL, prs.ProposalPOLRound, indent, prs.Prevotes, indent, prs.Precommits, @@ -65,31 +66,3 @@ func (prs PeerRoundState) StringIndented(indent string) string { indent, prs.CatchupCommit, prs.CatchupCommitRound, indent) } - -//----------------------------------------------------------- -// These methods are for Protobuf Compatibility - -// Size returns the size of the amino encoding, in bytes. 
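The updated makeVoteHR above shows a pattern that recurs throughout this migration: the private validator now signs the protobuf form of the vote, so callers convert, sign, and copy the signature back onto the domain type. A sketch of that three-step flow with stand-in types (neither is the real tmproto.Vote or types.Vote):

```go
package main

import "fmt"

// protoVote stands in for tmproto.Vote; domainVote for types.Vote.
type protoVote struct {
	Height    int64
	Signature []byte
}

type domainVote struct {
	Height    int64
	Signature []byte
}

// toProto converts the domain vote into its wire form, without a signature.
func (v *domainVote) toProto() *protoVote { return &protoVote{Height: v.Height} }

// signVote mimics a PrivValidator: it signs and mutates the proto message.
func signVote(pv *protoVote) { pv.Signature = []byte("sig-over-proto-bytes") }

func main() {
	vote := &domainVote{Height: 1}
	pv := vote.toProto()          // 1. convert to the wire type
	signVote(pv)                  // 2. sign the proto representation
	vote.Signature = pv.Signature // 3. copy the signature back, as makeVoteHR does
	fmt.Printf("%s\n", vote.Signature)
}
```

Signing the proto bytes rather than the domain struct keeps the signed payload identical to what travels over the wire.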
-func (prs *PeerRoundState) Size() int { - bs, _ := prs.Marshal() - return len(bs) -} - -// Marshal returns the amino encoding. -func (prs *PeerRoundState) Marshal() ([]byte, error) { - return cdc.MarshalBinaryBare(prs) -} - -// MarshalTo calls Marshal and copies to the given buffer. -func (prs *PeerRoundState) MarshalTo(data []byte) (int, error) { - bs, err := prs.Marshal() - if err != nil { - return -1, err - } - return copy(data, bs), nil -} - -// Unmarshal deserializes from amino encoded form. -func (prs *PeerRoundState) Unmarshal(bs []byte) error { - return cdc.UnmarshalBinaryBare(bs, prs) -} diff --git a/consensus/types/round_state.go b/consensus/types/round_state.go index 0a18b6d04..9e67b76c0 100644 --- a/consensus/types/round_state.go +++ b/consensus/types/round_state.go @@ -66,7 +66,7 @@ func (rs RoundStepType) String() string { // of the cs.receiveRoutine type RoundState struct { Height int64 `json:"height"` // Height we are working on - Round int `json:"round"` + Round int32 `json:"round"` Step RoundStepType `json:"step"` StartTime time.Time `json:"start_time"` @@ -76,18 +76,18 @@ type RoundState struct { Proposal *types.Proposal `json:"proposal"` ProposalBlock *types.Block `json:"proposal_block"` ProposalBlockParts *types.PartSet `json:"proposal_block_parts"` - LockedRound int `json:"locked_round"` + LockedRound int32 `json:"locked_round"` LockedBlock *types.Block `json:"locked_block"` LockedBlockParts *types.PartSet `json:"locked_block_parts"` // Last known round with POL for non-nil valid block. - ValidRound int `json:"valid_round"` + ValidRound int32 `json:"valid_round"` ValidBlock *types.Block `json:"valid_block"` // Last known block of POL mentioned above. - // Last known block parts of POL metnioned above. + // Last known block parts of POL mentioned above. ValidBlockParts *types.PartSet `json:"valid_block_parts"` Votes *HeightVoteSet `json:"votes"` - CommitRound int `json:"commit_round"` // + CommitRound int32 `json:"commit_round"` // LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1 LastValidators *types.ValidatorSet `json:"last_validators"` TriggeredTimeoutPrecommit bool `json:"triggered_timeout_precommit"` @@ -149,8 +149,8 @@ func (rs *RoundState) CompleteProposalEvent() types.EventDataCompleteProposal { // We must construct BlockID from ProposalBlock and ProposalBlockParts // cs.Proposal is not guaranteed to be set when this function is called blockID := types.BlockID{ - Hash: rs.ProposalBlock.Hash(), - PartsHeader: rs.ProposalBlockParts.Header(), + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), } return types.EventDataCompleteProposal{ @@ -213,31 +213,3 @@ func (rs *RoundState) StringShort() string { return fmt.Sprintf(`RoundState{H:%v R:%v S:%v ST:%v}`, rs.Height, rs.Round, rs.Step, rs.StartTime) } - -//----------------------------------------------------------- -// These methods are for Protobuf Compatibility - -// Size returns the size of the amino encoding, in bytes. -func (rs *RoundStateSimple) Size() int { - bs, _ := rs.Marshal() - return len(bs) -} - -// Marshal returns the amino encoding. -func (rs *RoundStateSimple) Marshal() ([]byte, error) { - return cdc.MarshalBinaryBare(rs) -} - -// MarshalTo calls Marshal and copies to the given buffer. -func (rs *RoundStateSimple) MarshalTo(data []byte) (int, error) { - bs, err := rs.Marshal() - if err != nil { - return -1, err - } - return copy(data, bs), nil -} - -// Unmarshal deserializes from amino encoded form. 
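Both PeerRoundState here and RoundStateSimple below shed the same four hand-written methods. That quartet matches the method set gogoproto generates for its messages, so these amino-backed shims were only a compatibility bridge and become redundant once the types travel as real protobuf. A sketch of the interface they implicitly satisfied (the interface name is illustrative; no such interface is declared in the repo):

```go
package types

// The method set the deleted shims implemented by hand. gogoproto-generated
// messages carry exactly these methods, so generated types satisfy the
// contract automatically and the amino-backed versions can be deleted.
type protoMessageCompat interface {
	Size() int
	Marshal() ([]byte, error)
	MarshalTo(data []byte) (int, error)
	Unmarshal(bs []byte) error
}
```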
-func (rs *RoundStateSimple) Unmarshal(bs []byte) error { - return cdc.UnmarshalBinaryBare(bs, rs) -} diff --git a/consensus/types/round_state_test.go b/consensus/types/round_state_test.go deleted file mode 100644 index 131158f0e..000000000 --- a/consensus/types/round_state_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package types - -import ( - "testing" - - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/tmhash" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/types" - tmtime "github.com/tendermint/tendermint/types/time" -) - -func BenchmarkRoundStateDeepCopy(b *testing.B) { - b.StopTimer() - - // Random validators - nval, ntxs := 100, 100 - vset, _ := types.RandValidatorSet(nval, 1) - commitSigs := make([]types.CommitSig, nval) - blockID := types.BlockID{ - Hash: tmrand.Bytes(tmhash.Size), - PartsHeader: types.PartSetHeader{ - Hash: tmrand.Bytes(tmhash.Size), - Total: 1000, - }, - } - sig := make([]byte, ed25519.SignatureSize) - for i := 0; i < nval; i++ { - commitSigs[i] = (&types.Vote{ - ValidatorAddress: types.Address(tmrand.Bytes(20)), - Timestamp: tmtime.Now(), - BlockID: blockID, - Signature: sig, - }).CommitSig() - } - txs := make([]types.Tx, ntxs) - for i := 0; i < ntxs; i++ { - txs[i] = tmrand.Bytes(100) - } - // Random block - block := &types.Block{ - Header: types.Header{ - ChainID: tmrand.Str(12), - Time: tmtime.Now(), - LastBlockID: blockID, - LastCommitHash: tmrand.Bytes(20), - DataHash: tmrand.Bytes(20), - ValidatorsHash: tmrand.Bytes(20), - ConsensusHash: tmrand.Bytes(20), - AppHash: tmrand.Bytes(20), - LastResultsHash: tmrand.Bytes(20), - EvidenceHash: tmrand.Bytes(20), - }, - Data: types.Data{ - Txs: txs, - }, - Evidence: types.EvidenceData{}, - LastCommit: types.NewCommit(1, 0, blockID, commitSigs), - } - parts := block.MakePartSet(4096) - // Random Proposal - proposal := &types.Proposal{ - Timestamp: tmtime.Now(), - BlockID: blockID, - Signature: sig, - } - // Random HeightVoteSet - // TODO: hvs := - - rs := &RoundState{ - StartTime: tmtime.Now(), - CommitTime: tmtime.Now(), - Validators: vset, - Proposal: proposal, - ProposalBlock: block, - ProposalBlockParts: parts, - LockedBlock: block, - LockedBlockParts: parts, - ValidBlock: block, - ValidBlockParts: parts, - Votes: nil, // TODO - LastCommit: nil, // TODO - LastValidators: vset, - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - amino.DeepCopy(rs) - } -} diff --git a/consensus/wal.go b/consensus/wal.go index 7b09ffa2d..80f5e6b07 100644 --- a/consensus/wal.go +++ b/consensus/wal.go @@ -2,30 +2,26 @@ package consensus import ( "encoding/binary" + "errors" "fmt" "hash/crc32" "io" "path/filepath" "time" - "github.com/pkg/errors" - - amino "github.com/tendermint/go-amino" + "github.com/gogo/protobuf/proto" auto "github.com/tendermint/tendermint/libs/autofile" + tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" + tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmtime "github.com/tendermint/tendermint/types/time" ) const ( - // amino overhead + time.Time + max consensus msg size - // - // q: where 24 bytes are coming from? - // a: cdc.MustMarshalBinaryBare(empty consensus part msg) = 14 bytes. +10 - // bytes just in case amino will require more space in the future. 
+ // time.Time + max consensus msg size maxMsgSizeBytes = maxMsgSize + 24 // how often the WAL should be sync'd during period sync'ing @@ -49,12 +45,10 @@ type EndHeightMessage struct { type WALMessage interface{} -func RegisterWALMessages(cdc *amino.Codec) { - cdc.RegisterInterface((*WALMessage)(nil), nil) - cdc.RegisterConcrete(types.EventDataRoundState{}, "tendermint/wal/EventDataRoundState", nil) - cdc.RegisterConcrete(msgInfo{}, "tendermint/wal/MsgInfo", nil) - cdc.RegisterConcrete(timeoutInfo{}, "tendermint/wal/TimeoutInfo", nil) - cdc.RegisterConcrete(EndHeightMessage{}, "tendermint/wal/EndHeightMessage", nil) +func init() { + tmjson.RegisterType(msgInfo{}, "tendermint/wal/MsgInfo") + tmjson.RegisterType(timeoutInfo{}, "tendermint/wal/TimeoutInfo") + tmjson.RegisterType(EndHeightMessage{}, "tendermint/wal/EndHeightMessage") } //-------------------------------------------------------- @@ -97,7 +91,7 @@ var _ WAL = &BaseWAL{} func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) { err := tmos.EnsureDir(filepath.Dir(walFile), 0700) if err != nil { - return nil, errors.Wrap(err, "failed to ensure WAL directory is in place") + return nil, fmt.Errorf("failed to ensure WAL directory is in place: %w", err) } group, err := auto.OpenGroup(walFile, groupOptions...) @@ -132,7 +126,9 @@ func (wal *BaseWAL) OnStart() error { if err != nil { return err } else if size == 0 { - wal.WriteSync(EndHeightMessage{0}) + if err := wal.WriteSync(EndHeightMessage{0}); err != nil { + return err + } } err = wal.group.Start() if err != nil { @@ -167,8 +163,12 @@ func (wal *BaseWAL) FlushAndSync() error { // before cleaning up files. func (wal *BaseWAL) OnStop() { wal.flushTicker.Stop() - wal.FlushAndSync() - wal.group.Stop() + if err := wal.FlushAndSync(); err != nil { + wal.Logger.Error("error on flush data to disk", "error", err) + } + if err := wal.group.Stop(); err != nil { + wal.Logger.Error("error trying to stop wal", "error", err) + } wal.group.Close() } @@ -208,7 +208,7 @@ func (wal *BaseWAL) WriteSync(msg WALMessage) error { } if err := wal.FlushAndSync(); err != nil { - wal.Logger.Error(`WriteSync failed to flush consensus wal. + wal.Logger.Error(`WriteSync failed to flush consensus wal. WARNING: may result in creating alternative proposals / votes for the current height iff the node restarted`, "err", err) return err @@ -282,11 +282,9 @@ func (wal *BaseWAL) SearchForEndHeight( return nil, false, nil } -/////////////////////////////////////////////////////////////////////////////// - // A WALEncoder writes custom-encoded WAL messages to an output stream. // -// Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value (go-amino encoded) +// Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value type WALEncoder struct { wr io.Writer } @@ -297,10 +295,22 @@ func NewWALEncoder(wr io.Writer) *WALEncoder { } // Encode writes the custom encoding of v to the stream. It returns an error if -// the amino-encoded size of v is greater than 1MB. Any error encountered +// the encoded size of v is greater than 1MB. Any error encountered // during the write is also returned. 
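Before the encoder below, a note on the error-handling change running through this file: github.com/pkg/errors is dropped in favor of the standard library, with fmt.Errorf and the %w verb keeping the cause on the error chain for errors.Is and errors.As. A stdlib-only sketch:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// ensureDir mirrors the NewWAL change above: errors.Wrap(err, msg)
// becomes fmt.Errorf("msg: %w", err), which wraps rather than flattens.
func ensureDir(path string) error {
	if _, err := os.Stat(path); err != nil {
		return fmt.Errorf("failed to ensure WAL directory is in place: %w", err)
	}
	return nil
}

func main() {
	err := ensureDir("/definitely/not/here")
	// The underlying cause is still inspectable through the wrapper:
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true
}
```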
func (enc *WALEncoder) Encode(v *TimedWALMessage) error { - data := cdc.MustMarshalBinaryBare(v) + pbMsg, err := WALToProto(v.Msg) + if err != nil { + return err + } + pv := tmcons.TimedWALMessage{ + Time: v.Time, + Msg: pbMsg, + } + + data, err := proto.Marshal(&pv) + if err != nil { + panic(fmt.Errorf("encode timed wal message failure: %w", err)) + } crc := crc32.Checksum(data, crc32c) length := uint32(len(data)) @@ -314,12 +324,10 @@ func (enc *WALEncoder) Encode(v *TimedWALMessage) error { binary.BigEndian.PutUint32(msg[4:8], length) copy(msg[8:], data) - _, err := enc.wr.Write(msg) + _, err = enc.wr.Write(msg) return err } -/////////////////////////////////////////////////////////////////////////////// - // IsDataCorruptionError returns true if data has been corrupted inside WAL. func IsDataCorruptionError(err error) bool { _, ok := err.(DataCorruptionError) @@ -358,7 +366,7 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) { b := make([]byte, 4) _, err := dec.rd.Read(b) - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, err } if err != nil { @@ -392,13 +400,22 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) { return nil, DataCorruptionError{fmt.Errorf("checksums do not match: read: %v, actual: %v", crc, actualCRC)} } - var res = new(TimedWALMessage) // nolint: gosimple - err = cdc.UnmarshalBinaryBare(data, res) + var res = new(tmcons.TimedWALMessage) + err = proto.Unmarshal(data, res) if err != nil { return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)} } - return res, err + walMsg, err := WALFromProto(res.Msg) + if err != nil { + return nil, DataCorruptionError{fmt.Errorf("failed to convert from proto: %w", err)} + } + tMsgWal := &TimedWALMessage{ + Time: res.Time, + Msg: walMsg, + } + + return tMsgWal, err } type nilWAL struct{} diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index 422c3f73b..b3dced8f4 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -9,15 +9,12 @@ import ( "testing" "time" - "github.com/pkg/errors" - db "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/abci/example/kvstore" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" @@ -38,43 +35,57 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { logger := log.TestingLogger().With("wal_generator", "wal_generator") logger.Info("generating WAL (last height msg excluded)", "numBlocks", numBlocks) - ///////////////////////////////////////////////////////////////////////////// // COPY PASTE FROM node.go WITH A FEW MODIFICATIONS // NOTE: we can't import node package because of circular dependency. // NOTE: we don't do handshake so need to set state.Version.Consensus.App directly.
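Stepping back to the encoder and decoder rewritten above: the record layout survives the amino-to-protobuf swap unchanged - 4 bytes of checksum, 4 bytes of big-endian length, then the payload. A self-contained sketch of that framing, assuming the Castagnoli polynomial behind the crc32c table:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

var crc32c = crc32.MakeTable(crc32.Castagnoli)

// encodeFrame mirrors the WAL record layout: 4 bytes CRC + 4 bytes length
// + payload. The payload stands in for the marshalled TimedWALMessage.
func encodeFrame(data []byte) []byte {
	msg := make([]byte, 8+len(data))
	binary.BigEndian.PutUint32(msg[0:4], crc32.Checksum(data, crc32c))
	binary.BigEndian.PutUint32(msg[4:8], uint32(len(data)))
	copy(msg[8:], data)
	return msg
}

// decodeFrame validates length and checksum before handing back the payload,
// as the decoder's DataCorruptionError paths do.
func decodeFrame(msg []byte) ([]byte, error) {
	if len(msg) < 8 {
		return nil, fmt.Errorf("frame too short")
	}
	crc := binary.BigEndian.Uint32(msg[0:4])
	length := binary.BigEndian.Uint32(msg[4:8])
	data := msg[8:]
	if uint32(len(data)) != length {
		return nil, fmt.Errorf("length mismatch")
	}
	if actual := crc32.Checksum(data, crc32c); actual != crc {
		return nil, fmt.Errorf("checksums do not match: read: %v, actual: %v", crc, actual)
	}
	return data, nil
}

func main() {
	frame := encodeFrame([]byte("payload"))
	data, err := decodeFrame(frame)
	fmt.Println(string(data), err)
	if !bytes.Equal(data, []byte("payload")) {
		panic("roundtrip failed")
	}
}
```

Because the framing is serialization-agnostic, old and new WAL files differ only in how the payload bytes decode.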
privValidatorKeyFile := config.PrivValidatorKeyFile() privValidatorStateFile := config.PrivValidatorStateFile() - privValidator := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) + privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) + if err != nil { + return err + } genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) if err != nil { - return errors.Wrap(err, "failed to read genesis file") + return fmt.Errorf("failed to read genesis file: %w", err) } blockStoreDB := db.NewMemDB() stateDB := blockStoreDB + stateStore := sm.NewStore(stateDB) state, err := sm.MakeGenesisState(genDoc) if err != nil { - return errors.Wrap(err, "failed to make genesis state") + return fmt.Errorf("failed to make genesis state: %w", err) } state.Version.Consensus.App = kvstore.ProtocolVersion - sm.SaveState(stateDB, state) + if err = stateStore.Save(state); err != nil { + t.Error(err) + } + blockStore := store.NewBlockStore(blockStoreDB) proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app)) proxyApp.SetLogger(logger.With("module", "proxy")) if err := proxyApp.Start(); err != nil { - return errors.Wrap(err, "failed to start proxy app connections") + return fmt.Errorf("failed to start proxy app connections: %w", err) } - defer proxyApp.Stop() + t.Cleanup(func() { + if err := proxyApp.Stop(); err != nil { + t.Error(err) + } + }) eventBus := types.NewEventBus() eventBus.SetLogger(logger.With("module", "events")) if err := eventBus.Start(); err != nil { - return errors.Wrap(err, "failed to start event bus") + return fmt.Errorf("failed to start event bus: %w", err) } - defer eventBus.Stop() - mempool := mock.Mempool{} - evpool := sm.MockEvidencePool{} - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) + t.Cleanup(func() { + if err := eventBus.Stop(); err != nil { + t.Error(err) + } + }) + mempool := emptyMempool{} + evpool := sm.EmptyEvidencePool{} + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) consensusState := NewState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetLogger(logger) consensusState.SetEventBus(eventBus) @@ -82,30 +93,36 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { consensusState.SetPrivValidator(privValidator) } // END OF COPY PASTE - ///////////////////////////////////////////////////////////////////////////// // set consensus wal to buffered WAL, which will write all incoming msgs to buffer numBlocksWritten := make(chan struct{}) wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten) // see wal.go#103 - wal.Write(EndHeightMessage{0}) + if err := wal.Write(EndHeightMessage{0}); err != nil { + t.Error(err) + } + consensusState.wal = wal if err := consensusState.Start(); err != nil { - return errors.Wrap(err, "failed to start consensus state") + return fmt.Errorf("failed to start consensus state: %w", err) } select { case <-numBlocksWritten: - consensusState.Stop() + if err := consensusState.Stop(); err != nil { + t.Error(err) + } return nil case <-time.After(1 * time.Minute): - consensusState.Stop() + if err := consensusState.Stop(); err != nil { + t.Error(err) + } return fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks) } } -//WALWithNBlocks returns a WAL content with numBlocks. +// WALWithNBlocks returns a WAL content with numBlocks. 
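The generator above also trades bare defer calls for t.Cleanup and stops discarding Stop errors. A minimal sketch of that pattern:

```go
package consensus_test

import "testing"

// stopper is a stand-in for services like the proxy app or the event bus.
type stopper interface{ Stop() error }

// startForTest registers shutdown with t.Cleanup, as the generator now
// does: cleanups run even when the test fails early, and a failing Stop
// is surfaced via t.Error instead of being silently dropped.
func startForTest(t *testing.T, svc stopper) {
	t.Helper()
	t.Cleanup(func() {
		if err := svc.Stop(); err != nil {
			t.Error(err)
		}
	})
}
```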
func WALWithNBlocks(t *testing.T, numBlocks int) (data []byte, err error) { var b bytes.Buffer wr := bufio.NewWriter(&b) diff --git a/consensus/wal_test.go b/consensus/wal_test.go index 6871f534d..4ee813609 100644 --- a/consensus/wal_test.go +++ b/consensus/wal_test.go @@ -46,7 +46,9 @@ func TestWALTruncate(t *testing.T) { err = wal.Start() require.NoError(t, err) defer func() { - wal.Stop() + if err := wal.Stop(); err != nil { + t.Error(err) + } // wait for the wal to finish shutting down so we // can safely remove the directory wal.Wait() @@ -58,9 +60,11 @@ func TestWALTruncate(t *testing.T) { err = WALGenerateNBlocks(t, wal.Group(), 60) require.NoError(t, err) - time.Sleep(1 * time.Millisecond) //wait groupCheckDuration, make sure RotateFile run + time.Sleep(1 * time.Millisecond) // wait groupCheckDuration, make sure RotateFile run - wal.FlushAndSync() + if err := wal.FlushAndSync(); err != nil { + t.Error(err) + } h := int64(50) gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{}) @@ -82,6 +86,7 @@ func TestWALEncoderDecoder(t *testing.T) { msgs := []TimedWALMessage{ {Time: now, Msg: EndHeightMessage{0}}, {Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}}, + {Time: now, Msg: tmtypes.EventDataRoundState{Height: 1, Round: 1, Step: ""}}, } b := new(bytes.Buffer) @@ -98,7 +103,6 @@ func TestWALEncoderDecoder(t *testing.T) { dec := NewWALDecoder(b) decoded, err := dec.Decode() require.NoError(t, err) - assert.Equal(t, msg.Time.UTC(), decoded.Time) assert.Equal(t, msg.Msg, decoded.Msg) } @@ -115,7 +119,9 @@ func TestWALWrite(t *testing.T) { err = wal.Start() require.NoError(t, err) defer func() { - wal.Stop() + if err := wal.Stop(); err != nil { + t.Error(err) + } // wait for the wal to finish shutting down so we // can safely remove the directory wal.Wait() @@ -128,14 +134,17 @@ func TestWALWrite(t *testing.T) { Part: &tmtypes.Part{ Index: 1, Bytes: make([]byte, 1), - Proof: merkle.SimpleProof{ + Proof: merkle.Proof{ Total: 1, Index: 1, LeafHash: make([]byte, maxMsgSizeBytes-30), }, }, } - err = wal.Write(msg) + + err = wal.Write(msgInfo{ + Msg: msg, + }) if assert.Error(t, err) { assert.Contains(t, err.Error(), "msg is too big") } @@ -188,7 +197,9 @@ func TestWALPeriodicSync(t *testing.T) { require.NoError(t, wal.Start()) defer func() { - wal.Stop() + if err := wal.Stop(); err != nil { + t.Error(err) + } wal.Wait() }() @@ -233,7 +244,9 @@ func benchmarkWalDecode(b *testing.B, n int) { enc := NewWALEncoder(buf) data := nBytes(n) - enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()}) + if err := enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()}); err != nil { + b.Error(err) + } encoded := buf.Bytes() diff --git a/crypto/README.md b/crypto/README.md index cfbceb449..20346d715 100644 --- a/crypto/README.md +++ b/crypto/README.md @@ -3,28 +3,26 @@ crypto is the cryptographic package adapted for Tendermint's uses ## Importing it + To get the interfaces, `import "github.com/tendermint/tendermint/crypto"` For any specific algorithm, use its specific module e.g. `import "github.com/tendermint/tendermint/crypto/ed25519"` -If you want to decode bytes into one of the types, but don't care about the specific algorithm, use -`import "github.com/tendermint/tendermint/crypto/amino"` - ## Binary encoding For Binary encoding, please refer to the [Tendermint encoding specification](https://docs.tendermint.com/master/spec/blockchain/encoding.html). 
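The JSON section that follows keeps the type/value envelope wire-compatible with the old amino JSON. A stdlib-only sketch of that envelope (the real registry lives in libs/json; the struct here is a stand-in):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// typedValue mimics the {"type": ..., "value": ...} envelope shown in the
// README examples below; registered type names play the role of Type.
type typedValue struct {
	Type  string          `json:"type"`
	Value json.RawMessage `json:"value"`
}

func main() {
	pub := []byte{0x01, 0x3f, 0xfe}  // stand-in key bytes
	value, err := json.Marshal(pub)  // []byte marshals to base64, as below
	if err != nil {
		panic(err)
	}
	bz, err := json.Marshal(typedValue{Type: "tendermint/PubKeyEd25519", Value: value})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bz))
	// {"type":"tendermint/PubKeyEd25519","value":"AT/+"}
}
```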
## JSON Encoding -crypto `.Bytes()` uses Amino:binary encoding, but Amino:JSON is also supported. +JSON encoding is done using Tendermint's internal JSON encoder. For more information on JSON encoding, please refer to [Tendermint JSON encoding](https://github.com/tendermint/tendermint/blob/ccc990498df70f5a3df06d22476c9bb83812cbe3/libs/json/doc.go). ```go -Example Amino:JSON encodings: +Example JSON encodings: -ed25519.PrivKeyEd25519 - {"type":"tendermint/PrivKeyEd25519","value":"EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="} -ed25519.PubKeyEd25519 - {"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="} +ed25519.PrivKey - {"type":"tendermint/PrivKeyEd25519","value":"EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="} +ed25519.PubKey - {"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="} sr25519.PrivKeySr25519 - {"type":"tendermint/PrivKeySr25519","value":"xtYVH8UCIqfrY8FIFc0QEpAEBShSG4NT0zlEOVSZ2w4="} sr25519.PubKeySr25519 - {"type":"tendermint/PubKeySr25519","value":"8sKBLKQ/OoXMcAJVxBqz1U7TyxRFQ5cmliuHy4MrF0s="} crypto.PrivKeySecp256k1 - {"type":"tendermint/PrivKeySecp256k1","value":"zx4Pnh67N+g2V+5vZbQzEyRerX9c4ccNZOVzM9RvJ0Y="} diff --git a/crypto/armor/armor_test.go b/crypto/armor/armor_test.go index 4aa23b211..8ecfaa0e1 100644 --- a/crypto/armor/armor_test.go +++ b/crypto/armor/armor_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestSimpleArmor(t *testing.T) { +func TestArmor(t *testing.T) { blockType := "MINT TEST" data := []byte("somedata") armorStr := EncodeArmor(blockType, nil, data) diff --git a/crypto/crypto.go b/crypto/crypto.go index 045a35e86..9a341f9ac 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -22,8 +22,9 @@ func AddressHash(bz []byte) Address { type PubKey interface { Address() Address Bytes() []byte - VerifyBytes(msg []byte, sig []byte) bool + VerifySignature(msg []byte, sig []byte) bool Equals(PubKey) bool + Type() string } type PrivKey interface { @@ -31,6 +32,7 @@ type PrivKey interface { Sign(msg []byte) ([]byte, error) PubKey() PubKey Equals(PrivKey) bool + Type() string } type Symmetric interface { diff --git a/crypto/ed25519/ed25519.go b/crypto/ed25519/ed25519.go index 1ce6f7b11..30d470485 100644 --- a/crypto/ed25519/ed25519.go +++ b/crypto/ed25519/ed25519.go @@ -2,47 +2,50 @@ package ed25519 import ( "bytes" + "crypto/ed25519" "crypto/subtle" "fmt" "io" - amino "github.com/tendermint/go-amino" - "golang.org/x/crypto/ed25519" + "github.com/hdevalence/ed25519consensus" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" + tmjson "github.com/tendermint/tendermint/libs/json" ) //------------------------------------- -var _ crypto.PrivKey = PrivKeyEd25519{} +var _ crypto.PrivKey = PrivKey{} const ( - PrivKeyAminoName = "tendermint/PrivKeyEd25519" - PubKeyAminoName = "tendermint/PubKeyEd25519" + PrivKeyName = "tendermint/PrivKeyEd25519" + PubKeyName = "tendermint/PubKeyEd25519" + // PubKeySize is the size, in bytes, of public keys as used in this package. + PubKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 // Size of an Edwards25519 signature. Namely the size of a compressed // Edwards25519 point, and a field element. Both of which are 32 bytes. SignatureSize = 64 -) + // SeedSize is the size, in bytes, of private key seeds.
These are the + // private key representations used by RFC 8032. + SeedSize = 32 -var cdc = amino.NewCodec() + KeyType = "ed25519" +) func init() { - cdc.RegisterInterface((*crypto.PubKey)(nil), nil) - cdc.RegisterConcrete(PubKeyEd25519{}, - PubKeyAminoName, nil) - - cdc.RegisterInterface((*crypto.PrivKey)(nil), nil) - cdc.RegisterConcrete(PrivKeyEd25519{}, - PrivKeyAminoName, nil) + tmjson.RegisterType(PubKey{}, PubKeyName) + tmjson.RegisterType(PrivKey{}, PrivKeyName) } -// PrivKeyEd25519 implements crypto.PrivKey. -type PrivKeyEd25519 [64]byte +// PrivKey implements crypto.PrivKey. +type PrivKey []byte -// Bytes marshals the privkey using amino encoding. -func (privKey PrivKeyEd25519) Bytes() []byte { - return cdc.MustMarshalBinaryBare(privKey) +// Bytes returns the privkey byte format. +func (privKey PrivKey) Bytes() []byte { + return []byte(privKey) } // Sign produces a signature on the provided message. @@ -52,19 +55,19 @@ func (privKey PrivKeyEd25519) Bytes() []byte { // The latter 32 bytes should be the compressed public key. // If these conditions aren't met, Sign will panic or produce an // incorrect signature. -func (privKey PrivKeyEd25519) Sign(msg []byte) ([]byte, error) { - signatureBytes := ed25519.Sign(privKey[:], msg) +func (privKey PrivKey) Sign(msg []byte) ([]byte, error) { + signatureBytes := ed25519.Sign(ed25519.PrivateKey(privKey), msg) return signatureBytes, nil } // PubKey gets the corresponding public key from the private key. -func (privKey PrivKeyEd25519) PubKey() crypto.PubKey { - privKeyBytes := [64]byte(privKey) +// +// Panics if the private key is not initialized. +func (privKey PrivKey) PubKey() crypto.PubKey { + // If the latter 32 bytes of the privkey are all zero, privkey is not + // initialized. initialized := false - // If the latter 32 bytes of the privkey are all zero, compute the pubkey - // otherwise privkey is initialized and we can use the cached value inside - // of the private key. - for _, v := range privKeyBytes[32:] { + for _, v := range privKey[32:] { if v != 0 { initialized = true break @@ -72,97 +75,96 @@ func (privKey PrivKeyEd25519) PubKey() crypto.PubKey { } if !initialized { - panic("Expected PrivKeyEd25519 to include concatenated pubkey bytes") + panic("Expected ed25519 PrivKey to include concatenated pubkey bytes") } - var pubkeyBytes [PubKeyEd25519Size]byte - copy(pubkeyBytes[:], privKeyBytes[32:]) - return PubKeyEd25519(pubkeyBytes) + pubkeyBytes := make([]byte, PubKeySize) + copy(pubkeyBytes, privKey[32:]) + return PubKey(pubkeyBytes) } // Equals - you probably don't need to use this. // Runs in constant time based on length of the keys. -func (privKey PrivKeyEd25519) Equals(other crypto.PrivKey) bool { - if otherEd, ok := other.(PrivKeyEd25519); ok { +func (privKey PrivKey) Equals(other crypto.PrivKey) bool { + if otherEd, ok := other.(PrivKey); ok { return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1 } return false } +func (privKey PrivKey) Type() string { + return KeyType +} + // GenPrivKey generates a new ed25519 private key. // It uses OS randomness in conjunction with the current global random seed // in tendermint/libs/common to generate the private key. -func GenPrivKey() PrivKeyEd25519 { +func GenPrivKey() PrivKey { return genPrivKey(crypto.CReader()) } // genPrivKey generates a new ed25519 private key using the provided reader. 
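The rewrite above drops both amino and the fixed-size arrays: PrivKey is now a plain byte slice in the RFC 8032 layout that crypto/ed25519 itself produces - a 32-byte seed followed by the cached 32-byte public key, which is why PubKey() can simply slice the suffix. A stdlib-only demonstration of that layout:

```go
package main

import (
	"bytes"
	"crypto/ed25519"
	"fmt"
)

func main() {
	// Deterministic all-zero seed, for demonstration only.
	seed := make([]byte, ed25519.SeedSize)
	priv := ed25519.NewKeyFromSeed(seed)

	// 64-byte private key = 32-byte seed || 32-byte public key,
	// matching the PrivateKeySize, SeedSize, and PubKeySize constants above.
	pub := priv.Public().(ed25519.PublicKey)
	fmt.Println(len(priv), len(pub))                 // 64 32
	fmt.Println(bytes.Equal(priv[32:], pub))         // true: cached pubkey suffix
	fmt.Println(bytes.Equal(priv.Seed(), priv[:32])) // true

	sig := ed25519.Sign(priv, []byte("msg"))
	fmt.Println(ed25519.Verify(pub, []byte("msg"), sig)) // true
}
```

Verification in the diff goes through ed25519consensus.Verify rather than the standard library, trading strict RFC 8032 verification for consensus-stable (ZIP-215 style) rules; the key layout is the same either way.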
-func genPrivKey(rand io.Reader) PrivKeyEd25519 { - seed := make([]byte, 32) +func genPrivKey(rand io.Reader) PrivKey { + seed := make([]byte, SeedSize) + _, err := io.ReadFull(rand, seed) if err != nil { panic(err) } - privKey := ed25519.NewKeyFromSeed(seed) - var privKeyEd PrivKeyEd25519 - copy(privKeyEd[:], privKey) - return privKeyEd + return PrivKey(ed25519.NewKeyFromSeed(seed)) } // GenPrivKeyFromSecret hashes the secret with SHA2, and uses // that 32 byte output to create the private key. // NOTE: secret should be the output of a KDF like bcrypt, // if it's derived from user input. -func GenPrivKeyFromSecret(secret []byte) PrivKeyEd25519 { +func GenPrivKeyFromSecret(secret []byte) PrivKey { seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes. - privKey := ed25519.NewKeyFromSeed(seed) - var privKeyEd PrivKeyEd25519 - copy(privKeyEd[:], privKey) - return privKeyEd + return PrivKey(ed25519.NewKeyFromSeed(seed)) } //------------------------------------- -var _ crypto.PubKey = PubKeyEd25519{} - -// PubKeyEd25519Size is the number of bytes in an Ed25519 signature. -const PubKeyEd25519Size = 32 +var _ crypto.PubKey = PubKey{} // PubKeyEd25519 implements crypto.PubKey for the Ed25519 signature scheme. -type PubKeyEd25519 [PubKeyEd25519Size]byte +type PubKey []byte // Address is the SHA256-20 of the raw pubkey bytes. -func (pubKey PubKeyEd25519) Address() crypto.Address { - return crypto.Address(tmhash.SumTruncated(pubKey[:])) +func (pubKey PubKey) Address() crypto.Address { + if len(pubKey) != PubKeySize { + panic("pubkey is incorrect size") + } + return crypto.Address(tmhash.SumTruncated(pubKey)) } -// Bytes marshals the PubKey using amino encoding. -func (pubKey PubKeyEd25519) Bytes() []byte { - bz, err := cdc.MarshalBinaryBare(pubKey) - if err != nil { - panic(err) - } - return bz +// Bytes returns the PubKey byte format. +func (pubKey PubKey) Bytes() []byte { + return []byte(pubKey) } -func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig []byte) bool { +func (pubKey PubKey) VerifySignature(msg []byte, sig []byte) bool { // make sure we use the same algorithm to sign if len(sig) != SignatureSize { return false } - return ed25519.Verify(pubKey[:], msg, sig) + + return ed25519consensus.Verify(ed25519.PublicKey(pubKey), msg, sig) +} + +func (pubKey PubKey) String() string { + return fmt.Sprintf("PubKeyEd25519{%X}", []byte(pubKey)) } -func (pubKey PubKeyEd25519) String() string { - return fmt.Sprintf("PubKeyEd25519{%X}", pubKey[:]) +func (pubKey PubKey) Type() string { + return KeyType } -// nolint: golint -func (pubKey PubKeyEd25519) Equals(other crypto.PubKey) bool { - if otherEd, ok := other.(PubKeyEd25519); ok { +func (pubKey PubKey) Equals(other crypto.PubKey) bool { + if otherEd, ok := other.(PubKey); ok { return bytes.Equal(pubKey[:], otherEd[:]) } diff --git a/crypto/ed25519/ed25519_test.go b/crypto/ed25519/ed25519_test.go index 6fe2c0946..8c48847c0 100644 --- a/crypto/ed25519/ed25519_test.go +++ b/crypto/ed25519/ed25519_test.go @@ -20,11 +20,11 @@ func TestSignAndValidateEd25519(t *testing.T) { require.Nil(t, err) // Test the signature - assert.True(t, pubKey.VerifyBytes(msg, sig)) + assert.True(t, pubKey.VerifySignature(msg, sig)) // Mutate the signature, just one bit. 
// TODO: Replace this with a much better fuzzer, tendermint/ed25519/issues/10 sig[7] ^= byte(0x01) - assert.False(t, pubKey.VerifyBytes(msg, sig)) + assert.False(t, pubKey.VerifySignature(msg, sig)) } diff --git a/crypto/encoding/amino/amino.go b/crypto/encoding/amino/amino.go deleted file mode 100644 index f7a2dde77..000000000 --- a/crypto/encoding/amino/amino.go +++ /dev/null @@ -1,86 +0,0 @@ -package cryptoamino - -import ( - "reflect" - - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/multisig" - "github.com/tendermint/tendermint/crypto/secp256k1" - "github.com/tendermint/tendermint/crypto/sr25519" -) - -var cdc = amino.NewCodec() - -// nameTable is used to map public key concrete types back -// to their registered amino names. This should eventually be handled -// by amino. Example usage: -// nameTable[reflect.TypeOf(ed25519.PubKeyEd25519{})] = ed25519.PubKeyAminoName -var nameTable = make(map[reflect.Type]string, 3) - -func init() { - // NOTE: It's important that there be no conflicts here, - // as that would change the canonical representations, - // and therefore change the address. - // TODO: Remove above note when - // https://github.com/tendermint/go-amino/issues/9 - // is resolved - RegisterAmino(cdc) - - // TODO: Have amino provide a way to go from concrete struct to route directly. - // Its currently a private API - nameTable[reflect.TypeOf(ed25519.PubKeyEd25519{})] = ed25519.PubKeyAminoName - nameTable[reflect.TypeOf(sr25519.PubKeySr25519{})] = sr25519.PubKeyAminoName - nameTable[reflect.TypeOf(secp256k1.PubKeySecp256k1{})] = secp256k1.PubKeyAminoName - nameTable[reflect.TypeOf(multisig.PubKeyMultisigThreshold{})] = multisig.PubKeyMultisigThresholdAminoRoute -} - -// PubkeyAminoName returns the amino route of a pubkey -// cdc is currently passed in, as eventually this will not be using -// a package level codec. -func PubkeyAminoName(cdc *amino.Codec, key crypto.PubKey) (string, bool) { - route, found := nameTable[reflect.TypeOf(key)] - return route, found -} - -// RegisterAmino registers all crypto related types in the given (amino) codec. 
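The registry deleted below dispatched through a reflection-based name table. Its replacement is the new crypto/encoding/codec.go whose header appears at the end of this diff: keys are converted case by case to an explicit wire representation, with unsupported types rejected outright. A hedged, self-contained sketch of that shape (stand-in types and an illustrative tag byte, not the real proto oneof):

```go
package main

import (
	"errors"
	"fmt"
)

// pubKey and ed25519Pub are stand-ins for crypto.PubKey and ed25519.PubKey.
type pubKey interface{ Type() string }

type ed25519Pub []byte

func (ed25519Pub) Type() string { return "ed25519" }

// toWire sketches the explicit switch that replaces amino's registry:
// one case per supported concrete type, an error for everything else.
func toWire(k pubKey) ([]byte, error) {
	switch k := k.(type) {
	case ed25519Pub:
		return append([]byte{0x01 /* illustrative tag */}, k...), nil
	default:
		return nil, errors.New("toproto: key type is not supported")
	}
}

func main() {
	bz, err := toWire(ed25519Pub{0xaa, 0xbb})
	fmt.Println(bz, err)
}
```

An explicit switch is more verbose than a registry, but the set of encodable key types becomes visible at the conversion site instead of depending on init-time registration order.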
-func RegisterAmino(cdc *amino.Codec) { - // These are all written here instead of - cdc.RegisterInterface((*crypto.PubKey)(nil), nil) - cdc.RegisterConcrete(ed25519.PubKeyEd25519{}, - ed25519.PubKeyAminoName, nil) - cdc.RegisterConcrete(sr25519.PubKeySr25519{}, - sr25519.PubKeyAminoName, nil) - cdc.RegisterConcrete(secp256k1.PubKeySecp256k1{}, - secp256k1.PubKeyAminoName, nil) - cdc.RegisterConcrete(multisig.PubKeyMultisigThreshold{}, - multisig.PubKeyMultisigThresholdAminoRoute, nil) - - cdc.RegisterInterface((*crypto.PrivKey)(nil), nil) - cdc.RegisterConcrete(ed25519.PrivKeyEd25519{}, - ed25519.PrivKeyAminoName, nil) - cdc.RegisterConcrete(sr25519.PrivKeySr25519{}, - sr25519.PrivKeyAminoName, nil) - cdc.RegisterConcrete(secp256k1.PrivKeySecp256k1{}, - secp256k1.PrivKeyAminoName, nil) -} - -// RegisterKeyType registers an external key type to allow decoding it from bytes -func RegisterKeyType(o interface{}, name string) { - cdc.RegisterConcrete(o, name, nil) - nameTable[reflect.TypeOf(o)] = name -} - -// PrivKeyFromBytes unmarshals private key bytes and returns a PrivKey -func PrivKeyFromBytes(privKeyBytes []byte) (privKey crypto.PrivKey, err error) { - err = cdc.UnmarshalBinaryBare(privKeyBytes, &privKey) - return -} - -// PubKeyFromBytes unmarshals public key bytes and returns a PubKey -func PubKeyFromBytes(pubKeyBytes []byte) (pubKey crypto.PubKey, err error) { - err = cdc.UnmarshalBinaryBare(pubKeyBytes, &pubKey) - return -} diff --git a/crypto/encoding/amino/encode_test.go b/crypto/encoding/amino/encode_test.go deleted file mode 100644 index edc54292f..000000000 --- a/crypto/encoding/amino/encode_test.go +++ /dev/null @@ -1,236 +0,0 @@ -package cryptoamino - -import ( - "os" - "reflect" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/multisig" - "github.com/tendermint/tendermint/crypto/secp256k1" - "github.com/tendermint/tendermint/crypto/sr25519" -) - -type byter interface { - Bytes() []byte -} - -func checkAminoBinary(t *testing.T, src, dst interface{}, size int) { - // Marshal to binary bytes. - bz, err := cdc.MarshalBinaryBare(src) - require.Nil(t, err, "%+v", err) - if byterSrc, ok := src.(byter); ok { - // Make sure this is compatible with current (Bytes()) encoding. - assert.Equal(t, byterSrc.Bytes(), bz, "Amino binary vs Bytes() mismatch") - } - // Make sure we have the expected length. - assert.Equal(t, size, len(bz), "Amino binary size mismatch") - - // Unmarshal. - err = cdc.UnmarshalBinaryBare(bz, dst) - require.Nil(t, err, "%+v", err) -} - -func checkAminoJSON(t *testing.T, src interface{}, dst interface{}, isNil bool) { - // Marshal to JSON bytes. - js, err := cdc.MarshalJSON(src) - require.Nil(t, err, "%+v", err) - if isNil { - assert.Equal(t, string(js), `null`) - } else { - assert.Contains(t, string(js), `"type":`) - assert.Contains(t, string(js), `"value":`) - } - // Unmarshal. 
- err = cdc.UnmarshalJSON(js, dst) - require.Nil(t, err, "%+v", err) -} - -// ExamplePrintRegisteredTypes refers to unknown identifier: PrintRegisteredTypes -//nolint:govet -func ExamplePrintRegisteredTypes() { - cdc.PrintTypes(os.Stdout) - // Output: | Type | Name | Prefix | Length | Notes | - //| ---- | ---- | ------ | ----- | ------ | - //| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | | - //| PubKeySr25519 | tendermint/PubKeySr25519 | 0x0DFB1005 | 0x20 | | - //| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | | - //| PubKeyMultisigThreshold | tendermint/PubKeyMultisigThreshold | 0x22C1F7E2 | variable | | - //| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | | - //| PrivKeySr25519 | tendermint/PrivKeySr25519 | 0x2F82D78B | 0x20 | | - //| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | | -} - -func TestKeyEncodings(t *testing.T) { - cases := []struct { - privKey crypto.PrivKey - privSize, pubSize, sigSize int // binary sizes - }{ - { - privKey: ed25519.GenPrivKey(), - privSize: 69, - pubSize: 37, - sigSize: 65, - }, - { - privKey: sr25519.GenPrivKey(), - privSize: 37, - pubSize: 37, - sigSize: 65, - }, - { - privKey: secp256k1.GenPrivKey(), - privSize: 37, - pubSize: 38, - sigSize: 65, - }, - } - - for tcIndex, tc := range cases { - - // Check (de/en)codings of PrivKeys. - var priv2, priv3 crypto.PrivKey - checkAminoBinary(t, tc.privKey, &priv2, tc.privSize) - assert.EqualValues(t, tc.privKey, priv2, "tc #%d", tcIndex) - checkAminoJSON(t, tc.privKey, &priv3, false) // TODO also check Prefix bytes. - assert.EqualValues(t, tc.privKey, priv3, "tc #%d", tcIndex) - - // Check (de/en)codings of Signatures. - var sig1, sig2 []byte - sig1, err := tc.privKey.Sign([]byte("something")) - assert.NoError(t, err, "tc #%d", tcIndex) - checkAminoBinary(t, sig1, &sig2, tc.sigSize) - assert.EqualValues(t, sig1, sig2, "tc #%d", tcIndex) - - // Check (de/en)codings of PubKeys. - pubKey := tc.privKey.PubKey() - var pub2, pub3 crypto.PubKey - checkAminoBinary(t, pubKey, &pub2, tc.pubSize) - assert.EqualValues(t, pubKey, pub2, "tc #%d", tcIndex) - checkAminoJSON(t, pubKey, &pub3, false) // TODO also check Prefix bytes. - assert.EqualValues(t, pubKey, pub3, "tc #%d", tcIndex) - } -} - -func TestNilEncodings(t *testing.T) { - - // Check nil Signature. - var a, b []byte - checkAminoJSON(t, &a, &b, true) - assert.EqualValues(t, a, b) - - // Check nil PubKey. - var c, d crypto.PubKey - checkAminoJSON(t, &c, &d, true) - assert.EqualValues(t, c, d) - - // Check nil PrivKey. 
- var e, f crypto.PrivKey - checkAminoJSON(t, &e, &f, true) - assert.EqualValues(t, e, f) -} - -func TestPubKeyInvalidDataProperReturnsEmpty(t *testing.T) { - pk, err := PubKeyFromBytes([]byte("foo")) - require.NotNil(t, err) - require.Nil(t, pk) -} - -func TestPubkeyAminoName(t *testing.T) { - tests := []struct { - key crypto.PubKey - want string - found bool - }{ - {ed25519.PubKeyEd25519{}, ed25519.PubKeyAminoName, true}, - {sr25519.PubKeySr25519{}, sr25519.PubKeyAminoName, true}, - {secp256k1.PubKeySecp256k1{}, secp256k1.PubKeyAminoName, true}, - {multisig.PubKeyMultisigThreshold{}, multisig.PubKeyMultisigThresholdAminoRoute, true}, - } - for i, tc := range tests { - got, found := PubkeyAminoName(cdc, tc.key) - require.Equal(t, tc.found, found, "not equal on tc %d", i) - if tc.found { - require.Equal(t, tc.want, got, "not equal on tc %d", i) - } - } -} - -var _ crypto.PrivKey = testPriv{} -var _ crypto.PubKey = testPub{} -var testCdc = amino.NewCodec() - -type testPriv []byte - -func (privkey testPriv) PubKey() crypto.PubKey { return testPub{} } -func (privkey testPriv) Bytes() []byte { - return testCdc.MustMarshalBinaryBare(privkey) -} -func (privkey testPriv) Sign(msg []byte) ([]byte, error) { return []byte{}, nil } -func (privkey testPriv) Equals(other crypto.PrivKey) bool { return true } - -type testPub []byte - -func (key testPub) Address() crypto.Address { return crypto.Address{} } -func (key testPub) Bytes() []byte { - return testCdc.MustMarshalBinaryBare(key) -} -func (key testPub) VerifyBytes(msg []byte, sig []byte) bool { return true } -func (key testPub) Equals(other crypto.PubKey) bool { return true } - -var ( - privAminoName = "registerTest/Priv" - pubAminoName = "registerTest/Pub" -) - -func TestRegisterKeyType(t *testing.T) { - RegisterAmino(testCdc) - testCdc.RegisterConcrete(testPriv{}, privAminoName, nil) - testCdc.RegisterConcrete(testPub{}, pubAminoName, nil) - - pub := testPub{0x1} - priv := testPriv{0x2} - - // Check to make sure key cannot be decoded before registering - _, err := PrivKeyFromBytes(priv.Bytes()) - require.Error(t, err) - _, err = PubKeyFromBytes(pub.Bytes()) - require.Error(t, err) - - // Check that name is not registered - _, found := PubkeyAminoName(testCdc, pub) - require.False(t, found) - - // Register key types - RegisterKeyType(testPriv{}, privAminoName) - RegisterKeyType(testPub{}, pubAminoName) - - // Name should exist after registering - name, found := PubkeyAminoName(testCdc, pub) - require.True(t, found) - require.Equal(t, name, pubAminoName) - - // Decode keys using the encoded bytes from encoding with the other codec - decodedPriv, err := PrivKeyFromBytes(priv.Bytes()) - require.NoError(t, err) - require.Equal(t, priv, decodedPriv) - - decodedPub, err := PubKeyFromBytes(pub.Bytes()) - require.NoError(t, err) - require.Equal(t, pub, decodedPub) - - // Reset module codec after testing - cdc = amino.NewCodec() - nameTable = make(map[reflect.Type]string, 3) - RegisterAmino(cdc) - nameTable[reflect.TypeOf(ed25519.PubKeyEd25519{})] = ed25519.PubKeyAminoName - nameTable[reflect.TypeOf(sr25519.PubKeySr25519{})] = sr25519.PubKeyAminoName - nameTable[reflect.TypeOf(secp256k1.PubKeySecp256k1{})] = secp256k1.PubKeyAminoName - nameTable[reflect.TypeOf(multisig.PubKeyMultisigThreshold{})] = multisig.PubKeyMultisigThresholdAminoRoute -} diff --git a/crypto/encoding/codec.go b/crypto/encoding/codec.go new file mode 100644 index 000000000..3c552ed23 --- /dev/null +++ b/crypto/encoding/codec.go @@ -0,0 +1,63 @@ +package encoding + +import ( + "fmt" + + 
"github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" + "github.com/tendermint/tendermint/libs/json" + pc "github.com/tendermint/tendermint/proto/tendermint/crypto" +) + +func init() { + json.RegisterType((*pc.PublicKey)(nil), "tendermint.crypto.PublicKey") + json.RegisterType((*pc.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519") + json.RegisterType((*pc.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1") +} + +// PubKeyToProto takes crypto.PubKey and transforms it to a protobuf Pubkey +func PubKeyToProto(k crypto.PubKey) (pc.PublicKey, error) { + var kp pc.PublicKey + switch k := k.(type) { + case ed25519.PubKey: + kp = pc.PublicKey{ + Sum: &pc.PublicKey_Ed25519{ + Ed25519: k, + }, + } + case secp256k1.PubKey: + kp = pc.PublicKey{ + Sum: &pc.PublicKey_Secp256K1{ + Secp256K1: k, + }, + } + default: + return kp, fmt.Errorf("toproto: key type %v is not supported", k) + } + return kp, nil +} + +// PubKeyFromProto takes a protobuf Pubkey and transforms it to a crypto.Pubkey +func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) { + switch k := k.Sum.(type) { + case *pc.PublicKey_Ed25519: + if len(k.Ed25519) != ed25519.PubKeySize { + return nil, fmt.Errorf("invalid size for PubKeyEd25519. Got %d, expected %d", + len(k.Ed25519), ed25519.PubKeySize) + } + pk := make(ed25519.PubKey, ed25519.PubKeySize) + copy(pk, k.Ed25519) + return pk, nil + case *pc.PublicKey_Secp256K1: + if len(k.Secp256K1) != secp256k1.PubKeySize { + return nil, fmt.Errorf("invalid size for PubKeySecp256k1. Got %d, expected %d", + len(k.Secp256K1), secp256k1.PubKeySize) + } + pk := make(secp256k1.PubKey, secp256k1.PubKeySize) + copy(pk, k.Secp256K1) + return pk, nil + default: + return nil, fmt.Errorf("fromproto: key type %v is not supported", k) + } +} diff --git a/crypto/hash.go b/crypto/hash.go index e1d22523f..dd1b4c1dd 100644 --- a/crypto/hash.go +++ b/crypto/hash.go @@ -6,6 +6,6 @@ import ( func Sha256(bytes []byte) []byte { hasher := sha256.New() - hasher.Write(bytes) + hasher.Write(bytes) //nolint:errcheck // ignore error return hasher.Sum(nil) } diff --git a/crypto/internal/benchmarking/bench.go b/crypto/internal/benchmarking/bench.go index 43ab312f0..b74b901db 100644 --- a/crypto/internal/benchmarking/bench.go +++ b/crypto/internal/benchmarking/bench.go @@ -37,7 +37,11 @@ func BenchmarkSigning(b *testing.B, priv crypto.PrivKey) { message := []byte("Hello, world!") b.ResetTimer() for i := 0; i < b.N; i++ { - priv.Sign(message) + _, err := priv.Sign(message) + + if err != nil { + b.FailNow() + } } } @@ -53,7 +57,7 @@ func BenchmarkVerification(b *testing.B, priv crypto.PrivKey) { } b.ResetTimer() for i := 0; i < b.N; i++ { - pub.VerifyBytes(message, signature) + pub.VerifySignature(message, signature) } } diff --git a/crypto/merkle/README.md b/crypto/merkle/README.md index c44978368..16b1abb58 100644 --- a/crypto/merkle/README.md +++ b/crypto/merkle/README.md @@ -1,4 +1,4 @@ -## Simple Merkle Tree +# Merkle Tree For smaller static data structures that don't require immutable snapshots or mutability; for instance the transactions and validation signatures of a block can be hashed using this simple merkle tree logic. 
diff --git a/crypto/merkle/codec.go b/crypto/merkle/codec.go deleted file mode 100644 index 2b6ee350b..000000000 --- a/crypto/merkle/codec.go +++ /dev/null @@ -1,12 +0,0 @@ -package merkle - -import ( - amino "github.com/tendermint/go-amino" -) - -var cdc *amino.Codec - -func init() { - cdc = amino.NewCodec() - cdc.Seal() -} diff --git a/crypto/merkle/compile.sh b/crypto/merkle/compile.sh deleted file mode 100644 index 8e4c739f4..000000000 --- a/crypto/merkle/compile.sh +++ /dev/null @@ -1,6 +0,0 @@ -#! /bin/bash - -protoc --gogo_out=. -I $GOPATH/src/ -I . -I $GOPATH/src/github.com/gogo/protobuf/protobuf merkle.proto -echo "--> adding nolint declarations to protobuf generated files" -awk '/package merkle/ { print "//nolint: gas"; print; next }1' merkle.pb.go > merkle.pb.go.new -mv merkle.pb.go.new merkle.pb.go diff --git a/crypto/merkle/hash.go b/crypto/merkle/hash.go index 4e24046ac..d45130fe5 100644 --- a/crypto/merkle/hash.go +++ b/crypto/merkle/hash.go @@ -10,6 +10,11 @@ var ( innerPrefix = []byte{1} ) +// returns tmhash(<empty>) +func emptyHash() []byte { + return tmhash.Sum([]byte{}) +} + // returns tmhash(0x00 || leaf) func leafHash(leaf []byte) []byte { return tmhash.Sum(append(leafPrefix, leaf...)) diff --git a/crypto/merkle/merkle.pb.go b/crypto/merkle/merkle.pb.go deleted file mode 100644 index 80823dd2b..000000000 --- a/crypto/merkle/merkle.pb.go +++ /dev/null @@ -1,812 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: crypto/merkle/merkle.proto - -package merkle - -import ( - bytes "bytes" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ProofOp defines an operation used for calculating Merkle root -// The data could be arbitrary format, providing nessecary data -// for example neighbouring node hash -type ProofOp struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProofOp) Reset() { *m = ProofOp{} } -func (m *ProofOp) String() string { return proto.CompactTextString(m) } -func (*ProofOp) ProtoMessage() {} -func (*ProofOp) Descriptor() ([]byte, []int) { - return fileDescriptor_9c1c2162d560d38e, []int{0} -} -func (m *ProofOp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProofOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProofOp.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProofOp) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProofOp.Merge(m, src) -} -func (m *ProofOp) XXX_Size() int { - return m.Size() -} -func (m *ProofOp) XXX_DiscardUnknown() { - xxx_messageInfo_ProofOp.DiscardUnknown(m) -} - -var xxx_messageInfo_ProofOp proto.InternalMessageInfo - -func (m *ProofOp) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *ProofOp) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *ProofOp) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -// Proof is Merkle proof defined by the list of ProofOps -type Proof struct { - Ops []ProofOp `protobuf:"bytes,1,rep,name=ops,proto3" json:"ops"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Proof) Reset() { *m = Proof{} } -func (m *Proof) String() string { return proto.CompactTextString(m) } -func (*Proof) ProtoMessage() {} -func (*Proof) Descriptor() ([]byte, []int) { - return fileDescriptor_9c1c2162d560d38e, []int{1} -} -func (m *Proof) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Proof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Proof.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Proof) XXX_Merge(src proto.Message) { - xxx_messageInfo_Proof.Merge(m, src) -} -func (m *Proof) XXX_Size() int { - return m.Size() -} -func (m *Proof) XXX_DiscardUnknown() { - xxx_messageInfo_Proof.DiscardUnknown(m) -} - -var xxx_messageInfo_Proof proto.InternalMessageInfo - -func (m *Proof) GetOps() []ProofOp { - if m != nil { - return m.Ops - } - return nil -} - -func init() { - proto.RegisterType((*ProofOp)(nil), "tendermint.crypto.merkle.ProofOp") - proto.RegisterType((*Proof)(nil), "tendermint.crypto.merkle.Proof") -} - -func init() { proto.RegisterFile("crypto/merkle/merkle.proto", fileDescriptor_9c1c2162d560d38e) } - -var fileDescriptor_9c1c2162d560d38e = []byte{ - // 230 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4a, 0x2e, 0xaa, 0x2c, - 0x28, 0xc9, 
0xd7, 0xcf, 0x4d, 0x2d, 0xca, 0xce, 0x49, 0x85, 0x52, 0x7a, 0x05, 0x45, 0xf9, 0x25, - 0xf9, 0x42, 0x12, 0x25, 0xa9, 0x79, 0x29, 0xa9, 0x45, 0xb9, 0x99, 0x79, 0x25, 0x7a, 0x10, 0x65, - 0x7a, 0x10, 0x79, 0x29, 0xb5, 0x92, 0x8c, 0xcc, 0xa2, 0x94, 0xf8, 0x82, 0xc4, 0xa2, 0x92, 0x4a, - 0x7d, 0xb0, 0x62, 0xfd, 0xf4, 0xfc, 0xf4, 0x7c, 0x04, 0x0b, 0x62, 0x82, 0x92, 0x33, 0x17, 0x7b, - 0x40, 0x51, 0x7e, 0x7e, 0x9a, 0x7f, 0x81, 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, - 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98, 0x2d, 0x24, 0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, - 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0x62, 0x82, 0x54, 0xa5, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x83, - 0x85, 0xc0, 0x6c, 0x25, 0x27, 0x2e, 0x56, 0xb0, 0x21, 0x42, 0x96, 0x5c, 0xcc, 0xf9, 0x05, 0xc5, - 0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0x8a, 0x7a, 0xb8, 0x5c, 0xa7, 0x07, 0xb5, 0xd2, 0x89, - 0xe5, 0xc4, 0x3d, 0x79, 0x86, 0x20, 0x90, 0x1e, 0x27, 0x97, 0x1f, 0x0f, 0xe5, 0x18, 0x57, 0x3c, - 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0xa3, - 0xf4, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x11, 0xa6, 0x21, 0x33, - 0x51, 0x42, 0x27, 0x89, 0x0d, 0xec, 0x2b, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0xcc, - 0x2c, 0x91, 0x35, 0x01, 0x00, 0x00, -} - -func (this *ProofOp) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ProofOp) - if !ok { - that2, ok := that.(ProofOp) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Type != that1.Type { - return false - } - if !bytes.Equal(this.Key, that1.Key) { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *Proof) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Proof) - if !ok { - that2, ok := that.(Proof) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Ops) != len(that1.Ops) { - return false - } - for i := range this.Ops { - if !this.Ops[i].Equal(&that1.Ops[i]) { - return false - } - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (m *ProofOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProofOp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProofOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintMerkle(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x1a - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintMerkle(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintMerkle(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Proof) Marshal() (dAtA []byte, err error) { - 
size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Proof) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Proof) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Ops) > 0 { - for iNdEx := len(m.Ops) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ops[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMerkle(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintMerkle(dAtA []byte, offset int, v uint64) int { - offset -= sovMerkle(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func NewPopulatedProofOp(r randyMerkle, easy bool) *ProofOp { - this := &ProofOp{} - this.Type = string(randStringMerkle(r)) - v1 := r.Intn(100) - this.Key = make([]byte, v1) - for i := 0; i < v1; i++ { - this.Key[i] = byte(r.Intn(256)) - } - v2 := r.Intn(100) - this.Data = make([]byte, v2) - for i := 0; i < v2; i++ { - this.Data[i] = byte(r.Intn(256)) - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedMerkle(r, 4) - } - return this -} - -func NewPopulatedProof(r randyMerkle, easy bool) *Proof { - this := &Proof{} - if r.Intn(5) != 0 { - v3 := r.Intn(5) - this.Ops = make([]ProofOp, v3) - for i := 0; i < v3; i++ { - v4 := NewPopulatedProofOp(r, easy) - this.Ops[i] = *v4 - } - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedMerkle(r, 2) - } - return this -} - -type randyMerkle interface { - Float32() float32 - Float64() float64 - Int63() int64 - Int31() int32 - Uint32() uint32 - Intn(n int) int -} - -func randUTF8RuneMerkle(r randyMerkle) rune { - ru := r.Intn(62) - if ru < 10 { - return rune(ru + 48) - } else if ru < 36 { - return rune(ru + 55) - } - return rune(ru + 61) -} -func randStringMerkle(r randyMerkle) string { - v5 := r.Intn(100) - tmps := make([]rune, v5) - for i := 0; i < v5; i++ { - tmps[i] = randUTF8RuneMerkle(r) - } - return string(tmps) -} -func randUnrecognizedMerkle(r randyMerkle, maxFieldNumber int) (dAtA []byte) { - l := r.Intn(5) - for i := 0; i < l; i++ { - wire := r.Intn(4) - if wire == 3 { - wire = 5 - } - fieldNumber := maxFieldNumber + r.Intn(100) - dAtA = randFieldMerkle(dAtA, r, fieldNumber, wire) - } - return dAtA -} -func randFieldMerkle(dAtA []byte, r randyMerkle, fieldNumber int, wire int) []byte { - key := uint32(fieldNumber)<<3 | uint32(wire) - switch wire { - case 0: - dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) - v6 := r.Int63() - if r.Intn(2) == 0 { - v6 *= -1 - } - dAtA = encodeVarintPopulateMerkle(dAtA, uint64(v6)) - case 1: - dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) - dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) - case 2: - dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) - ll := r.Intn(100) - dAtA = encodeVarintPopulateMerkle(dAtA, uint64(ll)) - for j := 0; j < ll; j++ { - dAtA = append(dAtA, byte(r.Intn(256))) - } - default: - dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) - dAtA = append(dAtA, byte(r.Intn(256)), 
byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) - } - return dAtA -} -func encodeVarintPopulateMerkle(dAtA []byte, v uint64) []byte { - for v >= 1<<7 { - dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) - v >>= 7 - } - dAtA = append(dAtA, uint8(v)) - return dAtA -} -func (m *ProofOp) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovMerkle(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovMerkle(uint64(l)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovMerkle(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Proof) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Ops) > 0 { - for _, e := range m.Ops { - l = e.Size() - n += 1 + l + sovMerkle(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovMerkle(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMerkle(x uint64) (n int) { - return sovMerkle(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ProofOp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMerkle - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProofOp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProofOp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMerkle - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMerkle - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMerkle - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMerkle - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthMerkle - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMerkle - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMerkle - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthMerkle - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMerkle - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMerkle(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMerkle - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthMerkle - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Proof) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMerkle - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Proof: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Proof: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ops", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMerkle - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMerkle - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMerkle - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ops = append(m.Ops, ProofOp{}) - if err := m.Ops[len(m.Ops)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMerkle(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMerkle - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthMerkle - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipMerkle(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMerkle - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMerkle - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMerkle - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMerkle - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMerkle - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMerkle - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthMerkle = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMerkle = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMerkle = fmt.Errorf("proto: unexpected end of group") -) diff --git a/crypto/merkle/merkle.proto b/crypto/merkle/merkle.proto deleted file mode 100644 index 159fc58c9..000000000 --- a/crypto/merkle/merkle.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; -package tendermint.crypto.merkle; -option go_package = "github.com/tendermint/tendermint/crypto/merkle"; - -// For more information on gogo.proto, see: -// https://github.com/gogo/protobuf/blob/master/extensions.md -import "third_party/proto/gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.sizer_all) = true; - -option (gogoproto.populate_all) = true; -option (gogoproto.equal_all) = true; - -//---------------------------------------- -// Message types - -// ProofOp defines an operation used for calculating Merkle root -// The data could be arbitrary format, providing nessecary data -// for example neighbouring node hash -message ProofOp { - string type = 1; - bytes key = 2; - bytes data = 3; -} - -// Proof is Merkle proof defined by the list of ProofOps -message Proof { - repeated ProofOp ops = 1 [(gogoproto.nullable) = false]; -} diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go index 50bcdd0ac..ab43f30e7 100644 --- a/crypto/merkle/proof.go +++ b/crypto/merkle/proof.go @@ -2,137 +2,235 @@ package merkle import ( "bytes" + "errors" + "fmt" - "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/tmhash" + tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" ) -//---------------------------------------- -// ProofOp gets converted to an instance of ProofOperator: - -// ProofOperator is a layer for calculating intermediate Merkle roots -// when a series of Merkle trees are chained together. -// Run() takes leaf values from a tree and returns the Merkle -// root for the corresponding tree. 
It takes and returns a list of bytes -to allow multiple leaves to be part of a single proof, for instance in a range proof. -// ProofOp() encodes the ProofOperator in a generic way so it can later be -// decoded with OpDecoder. -type ProofOperator interface { - Run([][]byte) ([][]byte, error) - GetKey() []byte - ProofOp() ProofOp -} - -//---------------------------------------- -// Operations on a list of ProofOperators - -// ProofOperators is a slice of ProofOperator(s). -// Each operator will be applied to the input value sequentially -// and the last Merkle root will be verified with already known data -type ProofOperators []ProofOperator +const ( + // MaxAunts is the maximum number of aunts that can be included in a Proof. + // This corresponds to a tree of size 2^100, which should be sufficient for all conceivable purposes. + // This maximum helps prevent Denial-of-Service attacks by limiting the size of the proofs. + MaxAunts = 100 +) -func (poz ProofOperators) VerifyValue(root []byte, keypath string, value []byte) (err error) { - return poz.Verify(root, keypath, [][]byte{value}) +// Proof represents a Merkle proof. +// NOTE: The convention for proofs is to include leaf hashes but to +// exclude the root hash. +// This convention is implemented across IAVL range proofs as well. +// Keep this consistent unless there's a very good reason to change +// everything. This also affects the generalized proof system as +// well. +type Proof struct { + Total int64 `json:"total"` // Total number of items. + Index int64 `json:"index"` // Index of item to prove. + LeafHash []byte `json:"leaf_hash"` // Hash of item value. + Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. } -func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (err error) { - keys, err := KeyPathToKeys(keypath) - if err != nil { - return +// ProofsFromByteSlices computes inclusion proof for given items. +// proofs[0] is the proof for items[0]. +func ProofsFromByteSlices(items [][]byte) (rootHash []byte, proofs []*Proof) { + trails, rootSPN := trailsFromByteSlices(items) + rootHash = rootSPN.Hash + proofs = make([]*Proof, len(items)) + for i, trail := range trails { + proofs[i] = &Proof{ + Total: int64(len(items)), + Index: int64(i), + LeafHash: trail.Hash, + Aunts: trail.FlattenAunts(), + } } + return +} - for i, op := range poz { - key := op.GetKey() - if len(key) != 0 { - if len(keys) == 0 { - return errors.Errorf("key path has insufficient # of parts: expected no more keys but got %+v", string(key)) - } - lastKey := keys[len(keys)-1] - if !bytes.Equal(lastKey, key) { - return errors.Errorf("key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key)) - } - keys = keys[:len(keys)-1] - } - args, err = op.Run(args) - if err != nil { - return - } +// Verify that the Proof proves the root hash.
+// Check sp.Index/sp.Total manually if needed +func (sp *Proof) Verify(rootHash []byte, leaf []byte) error { + leafHash := leafHash(leaf) + if sp.Total < 0 { + return errors.New("proof total must be positive") + } + if sp.Index < 0 { + return errors.New("proof index cannot be negative") } - if !bytes.Equal(root, args[0]) { - return errors.Errorf("calculated root hash is invalid: expected %+v but got %+v", root, args[0]) + if !bytes.Equal(sp.LeafHash, leafHash) { + return fmt.Errorf("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash) } - if len(keys) != 0 { - return errors.New("keypath not consumed all") + computedHash := sp.ComputeRootHash() + if !bytes.Equal(computedHash, rootHash) { + return fmt.Errorf("invalid root hash: wanted %X got %X", rootHash, computedHash) } return nil } -//---------------------------------------- -// ProofRuntime - main entrypoint +// Compute the root hash given a leaf hash. Does not verify the result. +func (sp *Proof) ComputeRootHash() []byte { + return computeHashFromAunts( + sp.Index, + sp.Total, + sp.LeafHash, + sp.Aunts, + ) +} -type OpDecoder func(ProofOp) (ProofOperator, error) +// String implements the stringer interface for Proof. +// It is a wrapper around StringIndented. +func (sp *Proof) String() string { + return sp.StringIndented("") +} -type ProofRuntime struct { - decoders map[string]OpDecoder +// StringIndented generates a canonical string representation of a Proof. +func (sp *Proof) StringIndented(indent string) string { + return fmt.Sprintf(`Proof{ +%s Aunts: %X +%s}`, + indent, sp.Aunts, + indent) } -func NewProofRuntime() *ProofRuntime { - return &ProofRuntime{ - decoders: make(map[string]OpDecoder), +// ValidateBasic performs basic validation. +// NOTE: it expects the LeafHash and the elements of Aunts to be of size tmhash.Size, +// and it expects at most MaxAunts elements in Aunts. 
+func (sp *Proof) ValidateBasic() error { + if sp.Total < 0 { + return errors.New("negative Total") } + if sp.Index < 0 { + return errors.New("negative Index") + } + if len(sp.LeafHash) != tmhash.Size { + return fmt.Errorf("expected LeafHash size to be %d, got %d", tmhash.Size, len(sp.LeafHash)) + } + if len(sp.Aunts) > MaxAunts { + return fmt.Errorf("expected no more than %d aunts, got %d", MaxAunts, len(sp.Aunts)) + } + for i, auntHash := range sp.Aunts { + if len(auntHash) != tmhash.Size { + return fmt.Errorf("expected Aunts#%d size to be %d, got %d", i, tmhash.Size, len(auntHash)) + } + } + return nil } -func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) { - _, ok := prt.decoders[typ] - if ok { - panic("already registered for type " + typ) +func (sp *Proof) ToProto() *tmcrypto.Proof { + if sp == nil { + return nil } - prt.decoders[typ] = dec + pb := new(tmcrypto.Proof) + + pb.Total = sp.Total + pb.Index = sp.Index + pb.LeafHash = sp.LeafHash + pb.Aunts = sp.Aunts + + return pb } -func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) { - decoder := prt.decoders[pop.Type] - if decoder == nil { - return nil, errors.Errorf("unrecognized proof type %v", pop.Type) +func ProofFromProto(pb *tmcrypto.Proof) (*Proof, error) { + if pb == nil { + return nil, errors.New("nil proof") } - return decoder(pop) + + sp := new(Proof) + + sp.Total = pb.Total + sp.Index = pb.Index + sp.LeafHash = pb.LeafHash + sp.Aunts = pb.Aunts + + return sp, sp.ValidateBasic() } -func (prt *ProofRuntime) DecodeProof(proof *Proof) (ProofOperators, error) { - poz := make(ProofOperators, 0, len(proof.Ops)) - for _, pop := range proof.Ops { - operator, err := prt.Decode(pop) - if err != nil { - return nil, errors.Wrap(err, "decoding a proof operator") +// Use the leafHash and innerHashes to get the root merkle hash. +// If the length of the innerHashes slice isn't exactly correct, the result is nil. +// Recursive impl. +func computeHashFromAunts(index, total int64, leafHash []byte, innerHashes [][]byte) []byte { + if index >= total || index < 0 || total <= 0 { + return nil + } + switch total { + case 0: + panic("Cannot call computeHashFromAunts() with 0 total") + case 1: + if len(innerHashes) != 0 { + return nil + } + return leafHash + default: + if len(innerHashes) == 0 { + return nil } - poz = append(poz, operator) + numLeft := getSplitPoint(total) + if index < numLeft { + leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if leftHash == nil { + return nil + } + return innerHash(leftHash, innerHashes[len(innerHashes)-1]) + } + rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if rightHash == nil { + return nil + } + return innerHash(innerHashes[len(innerHashes)-1], rightHash) } - return poz, nil -} - -func (prt *ProofRuntime) VerifyValue(proof *Proof, root []byte, keypath string, value []byte) (err error) { - return prt.Verify(proof, root, keypath, [][]byte{value}) } -// TODO In the long run we'll need a method of classifcation of ops, -// whether existence or absence or perhaps a third? -func (prt *ProofRuntime) VerifyAbsence(proof *Proof, root []byte, keypath string) (err error) { - return prt.Verify(proof, root, keypath, nil) +// ProofNode is a helper structure to construct merkle proof. +// The node and the tree is thrown away afterwards. +// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil. 
+// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or +// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child. +type ProofNode struct { + Hash []byte + Parent *ProofNode + Left *ProofNode // Left sibling (only one of Left,Right is set) + Right *ProofNode // Right sibling (only one of Left,Right is set) } -func (prt *ProofRuntime) Verify(proof *Proof, root []byte, keypath string, args [][]byte) (err error) { - poz, err := prt.DecodeProof(proof) - if err != nil { - return errors.Wrap(err, "decoding proof") +// FlattenAunts will return the inner hashes for the item corresponding to the leaf, +// starting from a leaf ProofNode. +func (spn *ProofNode) FlattenAunts() [][]byte { + // Nonrecursive impl. + innerHashes := [][]byte{} + for spn != nil { + switch { + case spn.Left != nil: + innerHashes = append(innerHashes, spn.Left.Hash) + case spn.Right != nil: + innerHashes = append(innerHashes, spn.Right.Hash) + default: + break + } + spn = spn.Parent } - return poz.Verify(root, keypath, args) + return innerHashes } -// DefaultProofRuntime only knows about Simple value -// proofs. -// To use e.g. IAVL proofs, register op-decoders as -// defined in the IAVL package. -func DefaultProofRuntime() (prt *ProofRuntime) { - prt = NewProofRuntime() - prt.RegisterOpDecoder(ProofOpSimpleValue, SimpleValueOpDecoder) - return +// trails[0].Hash is the leaf hash for items[0]. +// trails[i].Parent.Parent....Parent == root for all i. +func trailsFromByteSlices(items [][]byte) (trails []*ProofNode, root *ProofNode) { + // Recursive impl. + switch len(items) { + case 0: + return []*ProofNode{}, &ProofNode{emptyHash(), nil, nil, nil} + case 1: + trail := &ProofNode{leafHash(items[0]), nil, nil, nil} + return []*ProofNode{trail}, trail + default: + k := getSplitPoint(int64(len(items))) + lefts, leftRoot := trailsFromByteSlices(items[:k]) + rights, rightRoot := trailsFromByteSlices(items[k:]) + rootHash := innerHash(leftRoot.Hash, rightRoot.Hash) + root := &ProofNode{rootHash, nil, nil, nil} + leftRoot.Parent = root + leftRoot.Right = rightRoot + rightRoot.Parent = root + rightRoot.Left = leftRoot + return append(lefts, rights...), root + } } diff --git a/crypto/merkle/proof_key_path.go b/crypto/merkle/proof_key_path.go index 7ea67853b..ca8b5f052 100644 --- a/crypto/merkle/proof_key_path.go +++ b/crypto/merkle/proof_key_path.go @@ -2,11 +2,10 @@ package merkle import ( "encoding/hex" + "errors" "fmt" "net/url" "strings" - - "github.com/pkg/errors" ) /* @@ -18,7 +17,7 @@ import ( /32:) For example, for a Cosmos-SDK application where the first two proof layers - are SimpleValueOps, and the third proof layer is an IAVLValueOp, the keys + are ValueOps, and the third proof layer is an IAVLValueOp, the keys might look like: 0: []byte("App") @@ -96,13 +95,13 @@ func KeyPathToKeys(path string) (keys [][]byte, err error) { hexPart := part[2:] key, err := hex.DecodeString(hexPart) if err != nil { - return nil, errors.Wrapf(err, "decoding hex-encoded part #%d: /%s", i, part) + return nil, fmt.Errorf("decoding hex-encoded part #%d: /%s: %w", i, part, err) } keys[i] = key } else { key, err := url.PathUnescape(part) if err != nil { - return nil, errors.Wrapf(err, "decoding url-encoded part #%d: /%s", i, part) + return nil, fmt.Errorf("decoding url-encoded part #%d: /%s: %w", i, part, err) } keys[i] = []byte(key) // TODO Test this with random bytes, I'm not sure that it works for arbitrary bytes... 
} diff --git a/crypto/merkle/proof_key_path_test.go b/crypto/merkle/proof_key_path_test.go index 34c918f4a..22e3e21ca 100644 --- a/crypto/merkle/proof_key_path_test.go +++ b/crypto/merkle/proof_key_path_test.go @@ -26,7 +26,7 @@ func TestKeyPath(t *testing.T) { keys[i][j] = alphanum[rand.Intn(len(alphanum))] } case KeyEncodingHex: - rand.Read(keys[i]) //nolint: gosec + rand.Read(keys[i]) default: panic("Unexpected encoding") } diff --git a/crypto/merkle/proof_op.go b/crypto/merkle/proof_op.go new file mode 100644 index 000000000..038037cf5 --- /dev/null +++ b/crypto/merkle/proof_op.go @@ -0,0 +1,139 @@ +package merkle + +import ( + "bytes" + "errors" + "fmt" + + tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" +) + +//---------------------------------------- +// ProofOp gets converted to an instance of ProofOperator: + +// ProofOperator is a layer for calculating intermediate Merkle roots +// when a series of Merkle trees are chained together. +// Run() takes leaf values from a tree and returns the Merkle +// root for the corresponding tree. It takes and returns a list of bytes +// to allow multiple leaves to be part of a single proof, for instance in a range proof. +// ProofOp() encodes the ProofOperator in a generic way so it can later be +// decoded with OpDecoder. +type ProofOperator interface { + Run([][]byte) ([][]byte, error) + GetKey() []byte + ProofOp() tmcrypto.ProofOp +} + +//---------------------------------------- +// Operations on a list of ProofOperators + +// ProofOperators is a slice of ProofOperator(s). +// Each operator will be applied to the input value sequentially +// and the last Merkle root will be verified with already known data +type ProofOperators []ProofOperator + +func (poz ProofOperators) VerifyValue(root []byte, keypath string, value []byte) (err error) { + return poz.Verify(root, keypath, [][]byte{value}) +} + +func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (err error) { + keys, err := KeyPathToKeys(keypath) + if err != nil { + return + } + + for i, op := range poz { + key := op.GetKey() + if len(key) != 0 { + if len(keys) == 0 { + return fmt.Errorf("key path has insufficient # of parts: expected no more keys but got %+v", string(key)) + } + lastKey := keys[len(keys)-1] + if !bytes.Equal(lastKey, key) { + return fmt.Errorf("key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key)) + } + keys = keys[:len(keys)-1] + } + args, err = op.Run(args) + if err != nil { + return + } + } + if !bytes.Equal(root, args[0]) { + return fmt.Errorf("calculated root hash is invalid: expected %X but got %X", root, args[0]) + } + if len(keys) != 0 { + return errors.New("keypath not consumed all") + } + return nil +} + +//---------------------------------------- +// ProofRuntime - main entrypoint + +type OpDecoder func(tmcrypto.ProofOp) (ProofOperator, error) + +type ProofRuntime struct { + decoders map[string]OpDecoder +} + +func NewProofRuntime() *ProofRuntime { + return &ProofRuntime{ + decoders: make(map[string]OpDecoder), + } +} + +func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) { + _, ok := prt.decoders[typ] + if ok { + panic("already registered for type " + typ) + } + prt.decoders[typ] = dec +} + +func (prt *ProofRuntime) Decode(pop tmcrypto.ProofOp) (ProofOperator, error) { + decoder := prt.decoders[pop.Type] + if decoder == nil { + return nil, fmt.Errorf("unrecognized proof type %v", pop.Type) + } + return decoder(pop) +} + +func (prt *ProofRuntime) DecodeProof(proof 
*tmcrypto.ProofOps) (ProofOperators, error) { + poz := make(ProofOperators, 0, len(proof.Ops)) + for _, pop := range proof.Ops { + operator, err := prt.Decode(pop) + if err != nil { + return nil, fmt.Errorf("decoding a proof operator: %w", err) + } + poz = append(poz, operator) + } + return poz, nil +} + +func (prt *ProofRuntime) VerifyValue(proof *tmcrypto.ProofOps, root []byte, keypath string, value []byte) (err error) { + return prt.Verify(proof, root, keypath, [][]byte{value}) +} + +// TODO In the long run we'll need a method of classification of ops, +// whether existence or absence or perhaps a third? +func (prt *ProofRuntime) VerifyAbsence(proof *tmcrypto.ProofOps, root []byte, keypath string) (err error) { + return prt.Verify(proof, root, keypath, nil) +} + +func (prt *ProofRuntime) Verify(proof *tmcrypto.ProofOps, root []byte, keypath string, args [][]byte) (err error) { + poz, err := prt.DecodeProof(proof) + if err != nil { + return fmt.Errorf("decoding proof: %w", err) + } + return poz.Verify(root, keypath, args) +} + +// DefaultProofRuntime only knows about value proofs. +// To use e.g. IAVL proofs, register op-decoders as +// defined in the IAVL package. +func DefaultProofRuntime() (prt *ProofRuntime) { + prt = NewProofRuntime() + prt.RegisterOpDecoder(ProofOpValue, ValueOpDecoder) + return +} diff --git a/crypto/merkle/proof_simple_value.go b/crypto/merkle/proof_simple_value.go deleted file mode 100644 index 55337b7b8..000000000 --- a/crypto/merkle/proof_simple_value.go +++ /dev/null @@ -1,92 +0,0 @@ -package merkle - -import ( - "bytes" - "fmt" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/crypto/tmhash" -) - -const ProofOpSimpleValue = "simple:v" - -// SimpleValueOp takes a key and a single value as argument and -// produces the root hash. The corresponding tree structure is -// the SimpleMap tree. SimpleMap takes a Hasher, and currently -// Tendermint uses aminoHasher. SimpleValueOp should support -// the hash function as used in aminoHasher. TODO support -// additional hash functions here as options/args to this -// operator. -// -// If the produced root hash matches the expected hash, the -// proof is good. -type SimpleValueOp struct { - // Encoded in ProofOp.Key. - key []byte - - // To encode in ProofOp.Data - Proof *SimpleProof `json:"simple_proof"` -} - -var _ ProofOperator = SimpleValueOp{} - -func NewSimpleValueOp(key []byte, proof *SimpleProof) SimpleValueOp { - return SimpleValueOp{ - key: key, - Proof: proof, - } -} - -func SimpleValueOpDecoder(pop ProofOp) (ProofOperator, error) { - if pop.Type != ProofOpSimpleValue { - return nil, errors.Errorf("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpSimpleValue) - } - var op SimpleValueOp // a bit strange as we'll discard this, but it works.
- err := cdc.UnmarshalBinaryLengthPrefixed(pop.Data, &op) - if err != nil { - return nil, errors.Wrap(err, "decoding ProofOp.Data into SimpleValueOp") - } - return NewSimpleValueOp(pop.Key, op.Proof), nil -} - -func (op SimpleValueOp) ProofOp() ProofOp { - bz := cdc.MustMarshalBinaryLengthPrefixed(op) - return ProofOp{ - Type: ProofOpSimpleValue, - Key: op.key, - Data: bz, - } -} - -func (op SimpleValueOp) String() string { - return fmt.Sprintf("SimpleValueOp{%v}", op.GetKey()) -} - -func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) { - if len(args) != 1 { - return nil, errors.Errorf("expected 1 arg, got %v", len(args)) - } - value := args[0] - hasher := tmhash.New() - hasher.Write(value) // does not error - vhash := hasher.Sum(nil) - - bz := new(bytes.Buffer) - // Wrap to hash the KVPair. - encodeByteSlice(bz, op.key) // does not error - encodeByteSlice(bz, vhash) // does not error - kvhash := leafHash(bz.Bytes()) - - if !bytes.Equal(kvhash, op.Proof.LeafHash) { - return nil, errors.Errorf("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash) - } - - return [][]byte{ - op.Proof.ComputeRootHash(), - }, nil -} - -func (op SimpleValueOp) GetKey() []byte { - return op.key -} diff --git a/crypto/merkle/proof_test.go b/crypto/merkle/proof_test.go index c24e791cb..22ab900f0 100644 --- a/crypto/merkle/proof_test.go +++ b/crypto/merkle/proof_test.go @@ -1,11 +1,14 @@ package merkle import ( + "errors" + "fmt" "testing" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" - amino "github.com/tendermint/go-amino" + "github.com/stretchr/testify/require" + + tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" ) const ProofOpDomino = "test:domino" @@ -26,22 +29,18 @@ func NewDominoOp(key, input, output string) DominoOp { } } -//nolint:unused -func DominoOpDecoder(pop ProofOp) (ProofOperator, error) { - if pop.Type != ProofOpDomino { - panic("unexpected proof op type") +func (dop DominoOp) ProofOp() tmcrypto.ProofOp { + dopb := tmcrypto.DominoOp{ + Key: dop.key, + Input: dop.Input, + Output: dop.Output, } - var op DominoOp // a bit strange as we'll discard this, but it works. - err := amino.UnmarshalBinaryLengthPrefixed(pop.Data, &op) + bz, err := dopb.Marshal() if err != nil { - return nil, errors.Wrap(err, "decoding ProofOp.Data into SimpleValueOp") + panic(err) } - return NewDominoOp(string(pop.Key), op.Input, op.Output), nil -} -func (dop DominoOp) ProofOp() ProofOp { - bz := amino.MustMarshalBinaryLengthPrefixed(dop) - return ProofOp{ + return tmcrypto.ProofOp{ Type: ProofOpDomino, Key: []byte(dop.key), Data: bz, @@ -53,7 +52,7 @@ func (dop DominoOp) Run(input [][]byte) (output [][]byte, err error) { return nil, errors.New("expected input of length 1") } if string(input[0]) != dop.Input { - return nil, errors.Errorf("expected input %v, got %v", + return nil, fmt.Errorf("expected input %v, got %v", dop.Input, string(input[0])) } return [][]byte{[]byte(dop.Output)}, nil @@ -70,8 +69,6 @@ func TestProofOperators(t *testing.T) { // ProofRuntime setup // TODO test this somehow. 
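// An illustrative sketch of how the TODO above might be exercised through the
// public surface added in proof_op.go (not part of this change; proofOps,
// rootHash, and value are assumed to come from elsewhere, e.g. an ABCI query
// response):
//
//	prt := DefaultProofRuntime() // registers the "simple:v" ValueOp decoder
//	if err := prt.VerifyValue(proofOps, rootHash, "/mykey", value); err != nil {
//		t.Fatalf("value not proven under /mykey: %v", err)
//	}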
- // prt := NewProofRuntime() - // prt.RegisterOpDecoder(ProofOpDomino, DominoOpDecoder) // ProofOperators setup op1 := NewDominoOp("KEY1", "INPUT1", "INPUT2") @@ -139,3 +136,65 @@ func TestProofOperators(t *testing.T) { func bz(s string) []byte { return []byte(s) } + +func TestProofValidateBasic(t *testing.T) { + testCases := []struct { + testName string + malleateProof func(*Proof) + errStr string + }{ + {"Good", func(sp *Proof) {}, ""}, + {"Negative Total", func(sp *Proof) { sp.Total = -1 }, "negative Total"}, + {"Negative Index", func(sp *Proof) { sp.Index = -1 }, "negative Index"}, + {"Invalid LeafHash", func(sp *Proof) { sp.LeafHash = make([]byte, 10) }, + "expected LeafHash size to be 32, got 10"}, + {"Too many Aunts", func(sp *Proof) { sp.Aunts = make([][]byte, MaxAunts+1) }, + "expected no more than 100 aunts, got 101"}, + {"Invalid Aunt", func(sp *Proof) { sp.Aunts[0] = make([]byte, 10) }, + "expected Aunts#0 size to be 32, got 10"}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + _, proofs := ProofsFromByteSlices([][]byte{ + []byte("apple"), + []byte("watermelon"), + []byte("kiwi"), + }) + tc.malleateProof(proofs[0]) + err := proofs[0].ValidateBasic() + if tc.errStr != "" { + assert.Contains(t, err.Error(), tc.errStr) + } + }) + } +} +func TestProofProtobuf(t *testing.T) { + + _, proofs := ProofsFromByteSlices([][]byte{ + []byte("apple"), + []byte("watermelon"), + []byte("kiwi"), + }) + testCases := []struct { + testName string + v1 *Proof + expPass bool + }{ + {"empty proof", &Proof{}, false}, + {"failure nil", nil, false}, + {"success", proofs[0], true}, + } + for _, tc := range testCases { + pb := tc.v1.ToProto() + + v, err := ProofFromProto(pb) + if tc.expPass { + require.NoError(t, err) + require.Equal(t, tc.v1, v, tc.testName) + } else { + require.Error(t, err) + } + } +} diff --git a/crypto/merkle/proof_value.go b/crypto/merkle/proof_value.go new file mode 100644 index 000000000..b613ebe31 --- /dev/null +++ b/crypto/merkle/proof_value.go @@ -0,0 +1,103 @@ +package merkle + +import ( + "bytes" + "fmt" + + "github.com/tendermint/tendermint/crypto/tmhash" + tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" +) + +const ProofOpValue = "simple:v" + +// ValueOp takes a key and a single value as arguments and +// produces the root hash. The corresponding tree structure is +// the SimpleMap tree. SimpleMap takes a Hasher, and currently +// Tendermint uses tmhash. ValueOp should support +// the hash function as used in tmhash. TODO support +// additional hash functions here as options/args to this +// operator. +// +// If the produced root hash matches the expected hash, the +// proof is good. +type ValueOp struct { + // Encoded in ProofOp.Key. + key []byte + + // To encode in ProofOp.Data + Proof *Proof `json:"proof"` +} + +var _ ProofOperator = ValueOp{} + +func NewValueOp(key []byte, proof *Proof) ValueOp { + return ValueOp{ + key: key, + Proof: proof, + } +} + +func ValueOpDecoder(pop tmcrypto.ProofOp) (ProofOperator, error) { + if pop.Type != ProofOpValue { + return nil, fmt.Errorf("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpValue) + } + var pbop tmcrypto.ValueOp // a bit strange as we'll discard this, but it works.
+ err := pbop.Unmarshal(pop.Data) + if err != nil { + return nil, fmt.Errorf("decoding ProofOp.Data into ValueOp: %w", err) + } + + sp, err := ProofFromProto(pbop.Proof) + if err != nil { + return nil, err + } + return NewValueOp(pop.Key, sp), nil +} + +func (op ValueOp) ProofOp() tmcrypto.ProofOp { + pbval := tmcrypto.ValueOp{ + Key: op.key, + Proof: op.Proof.ToProto(), + } + bz, err := pbval.Marshal() + if err != nil { + panic(err) + } + return tmcrypto.ProofOp{ + Type: ProofOpValue, + Key: op.key, + Data: bz, + } +} + +func (op ValueOp) String() string { + return fmt.Sprintf("ValueOp{%v}", op.GetKey()) +} + +func (op ValueOp) Run(args [][]byte) ([][]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("expected 1 arg, got %v", len(args)) + } + value := args[0] + hasher := tmhash.New() + hasher.Write(value) //nolint: errcheck // does not error + vhash := hasher.Sum(nil) + + bz := new(bytes.Buffer) + // Wrap to hash the KVPair. + encodeByteSlice(bz, op.key) //nolint: errcheck // does not error + encodeByteSlice(bz, vhash) //nolint: errcheck // does not error + kvhash := leafHash(bz.Bytes()) + + if !bytes.Equal(kvhash, op.Proof.LeafHash) { + return nil, fmt.Errorf("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash) + } + + return [][]byte{ + op.Proof.ComputeRootHash(), + }, nil +} + +func (op ValueOp) GetKey() []byte { + return op.key +} diff --git a/crypto/merkle/result.go b/crypto/merkle/result.go deleted file mode 100644 index c7bbb575f..000000000 --- a/crypto/merkle/result.go +++ /dev/null @@ -1,53 +0,0 @@ -// nolint: dupl -package merkle - -import ( - "bytes" - "encoding/json" - - "github.com/gogo/protobuf/jsonpb" -) - -//--------------------------------------------------------------------------- -// override JSON marshalling so we emit defaults (ie. disable omitempty) - -var ( - jsonpbMarshaller = jsonpb.Marshaler{ - EnumsAsInts: true, - EmitDefaults: true, - } - jsonpbUnmarshaller = jsonpb.Unmarshaler{} -) - -func (r *ProofOp) MarshalJSON() ([]byte, error) { - s, err := jsonpbMarshaller.MarshalToString(r) - return []byte(s), err -} - -func (r *ProofOp) UnmarshalJSON(b []byte) error { - reader := bytes.NewBuffer(b) - return jsonpbUnmarshaller.Unmarshal(reader, r) -} - -func (r *Proof) MarshalJSON() ([]byte, error) { - s, err := jsonpbMarshaller.MarshalToString(r) - return []byte(s), err -} - -func (r *Proof) UnmarshalJSON(b []byte) error { - reader := bytes.NewBuffer(b) - return jsonpbUnmarshaller.Unmarshal(reader, r) -} - -// Some compile time assertions to ensure we don't -// have accidental runtime surprises later on. -// jsonEncodingRoundTripper ensures that asserted -// interfaces implement both MarshalJSON and UnmarshalJSON - -type jsonRoundTripper interface { - json.Marshaler - json.Unmarshaler -} - -var _ jsonRoundTripper = (*ProofOp)(nil) -var _ jsonRoundTripper = (*Proof)(nil) diff --git a/crypto/merkle/rfc6962_test.go b/crypto/merkle/rfc6962_test.go index 6c508164a..c762cda56 100644 --- a/crypto/merkle/rfc6962_test.go +++ b/crypto/merkle/rfc6962_test.go @@ -28,13 +28,20 @@ func TestRFC6962Hasher(t *testing.T) { leafHash := leafHashTrail.Hash _, leafHashTrail = trailsFromByteSlices([][]byte{{}}) emptyLeafHash := leafHashTrail.Hash + _, emptyHashTrail := trailsFromByteSlices([][]byte{}) + emptyTreeHash := emptyHashTrail.Hash for _, tc := range []struct { desc string got []byte want string }{ - // Since creating a merkle tree of no leaves is unsupported here, we skip - // the corresponding trillian test vector. 
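// For reference, the "RFC6962 Empty Tree" vector added below is simply the
// SHA-256 of zero bytes; a sketch using only the standard library (not part
// of this change):
//
//	sum := sha256.Sum256([]byte{})
//	fmt.Printf("%x\n", sum) // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855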
+ // Check that empty trees return the hash of an empty string. + // echo -n '' | sha256sum + { + desc: "RFC6962 Empty Tree", + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"[:tmhash.Size*2], + got: emptyTreeHash, + }, // Check that the empty hash is not the same as the hash of an empty leaf. // echo -n 00 | xxd -r -p | sha256sum diff --git a/crypto/merkle/simple_map.go b/crypto/merkle/simple_map.go deleted file mode 100644 index 840bebd51..000000000 --- a/crypto/merkle/simple_map.go +++ /dev/null @@ -1,95 +0,0 @@ -package merkle - -import ( - "bytes" - - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/crypto/tmhash" - "github.com/tendermint/tendermint/libs/kv" -) - -// Merkle tree from a map. -// Leaves are `hash(key) | hash(value)`. -// Leaves are sorted before Merkle hashing. -type simpleMap struct { - kvs kv.Pairs - sorted bool -} - -func newSimpleMap() *simpleMap { - return &simpleMap{ - kvs: nil, - sorted: false, - } -} - -// Set creates a kv pair of the key and the hash of the value, -// and then appends it to simpleMap's kv pairs. -func (sm *simpleMap) Set(key string, value []byte) { - sm.sorted = false - - // The value is hashed, so you can - // check for equality with a cached value (say) - // and make a determination to fetch or not. - vhash := tmhash.Sum(value) - - sm.kvs = append(sm.kvs, kv.Pair{ - Key: []byte(key), - Value: vhash, - }) -} - -// Hash Merkle root hash of items sorted by key -// (UNSTABLE: and by value too if duplicate key). -func (sm *simpleMap) Hash() []byte { - sm.Sort() - return hashKVPairs(sm.kvs) -} - -func (sm *simpleMap) Sort() { - if sm.sorted { - return - } - sm.kvs.Sort() - sm.sorted = true -} - -// Returns a copy of sorted KVPairs. -// NOTE these contain the hashed key and value. -func (sm *simpleMap) KVPairs() kv.Pairs { - sm.Sort() - kvs := make(kv.Pairs, len(sm.kvs)) - copy(kvs, sm.kvs) - return kvs -} - -//---------------------------------------- - -// A local extension to KVPair that can be hashed. -// Key and value are length prefixed and concatenated, -// then hashed. -type KVPair kv.Pair - -// Bytes returns key || value, with both the -// key and value length prefixed. 
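
The removed Bytes method below relied on amino's uvarint length-prefixed byte-slice encoding; a standalone equivalent sketch follows (encodeByteSlice here is a local reimplementation, not the merkle package's unexported helper, though the patch to types.go swaps in exactly this PutUvarint form).

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodeByteSlice writes bz prefixed with its uvarint length.
func encodeByteSlice(w *bytes.Buffer, bz []byte) {
	var buf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(buf[:], uint64(len(bz)))
	w.Write(buf[:n])
	w.Write(bz)
}

func main() {
	var b bytes.Buffer
	encodeByteSlice(&b, []byte("key1"))
	encodeByteSlice(&b, []byte{0xAB, 0xCD}) // stand-in for a hashed value
	fmt.Printf("%X\n", b.Bytes())           // 046B65793102ABCD
}
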
-func (kv KVPair) Bytes() []byte { - var b bytes.Buffer - err := amino.EncodeByteSlice(&b, kv.Key) - if err != nil { - panic(err) - } - err = amino.EncodeByteSlice(&b, kv.Value) - if err != nil { - panic(err) - } - return b.Bytes() -} - -func hashKVPairs(kvs kv.Pairs) []byte { - kvsH := make([][]byte, len(kvs)) - for i, kvp := range kvs { - kvsH[i] = KVPair(kvp).Bytes() - } - return SimpleHashFromByteSlices(kvsH) -} diff --git a/crypto/merkle/simple_map_test.go b/crypto/merkle/simple_map_test.go deleted file mode 100644 index 20868a782..000000000 --- a/crypto/merkle/simple_map_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package merkle - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSimpleMap(t *testing.T) { - tests := []struct { - keys []string - values []string // each string gets converted to []byte in test - want string - }{ - {[]string{"key1"}, []string{"value1"}, "a44d3cc7daba1a4600b00a2434b30f8b970652169810d6dfa9fb1793a2189324"}, - {[]string{"key1"}, []string{"value2"}, "0638e99b3445caec9d95c05e1a3fc1487b4ddec6a952ff337080360b0dcc078c"}, - // swap order with 2 keys - { - []string{"key1", "key2"}, - []string{"value1", "value2"}, - "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3", - }, - { - []string{"key2", "key1"}, - []string{"value2", "value1"}, - "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3", - }, - // swap order with 3 keys - { - []string{"key1", "key2", "key3"}, - []string{"value1", "value2", "value3"}, - "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc", - }, - { - []string{"key1", "key3", "key2"}, - []string{"value1", "value3", "value2"}, - "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc", - }, - } - for i, tc := range tests { - db := newSimpleMap() - for i := 0; i < len(tc.keys); i++ { - db.Set(tc.keys[i], []byte(tc.values[i])) - } - got := db.Hash() - assert.Equal(t, tc.want, fmt.Sprintf("%x", got), "Hash didn't match on tc %d", i) - } -} diff --git a/crypto/merkle/simple_proof.go b/crypto/merkle/simple_proof.go deleted file mode 100644 index 44b97f606..000000000 --- a/crypto/merkle/simple_proof.go +++ /dev/null @@ -1,232 +0,0 @@ -package merkle - -import ( - "bytes" - "fmt" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/crypto/tmhash" -) - -const ( - // MaxAunts is the maximum number of aunts that can be included in a SimpleProof. - // This corresponds to a tree of size 2^100, which should be sufficient for all conceivable purposes. - // This maximum helps prevent Denial-of-Service attacks by limitting the size of the proofs. - MaxAunts = 100 -) - -// SimpleProof represents a simple Merkle proof. -// NOTE: The convention for proofs is to include leaf hashes but to -// exclude the root hash. -// This convention is implemented across IAVL range proofs as well. -// Keep this consistent unless there's a very good reason to change -// everything. This also affects the generalized proof system as -// well. -type SimpleProof struct { - Total int `json:"total"` // Total number of items. - Index int `json:"index"` // Index of item to prove. - LeafHash []byte `json:"leaf_hash"` // Hash of item value. - Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. -} - -// SimpleProofsFromByteSlices computes inclusion proof for given items. -// proofs[0] is the proof for items[0]. 
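
A usage sketch under the names this patch introduces (ProofsFromByteSlices, Proof.Verify): each proof carries the leaf hash plus the aunt hashes needed to recompute the root.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	items := [][]byte{[]byte("a"), []byte("b"), []byte("c")}
	root, proofs := merkle.ProofsFromByteSlices(items)

	for i, item := range items {
		// Verify recomputes the root from the leaf and the aunts
		// and compares it against the expected root hash.
		if err := proofs[i].Verify(root, item); err != nil {
			panic(err)
		}
	}
	fmt.Printf("root %X verified for all %d items\n", root, len(items))
}
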
-func SimpleProofsFromByteSlices(items [][]byte) (rootHash []byte, proofs []*SimpleProof) { - trails, rootSPN := trailsFromByteSlices(items) - rootHash = rootSPN.Hash - proofs = make([]*SimpleProof, len(items)) - for i, trail := range trails { - proofs[i] = &SimpleProof{ - Total: len(items), - Index: i, - LeafHash: trail.Hash, - Aunts: trail.FlattenAunts(), - } - } - return -} - -// SimpleProofsFromMap generates proofs from a map. The keys/values of the map will be used as the keys/values -// in the underlying key-value pairs. -// The keys are sorted before the proofs are computed. -func SimpleProofsFromMap(m map[string][]byte) (rootHash []byte, proofs map[string]*SimpleProof, keys []string) { - sm := newSimpleMap() - for k, v := range m { - sm.Set(k, v) - } - sm.Sort() - kvs := sm.kvs - kvsBytes := make([][]byte, len(kvs)) - for i, kvp := range kvs { - kvsBytes[i] = KVPair(kvp).Bytes() - } - - rootHash, proofList := SimpleProofsFromByteSlices(kvsBytes) - proofs = make(map[string]*SimpleProof) - keys = make([]string, len(proofList)) - for i, kvp := range kvs { - proofs[string(kvp.Key)] = proofList[i] - keys[i] = string(kvp.Key) - } - return -} - -// Verify that the SimpleProof proves the root hash. -// Check sp.Index/sp.Total manually if needed -func (sp *SimpleProof) Verify(rootHash []byte, leaf []byte) error { - leafHash := leafHash(leaf) - if sp.Total < 0 { - return errors.New("proof total must be positive") - } - if sp.Index < 0 { - return errors.New("proof index cannot be negative") - } - if !bytes.Equal(sp.LeafHash, leafHash) { - return errors.Errorf("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash) - } - computedHash := sp.ComputeRootHash() - if !bytes.Equal(computedHash, rootHash) { - return errors.Errorf("invalid root hash: wanted %X got %X", rootHash, computedHash) - } - return nil -} - -// Compute the root hash given a leaf hash. Does not verify the result. -func (sp *SimpleProof) ComputeRootHash() []byte { - return computeHashFromAunts( - sp.Index, - sp.Total, - sp.LeafHash, - sp.Aunts, - ) -} - -// String implements the stringer interface for SimpleProof. -// It is a wrapper around StringIndented. -func (sp *SimpleProof) String() string { - return sp.StringIndented("") -} - -// StringIndented generates a canonical string representation of a SimpleProof. -func (sp *SimpleProof) StringIndented(indent string) string { - return fmt.Sprintf(`SimpleProof{ -%s Aunts: %X -%s}`, - indent, sp.Aunts, - indent) -} - -// ValidateBasic performs basic validation. -// NOTE: it expects the LeafHash and the elements of Aunts to be of size tmhash.Size, -// and it expects at most MaxAunts elements in Aunts. -func (sp *SimpleProof) ValidateBasic() error { - if sp.Total < 0 { - return errors.New("negative Total") - } - if sp.Index < 0 { - return errors.New("negative Index") - } - if len(sp.LeafHash) != tmhash.Size { - return errors.Errorf("expected LeafHash size to be %d, got %d", tmhash.Size, len(sp.LeafHash)) - } - if len(sp.Aunts) > MaxAunts { - return errors.Errorf("expected no more than %d aunts, got %d", MaxAunts, len(sp.Aunts)) - } - for i, auntHash := range sp.Aunts { - if len(auntHash) != tmhash.Size { - return errors.Errorf("expected Aunts#%d size to be %d, got %d", i, tmhash.Size, len(auntHash)) - } - } - return nil -} - -// Use the leafHash and innerHashes to get the root merkle hash. -// If the length of the innerHashes slice isn't exactly correct, the result is nil. -// Recursive impl. 
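
A worked two-leaf example of the aunt folding this function performs, with SHA-256 stand-ins for the unexported leafHash and innerHash helpers.

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// leaf and inner mirror the package's RFC 6962 domain-separated hashing.
func leaf(b []byte) []byte {
	h := sha256.Sum256(append([]byte{0x00}, b...))
	return h[:]
}

func inner(l, r []byte) []byte {
	h := sha256.Sum256(append(append([]byte{0x01}, l...), r...))
	return h[:]
}

func main() {
	// Two-leaf tree: the proof for index 0 has one aunt, the right leaf.
	l0, l1 := leaf([]byte("a")), leaf([]byte("b"))
	root := inner(l0, l1)

	aunts := [][]byte{l1}
	recomputed := inner(l0, aunts[0])          // index 0 is a left child
	fmt.Println(bytes.Equal(root, recomputed)) // true
}
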
-func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][]byte) []byte { - if index >= total || index < 0 || total <= 0 { - return nil - } - switch total { - case 0: - panic("Cannot call computeHashFromAunts() with 0 total") - case 1: - if len(innerHashes) != 0 { - return nil - } - return leafHash - default: - if len(innerHashes) == 0 { - return nil - } - numLeft := getSplitPoint(total) - if index < numLeft { - leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - if leftHash == nil { - return nil - } - return innerHash(leftHash, innerHashes[len(innerHashes)-1]) - } - rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - if rightHash == nil { - return nil - } - return innerHash(innerHashes[len(innerHashes)-1], rightHash) - } -} - -// SimpleProofNode is a helper structure to construct merkle proof. -// The node and the tree is thrown away afterwards. -// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil. -// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or -// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child. -type SimpleProofNode struct { - Hash []byte - Parent *SimpleProofNode - Left *SimpleProofNode // Left sibling (only one of Left,Right is set) - Right *SimpleProofNode // Right sibling (only one of Left,Right is set) -} - -// FlattenAunts will return the inner hashes for the item corresponding to the leaf, -// starting from a leaf SimpleProofNode. -func (spn *SimpleProofNode) FlattenAunts() [][]byte { - // Nonrecursive impl. - innerHashes := [][]byte{} - for spn != nil { - switch { - case spn.Left != nil: - innerHashes = append(innerHashes, spn.Left.Hash) - case spn.Right != nil: - innerHashes = append(innerHashes, spn.Right.Hash) - default: - break - } - spn = spn.Parent - } - return innerHashes -} - -// trails[0].Hash is the leaf hash for items[0]. -// trails[i].Parent.Parent....Parent == root for all i. -func trailsFromByteSlices(items [][]byte) (trails []*SimpleProofNode, root *SimpleProofNode) { - // Recursive impl. 
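
trailsFromByteSlices splits on getSplitPoint, the largest power of two strictly less than the length, so five leaves split into a four-leaf left subtree and a one-leaf right subtree. A standalone sketch, mirroring (not calling) the unexported function as rewritten later in this patch:

package main

import (
	"fmt"
	"math/bits"
)

// getSplitPoint returns the largest power of 2 less than length.
func getSplitPoint(length int64) int64 {
	if length < 1 {
		panic("Trying to split a tree with size < 1")
	}
	bitlen := bits.Len(uint(length))
	k := int64(1 << uint(bitlen-1))
	if k == length {
		k >>= 1
	}
	return k
}

func main() {
	for _, n := range []int64{2, 3, 5, 8} {
		fmt.Println(n, getSplitPoint(n)) // 2 1, 3 2, 5 4, 8 4
	}
}
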
- switch len(items) { - case 0: - return nil, nil - case 1: - trail := &SimpleProofNode{leafHash(items[0]), nil, nil, nil} - return []*SimpleProofNode{trail}, trail - default: - k := getSplitPoint(len(items)) - lefts, leftRoot := trailsFromByteSlices(items[:k]) - rights, rightRoot := trailsFromByteSlices(items[k:]) - rootHash := innerHash(leftRoot.Hash, rightRoot.Hash) - root := &SimpleProofNode{rootHash, nil, nil, nil} - leftRoot.Parent = root - leftRoot.Right = rightRoot - rightRoot.Parent = root - rightRoot.Left = leftRoot - return append(lefts, rights...), root - } -} diff --git a/crypto/merkle/simple_proof_test.go b/crypto/merkle/simple_proof_test.go deleted file mode 100644 index 68e6912fb..000000000 --- a/crypto/merkle/simple_proof_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package merkle - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSimpleProofValidateBasic(t *testing.T) { - testCases := []struct { - testName string - malleateProof func(*SimpleProof) - errStr string - }{ - {"Good", func(sp *SimpleProof) {}, ""}, - {"Negative Total", func(sp *SimpleProof) { sp.Total = -1 }, "negative Total"}, - {"Negative Index", func(sp *SimpleProof) { sp.Index = -1 }, "negative Index"}, - {"Invalid LeafHash", func(sp *SimpleProof) { sp.LeafHash = make([]byte, 10) }, - "expected LeafHash size to be 32, got 10"}, - {"Too many Aunts", func(sp *SimpleProof) { sp.Aunts = make([][]byte, MaxAunts+1) }, - "expected no more than 100 aunts, got 101"}, - {"Invalid Aunt", func(sp *SimpleProof) { sp.Aunts[0] = make([]byte, 10) }, - "expected Aunts#0 size to be 32, got 10"}, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.testName, func(t *testing.T) { - _, proofs := SimpleProofsFromByteSlices([][]byte{ - []byte("apple"), - []byte("watermelon"), - []byte("kiwi"), - }) - tc.malleateProof(proofs[0]) - err := proofs[0].ValidateBasic() - if tc.errStr != "" { - assert.Contains(t, err.Error(), tc.errStr) - } - }) - } -} diff --git a/crypto/merkle/simple_tree.go b/crypto/merkle/tree.go similarity index 62% rename from crypto/merkle/simple_tree.go rename to crypto/merkle/tree.go index 2a57bbe72..466c43482 100644 --- a/crypto/merkle/simple_tree.go +++ b/crypto/merkle/tree.go @@ -4,37 +4,37 @@ import ( "math/bits" ) -// SimpleHashFromByteSlices computes a Merkle tree where the leaves are the byte slice, -// in the provided order. -func SimpleHashFromByteSlices(items [][]byte) []byte { +// HashFromByteSlices computes a Merkle tree where the leaves are the byte slice, +// in the provided order. It follows RFC-6962. +func HashFromByteSlices(items [][]byte) []byte { switch len(items) { case 0: - return nil + return emptyHash() case 1: return leafHash(items[0]) default: - k := getSplitPoint(len(items)) - left := SimpleHashFromByteSlices(items[:k]) - right := SimpleHashFromByteSlices(items[k:]) + k := getSplitPoint(int64(len(items))) + left := HashFromByteSlices(items[:k]) + right := HashFromByteSlices(items[k:]) return innerHash(left, right) } } -// SimpleHashFromByteSliceIterative is an iterative alternative to -// SimpleHashFromByteSlice motivated by potential performance improvements. +// HashFromByteSliceIterative is an iterative alternative to +// HashFromByteSlice motivated by potential performance improvements. // (#2611) had suggested that an iterative version of -// SimpleHashFromByteSlice would be faster, presumably because +// HashFromByteSlice would be faster, presumably because // we can envision some overhead accumulating from stack // frames and function calls. 
Additionally, a recursive algorithm risks // hitting the stack limit and causing a stack overflow should the tree // be too large. // -// Provided here is an iterative alternative, a simple test to assert +// Provided here is an iterative alternative, a test to assert // correctness and a benchmark. On the performance side, there appears to // be no overall difference: // -// BenchmarkSimpleHashAlternatives/recursive-4 20000 77677 ns/op -// BenchmarkSimpleHashAlternatives/iterative-4 20000 76802 ns/op +// BenchmarkHashAlternatives/recursive-4 20000 77677 ns/op +// BenchmarkHashAlternatives/iterative-4 20000 76802 ns/op // // On the surface it might seem that the additional overhead is due to // the different allocation patterns of the implementations. The recursive @@ -47,9 +47,9 @@ func SimpleHashFromByteSlices(items [][]byte) []byte { // // These preliminary results suggest: // -// 1. The performance of the SimpleHashFromByteSlice is pretty good +// 1. The performance of the HashFromByteSlice is pretty good // 2. Go has low overhead for recursive functions -// 3. The performance of the SimpleHashFromByteSlice routine is dominated +// 3. The performance of the HashFromByteSlice routine is dominated // by the actual hashing of data // // Although this work is in no way exhaustive, point #3 suggests that @@ -59,7 +59,7 @@ func SimpleHashFromByteSlices(items [][]byte) []byte { // Finally, considering that the recursive implementation is easier to // read, it might not be worthwhile to switch to a less intuitive // implementation for so little benefit. -func SimpleHashFromByteSlicesIterative(input [][]byte) []byte { +func HashFromByteSlicesIterative(input [][]byte) []byte { items := make([][]byte, len(input)) for i, leaf := range input { @@ -70,7 +70,7 @@ func SimpleHashFromByteSlicesIterative(input [][]byte) []byte { for { switch size { case 0: - return nil + return emptyHash() case 1: return items[0] default: @@ -91,26 +91,14 @@ func SimpleHashFromByteSlicesIterative(input [][]byte) []byte { } } -// SimpleHashFromMap computes a Merkle tree from sorted map. -// Like calling SimpleHashFromHashers with -// `item = []byte(Hash(key) | Hash(value))`, -// sorted by `item`. 
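
For the record, a sketch of the shape of the computation the removed SimpleHashFromMap performed: values are hashed, pairs are sorted by key, each leaf is the length-prefixed key || value-hash, and the leaves are Merkle-hashed. appendLenPrefixed is an illustrative helper, and this is not claimed to be byte-for-byte identical to the removed code.

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"sort"

	"github.com/tendermint/tendermint/crypto/merkle"
)

// appendLenPrefixed appends bz to out, prefixed with its uvarint length.
func appendLenPrefixed(out, bz []byte) []byte {
	var buf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(buf[:], uint64(len(bz)))
	return append(append(out, buf[:n]...), bz...)
}

func main() {
	m := map[string][]byte{"key2": []byte("value2"), "key1": []byte("value1")}

	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	leaves := make([][]byte, len(keys))
	for i, k := range keys {
		vh := sha256.Sum256(m[k]) // values are hashed before inclusion
		leaves[i] = appendLenPrefixed(appendLenPrefixed(nil, []byte(k)), vh[:])
	}
	fmt.Printf("%X\n", merkle.HashFromByteSlices(leaves))
}
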
-func SimpleHashFromMap(m map[string][]byte) []byte { - sm := newSimpleMap() - for k, v := range m { - sm.Set(k, v) - } - return sm.Hash() -} - // getSplitPoint returns the largest power of 2 less than length -func getSplitPoint(length int) int { +func getSplitPoint(length int64) int64 { if length < 1 { panic("Trying to split a tree with size < 1") } uLength := uint(length) bitlen := bits.Len(uLength) - k := 1 << uint(bitlen-1) + k := int64(1 << uint(bitlen-1)) if k == length { k >>= 1 } diff --git a/crypto/merkle/simple_tree_test.go b/crypto/merkle/tree_test.go similarity index 55% rename from crypto/merkle/simple_tree_test.go rename to crypto/merkle/tree_test.go index 665017a97..bdf469836 100644 --- a/crypto/merkle/simple_tree_test.go +++ b/crypto/merkle/tree_test.go @@ -1,8 +1,10 @@ package merkle import ( + "encoding/hex" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -17,7 +19,36 @@ func (tI testItem) Hash() []byte { return []byte(tI) } -func TestSimpleProof(t *testing.T) { +func TestHashFromByteSlices(t *testing.T) { + testcases := map[string]struct { + slices [][]byte + expectHash string // in hex format + }{ + "nil": {nil, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, + "empty": {[][]byte{}, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, + "single": {[][]byte{{1, 2, 3}}, "054edec1d0211f624fed0cbca9d4f9400b0e491c43742af2c5b0abebf0c990d8"}, + "single blank": {[][]byte{{}}, "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"}, + "two": {[][]byte{{1, 2, 3}, {4, 5, 6}}, "82e6cfce00453804379b53962939eaa7906b39904be0813fcadd31b100773c4b"}, + "many": { + [][]byte{{1, 2}, {3, 4}, {5, 6}, {7, 8}, {9, 10}}, + "f326493eceab4f2d9ffbc78c59432a0a005d6ea98392045c74df5d14a113be18", + }, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + hash := HashFromByteSlices(tc.slices) + assert.Equal(t, tc.expectHash, hex.EncodeToString(hash)) + }) + } +} + +func TestProof(t *testing.T) { + + // Try an empty proof first + rootHash, proofs := ProofsFromByteSlices([][]byte{}) + require.Equal(t, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hex.EncodeToString(rootHash)) + require.Empty(t, proofs) total := 100 @@ -26,9 +57,9 @@ func TestSimpleProof(t *testing.T) { items[i] = testItem(tmrand.Bytes(tmhash.Size)) } - rootHash := SimpleHashFromByteSlices(items) + rootHash = HashFromByteSlices(items) - rootHash2, proofs := SimpleProofsFromByteSlices(items) + rootHash2, proofs := ProofsFromByteSlices(items) require.Equal(t, rootHash, rootHash2, "Unmatched root hashes: %X vs %X", rootHash, rootHash2) @@ -37,9 +68,9 @@ func TestSimpleProof(t *testing.T) { proof := proofs[i] // Check total/index - require.Equal(t, proof.Index, i, "Unmatched indicies: %d vs %d", proof.Index, i) + require.EqualValues(t, proof.Index, i, "Unmatched indicies: %d vs %d", proof.Index, i) - require.Equal(t, proof.Total, total, "Unmatched totals: %d vs %d", proof.Total, total) + require.EqualValues(t, proof.Total, total, "Unmatched totals: %d vs %d", proof.Total, total) // Verify success err := proof.Verify(rootHash, item) @@ -70,7 +101,7 @@ func TestSimpleProof(t *testing.T) { } } -func TestSimpleHashAlternatives(t *testing.T) { +func TestHashAlternatives(t *testing.T) { total := 100 @@ -79,12 +110,12 @@ func TestSimpleHashAlternatives(t *testing.T) { items[i] = testItem(tmrand.Bytes(tmhash.Size)) } - rootHash1 := 
SimpleHashFromByteSlicesIterative(items) - rootHash2 := SimpleHashFromByteSlices(items) + rootHash1 := HashFromByteSlicesIterative(items) + rootHash2 := HashFromByteSlices(items) require.Equal(t, rootHash1, rootHash2, "Unmatched root hashes: %X vs %X", rootHash1, rootHash2) } -func BenchmarkSimpleHashAlternatives(b *testing.B) { +func BenchmarkHashAlternatives(b *testing.B) { total := 100 items := make([][]byte, total) @@ -95,21 +126,21 @@ func BenchmarkSimpleHashAlternatives(b *testing.B) { b.ResetTimer() b.Run("recursive", func(b *testing.B) { for i := 0; i < b.N; i++ { - _ = SimpleHashFromByteSlices(items) + _ = HashFromByteSlices(items) } }) b.Run("iterative", func(b *testing.B) { for i := 0; i < b.N; i++ { - _ = SimpleHashFromByteSlicesIterative(items) + _ = HashFromByteSlicesIterative(items) } }) } func Test_getSplitPoint(t *testing.T) { tests := []struct { - length int - want int + length int64 + want int64 }{ {1, 0}, {2, 1}, @@ -125,6 +156,6 @@ func Test_getSplitPoint(t *testing.T) { } for _, tt := range tests { got := getSplitPoint(tt.length) - require.Equal(t, tt.want, got, "getSplitPoint(%d) = %v, want %v", tt.length, got, tt.want) + require.EqualValues(t, tt.want, got, "getSplitPoint(%d) = %v, want %v", tt.length, got, tt.want) } } diff --git a/crypto/merkle/types.go b/crypto/merkle/types.go index 97a47879b..6a5c7e6a3 100644 --- a/crypto/merkle/types.go +++ b/crypto/merkle/types.go @@ -1,9 +1,8 @@ package merkle import ( + "encoding/binary" "io" - - amino "github.com/tendermint/go-amino" ) // Tree is a Merkle tree interface. @@ -29,5 +28,12 @@ type Tree interface { // Uvarint length prefixed byteslice func encodeByteSlice(w io.Writer, bz []byte) (err error) { - return amino.EncodeByteSlice(w, bz) + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], uint64(len(bz))) + _, err = w.Write(buf[0:n]) + if err != nil { + return + } + _, err = w.Write(bz) + return } diff --git a/crypto/multisig/bitarray/compact_bit_array.go b/crypto/multisig/bitarray/compact_bit_array.go deleted file mode 100644 index 890a4c9c7..000000000 --- a/crypto/multisig/bitarray/compact_bit_array.go +++ /dev/null @@ -1,233 +0,0 @@ -package bitarray - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "regexp" - "strings" -) - -// CompactBitArray is an implementation of a space efficient bit array. -// This is used to ensure that the encoded data takes up a minimal amount of -// space after amino encoding. -// This is not thread safe, and is not intended for concurrent usage. -type CompactBitArray struct { - ExtraBitsStored byte `json:"extra_bits"` // The number of extra bits in elems. - Elems []byte `json:"bits"` -} - -// NewCompactBitArray returns a new compact bit array. -// It returns nil if the number of bits is zero. -func NewCompactBitArray(bits int) *CompactBitArray { - if bits <= 0 { - return nil - } - return &CompactBitArray{ - ExtraBitsStored: byte(bits % 8), - Elems: make([]byte, (bits+7)/8), - } -} - -// Size returns the number of bits in the bitarray -func (bA *CompactBitArray) Size() int { - if bA == nil { - return 0 - } else if bA.ExtraBitsStored == byte(0) { - return len(bA.Elems) * 8 - } - // num_bits = 8*num_full_bytes + overflow_in_last_byte - // num_full_bytes = (len(bA.Elems)-1) - return (len(bA.Elems)-1)*8 + int(bA.ExtraBitsStored) -} - -// GetIndex returns the bit at index i within the bit array. 
-// The behavior is undefined if i >= bA.Size()
-func (bA *CompactBitArray) GetIndex(i int) bool {
-	if bA == nil {
-		return false
-	}
-	if i >= bA.Size() {
-		return false
-	}
-	return bA.Elems[i>>3]&(uint8(1)<<uint8(7-(i%8))) > 0
-}
-
-// SetIndex sets the bit at index i within the bit array.
-// The behavior is undefined if i >= bA.Size()
-func (bA *CompactBitArray) SetIndex(i int, v bool) bool {
-	if bA == nil {
-		return false
-	}
-	if i >= bA.Size() {
-		return false
-	}
-	if v {
-		bA.Elems[i>>3] |= (uint8(1) << uint8(7-(i%8)))
-	} else {
-		bA.Elems[i>>3] &= ^(uint8(1) << uint8(7-(i%8)))
-	}
-	return true
-}
-
-// NumTrueBitsBefore returns the number of bits set to true before the
-// given index. e.g. if bA = _XX__XX, NumOfTrueBitsBefore(4) = 2, since
-// there are two bits set to true before index 4.
-func (bA *CompactBitArray) NumTrueBitsBefore(index int) int {
-	numTrueValues := 0
-	for i := 0; i < index; i++ {
-		if bA.GetIndex(i) {
-			numTrueValues++
-		}
-	}
-	return numTrueValues
-}
-
-// Copy returns a copy of the provided bit array.
-func (bA *CompactBitArray) Copy() *CompactBitArray {
-	if bA == nil {
-		return nil
-	}
-	c := make([]byte, len(bA.Elems))
-	copy(c, bA.Elems)
-	return &CompactBitArray{
-		ExtraBitsStored: bA.ExtraBitsStored,
-		Elems:           c,
-	}
-}
-
-// String returns a string representation of CompactBitArray: BA{<bits>},
-// where <bits> is a sequence of 'x' (1) and '_' (0).
-// The <bits> includes spaces and newlines to help people.
-// For a simple sequence of 'x' and '_' characters with no spaces or newlines,
-// see the MarshalJSON() method.
-// Example: "BA{_x_}" or "nil-BitArray" for nil.
-func (bA *CompactBitArray) String() string {
-	return bA.StringIndented("")
-}
-
-// StringIndented returns the same thing as String(), but applies the indent
-// at every 10th bit, and twice at every 50th bit.
-func (bA *CompactBitArray) StringIndented(indent string) string {
-	if bA == nil {
-		return "nil-BitArray"
-	}
-	lines := []string{}
-	bits := ""
-	size := bA.Size()
-	for i := 0; i < size; i++ {
-		if bA.GetIndex(i) {
-			bits += "x"
-		} else {
-			bits += "_"
-		}
-		if i%100 == 99 {
-			lines = append(lines, bits)
-			bits = ""
-		}
-		if i%10 == 9 {
-			bits += indent
-		}
-		if i%50 == 49 {
-			bits += indent
-		}
-	}
-	if len(bits) > 0 {
-		lines = append(lines, bits)
-	}
-	return fmt.Sprintf("BA{%v:%v}", size, strings.Join(lines, indent))
-}
-
-// MarshalJSON implements json.Marshaler interface by marshaling bit array
-// using a custom format: a string of '-' or 'x' where 'x' denotes the 1 bit.
-func (bA *CompactBitArray) MarshalJSON() ([]byte, error) {
-	if bA == nil {
-		return []byte("null"), nil
-	}
-
-	bits := `"`
-	size := bA.Size()
-	for i := 0; i < size; i++ {
-		if bA.GetIndex(i) {
-			bits += `x`
-		} else {
-			bits += `_`
-		}
-	}
-	bits += `"`
-	return []byte(bits), nil
-}
-
-var bitArrayJSONRegexp = regexp.MustCompile(`\A"([_x]*)"\z`)
-
-// UnmarshalJSON implements json.Unmarshaler interface by unmarshaling a custom
-// JSON description.
-func (bA *CompactBitArray) UnmarshalJSON(bz []byte) error {
-	b := string(bz)
-	if b == "null" {
-		// This is required e.g. for encoding/json when decoding
-		// into a pointer with pre-allocated BitArray.
-		bA.ExtraBitsStored = 0
-		bA.Elems = nil
-		return nil
-	}
-
-	// Validate 'b'.
-	match := bitArrayJSONRegexp.FindStringSubmatch(b)
-	if match == nil {
-		return fmt.Errorf("bitArray in JSON should be a string of format %q but got %s", bitArrayJSONRegexp.String(), b)
-	}
-	bits := match[1]
-
-	// Construct new CompactBitArray and copy over.
- numBits := len(bits) - bA2 := NewCompactBitArray(numBits) - for i := 0; i < numBits; i++ { - if bits[i] == 'x' { - bA2.SetIndex(i, true) - } - } - *bA = *bA2 - return nil -} - -// CompactMarshal is a space efficient encoding for CompactBitArray. -// It is not amino compatible. -func (bA *CompactBitArray) CompactMarshal() []byte { - size := bA.Size() - if size <= 0 { - return []byte("null") - } - bz := make([]byte, 0, size/8) - // length prefix number of bits, not number of bytes. This difference - // takes 3-4 bits in encoding, as opposed to instead encoding the number of - // bytes (saving 3-4 bits) and including the offset as a full byte. - bz = appendUvarint(bz, uint64(size)) - bz = append(bz, bA.Elems...) - return bz -} - -// CompactUnmarshal is a space efficient decoding for CompactBitArray. -// It is not amino compatible. -func CompactUnmarshal(bz []byte) (*CompactBitArray, error) { - if len(bz) < 2 { - return nil, errors.New("compact bit array: invalid compact unmarshal size") - } else if bytes.Equal(bz, []byte("null")) { - return NewCompactBitArray(0), nil - } - size, n := binary.Uvarint(bz) - bz = bz[n:] - if len(bz) != int(size+7)/8 { - return nil, errors.New("compact bit array: invalid compact unmarshal size") - } - - bA := &CompactBitArray{byte(int(size % 8)), bz} - return bA, nil -} - -func appendUvarint(b []byte, x uint64) []byte { - var a [binary.MaxVarintLen64]byte - n := binary.PutUvarint(a[:], x) - return append(b, a[:n]...) -} diff --git a/crypto/multisig/bitarray/compact_bit_array_test.go b/crypto/multisig/bitarray/compact_bit_array_test.go deleted file mode 100644 index f086dc877..000000000 --- a/crypto/multisig/bitarray/compact_bit_array_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package bitarray - -import ( - "encoding/json" - "math/rand" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - tmrand "github.com/tendermint/tendermint/libs/rand" -) - -func randCompactBitArray(bits int) (*CompactBitArray, []byte) { - numBytes := (bits + 7) / 8 - src := tmrand.Bytes((bits + 7) / 8) - bA := NewCompactBitArray(bits) - - for i := 0; i < numBytes-1; i++ { - for j := uint8(0); j < 8; j++ { - bA.SetIndex(i*8+int(j), src[i]&(uint8(1)<<(8-j)) > 0) - } - } - // Set remaining bits - for i := uint8(0); i < 8-bA.ExtraBitsStored; i++ { - bA.SetIndex(numBytes*8+int(i), src[numBytes-1]&(uint8(1)<<(8-i)) > 0) - } - return bA, src -} - -func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) { - bitList := []int{-127, -128, -1 << 31} - for _, bits := range bitList { - bA := NewCompactBitArray(bits) - require.Nil(t, bA) - } -} - -func TestJSONMarshalUnmarshal(t *testing.T) { - - bA1 := NewCompactBitArray(0) - bA2 := NewCompactBitArray(1) - - bA3 := NewCompactBitArray(1) - bA3.SetIndex(0, true) - - bA4 := NewCompactBitArray(5) - bA4.SetIndex(0, true) - bA4.SetIndex(1, true) - - bA5 := NewCompactBitArray(9) - bA5.SetIndex(0, true) - bA5.SetIndex(1, true) - bA5.SetIndex(8, true) - - bA6 := NewCompactBitArray(16) - bA6.SetIndex(0, true) - bA6.SetIndex(1, true) - bA6.SetIndex(8, false) - bA6.SetIndex(15, true) - - testCases := []struct { - bA *CompactBitArray - marshalledBA string - }{ - {nil, `null`}, - {bA1, `null`}, - {bA2, `"_"`}, - {bA3, `"x"`}, - {bA4, `"xx___"`}, - {bA5, `"xx______x"`}, - {bA6, `"xx_____________x"`}, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.bA.String(), func(t *testing.T) { - bz, err := json.Marshal(tc.bA) - require.NoError(t, err) - - assert.Equal(t, tc.marshalledBA, string(bz)) - - var unmarshalledBA 
*CompactBitArray - err = json.Unmarshal(bz, &unmarshalledBA) - require.NoError(t, err) - - if tc.bA == nil { - require.Nil(t, unmarshalledBA) - } else { - require.NotNil(t, unmarshalledBA) - assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) - if assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) { - assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) - } - } - }) - } -} - -func TestCompactMarshalUnmarshal(t *testing.T) { - bA1 := NewCompactBitArray(0) - bA2 := NewCompactBitArray(1) - - bA3 := NewCompactBitArray(1) - bA3.SetIndex(0, true) - - bA4 := NewCompactBitArray(5) - bA4.SetIndex(0, true) - bA4.SetIndex(1, true) - - bA5 := NewCompactBitArray(9) - bA5.SetIndex(0, true) - bA5.SetIndex(1, true) - bA5.SetIndex(8, true) - - bA6 := NewCompactBitArray(16) - bA6.SetIndex(0, true) - bA6.SetIndex(1, true) - bA6.SetIndex(8, false) - bA6.SetIndex(15, true) - - testCases := []struct { - bA *CompactBitArray - marshalledBA []byte - }{ - {nil, []byte("null")}, - {bA1, []byte("null")}, - {bA2, []byte{byte(1), byte(0)}}, - {bA3, []byte{byte(1), byte(128)}}, - {bA4, []byte{byte(5), byte(192)}}, - {bA5, []byte{byte(9), byte(192), byte(128)}}, - {bA6, []byte{byte(16), byte(192), byte(1)}}, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.bA.String(), func(t *testing.T) { - bz := tc.bA.CompactMarshal() - - assert.Equal(t, tc.marshalledBA, bz) - - unmarshalledBA, err := CompactUnmarshal(bz) - require.NoError(t, err) - if tc.bA == nil { - require.Nil(t, unmarshalledBA) - } else { - require.NotNil(t, unmarshalledBA) - assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) - if assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) { - assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) - } - } - }) - } -} - -func TestCompactBitArrayNumOfTrueBitsBefore(t *testing.T) { - testCases := []struct { - marshalledBA string - bAIndex []int - trueValueIndex []int - }{ - {`"_____"`, []int{0, 1, 2, 3, 4}, []int{0, 0, 0, 0, 0}}, - {`"x"`, []int{0}, []int{0}}, - {`"_x"`, []int{1}, []int{0}}, - {`"x___xxxx"`, []int{0, 4, 5, 6, 7}, []int{0, 1, 2, 3, 4}}, - {`"__x_xx_x__x_x___"`, []int{2, 4, 5, 7, 10, 12}, []int{0, 1, 2, 3, 4, 5}}, - {`"______________xx"`, []int{14, 15}, []int{0, 1}}, - } - for tcIndex, tc := range testCases { - tc := tc - tcIndex := tcIndex - t.Run(tc.marshalledBA, func(t *testing.T) { - var bA *CompactBitArray - err := json.Unmarshal([]byte(tc.marshalledBA), &bA) - require.NoError(t, err) - - for i := 0; i < len(tc.bAIndex); i++ { - - require.Equal(t, tc.trueValueIndex[i], bA.NumTrueBitsBefore(tc.bAIndex[i]), "tc %d, i %d", tcIndex, i) - } - }) - } -} - -func TestCompactBitArrayGetSetIndex(t *testing.T) { - r := rand.New(rand.NewSource(100)) - numTests := 10 - numBitsPerArr := 100 - for i := 0; i < numTests; i++ { - bits := r.Intn(1000) - bA, _ := randCompactBitArray(bits) - - for j := 0; j < numBitsPerArr; j++ { - copy := bA.Copy() - index := r.Intn(bits) - val := (r.Int63() % 2) == 0 - bA.SetIndex(index, val) - require.Equal(t, val, bA.GetIndex(index), "bA.SetIndex(%d, %v) failed on bit array: %s", index, val, copy) - } - } -} diff --git a/crypto/multisig/codec.go b/crypto/multisig/codec.go deleted file mode 100644 index cc1e12f92..000000000 --- a/crypto/multisig/codec.go +++ /dev/null @@ -1,30 +0,0 @@ -package multisig - -import ( - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/secp256k1" - 
"github.com/tendermint/tendermint/crypto/sr25519" -) - -// TODO: Figure out API for others to either add their own pubkey types, or -// to make verify / marshal accept a cdc. -const ( - PubKeyMultisigThresholdAminoRoute = "tendermint/PubKeyMultisigThreshold" -) - -var cdc = amino.NewCodec() - -func init() { - cdc.RegisterInterface((*crypto.PubKey)(nil), nil) - cdc.RegisterConcrete(PubKeyMultisigThreshold{}, - PubKeyMultisigThresholdAminoRoute, nil) - cdc.RegisterConcrete(ed25519.PubKeyEd25519{}, - ed25519.PubKeyAminoName, nil) - cdc.RegisterConcrete(sr25519.PubKeySr25519{}, - sr25519.PubKeyAminoName, nil) - cdc.RegisterConcrete(secp256k1.PubKeySecp256k1{}, - secp256k1.PubKeyAminoName, nil) -} diff --git a/crypto/multisig/multisignature.go b/crypto/multisig/multisignature.go deleted file mode 100644 index 1e3bef4e1..000000000 --- a/crypto/multisig/multisignature.go +++ /dev/null @@ -1,77 +0,0 @@ -package multisig - -import ( - "fmt" - "strings" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/multisig/bitarray" -) - -// Multisignature is used to represent the signature object used in the multisigs. -// Sigs is a list of signatures, sorted by corresponding index. -type Multisignature struct { - BitArray *bitarray.CompactBitArray - Sigs [][]byte -} - -// NewMultisig returns a new Multisignature of size n. -func NewMultisig(n int) *Multisignature { - // Default the signature list to have a capacity of two, since we can - // expect that most multisigs will require multiple signers. - return &Multisignature{bitarray.NewCompactBitArray(n), make([][]byte, 0, 2)} -} - -// GetIndex returns the index of pk in keys. Returns -1 if not found -func getIndex(pk crypto.PubKey, keys []crypto.PubKey) int { - for i := 0; i < len(keys); i++ { - if pk.Equals(keys[i]) { - return i - } - } - return -1 -} - -// AddSignature adds a signature to the multisig, at the corresponding index. -// If the signature already exists, replace it. -func (mSig *Multisignature) AddSignature(sig []byte, index int) { - newSigIndex := mSig.BitArray.NumTrueBitsBefore(index) - // Signature already exists, just replace the value there - if mSig.BitArray.GetIndex(index) { - mSig.Sigs[newSigIndex] = sig - return - } - mSig.BitArray.SetIndex(index, true) - // Optimization if the index is the greatest index - if newSigIndex == len(mSig.Sigs) { - mSig.Sigs = append(mSig.Sigs, sig) - return - } - // Expand slice by one with a dummy element, move all elements after i - // over by one, then place the new signature in that gap. - mSig.Sigs = append(mSig.Sigs, make([]byte, 0)) - copy(mSig.Sigs[newSigIndex+1:], mSig.Sigs[newSigIndex:]) - mSig.Sigs[newSigIndex] = sig -} - -// AddSignatureFromPubKey adds a signature to the multisig, at the index in -// keys corresponding to the provided pubkey. 
-func (mSig *Multisignature) AddSignatureFromPubKey(sig []byte, pubkey crypto.PubKey, keys []crypto.PubKey) error { - index := getIndex(pubkey, keys) - if index == -1 { - keysStr := make([]string, len(keys)) - for i, k := range keys { - keysStr[i] = fmt.Sprintf("%X", k.Bytes()) - } - - return fmt.Errorf("provided key %X doesn't exist in pubkeys: \n%s", pubkey.Bytes(), strings.Join(keysStr, "\n")) - } - - mSig.AddSignature(sig, index) - return nil -} - -// Marshal the multisignature with amino -func (mSig *Multisignature) Marshal() []byte { - return cdc.MustMarshalBinaryBare(mSig) -} diff --git a/crypto/multisig/threshold_pubkey.go b/crypto/multisig/threshold_pubkey.go deleted file mode 100644 index 36e2dc2dd..000000000 --- a/crypto/multisig/threshold_pubkey.go +++ /dev/null @@ -1,96 +0,0 @@ -package multisig - -import ( - "github.com/tendermint/tendermint/crypto" -) - -// PubKeyMultisigThreshold implements a K of N threshold multisig. -type PubKeyMultisigThreshold struct { - K uint `json:"threshold"` - PubKeys []crypto.PubKey `json:"pubkeys"` -} - -var _ crypto.PubKey = PubKeyMultisigThreshold{} - -// NewPubKeyMultisigThreshold returns a new PubKeyMultisigThreshold. -// Panics if len(pubkeys) < k or 0 >= k. -func NewPubKeyMultisigThreshold(k int, pubkeys []crypto.PubKey) crypto.PubKey { - if k <= 0 { - panic("threshold k of n multisignature: k <= 0") - } - if len(pubkeys) < k { - panic("threshold k of n multisignature: len(pubkeys) < k") - } - for _, pubkey := range pubkeys { - if pubkey == nil { - panic("nil pubkey") - } - } - return PubKeyMultisigThreshold{uint(k), pubkeys} -} - -// VerifyBytes expects sig to be an amino encoded version of a MultiSignature. -// Returns true iff the multisignature contains k or more signatures -// for the correct corresponding keys, -// and all signatures are valid. (Not just k of the signatures) -// The multisig uses a bitarray, so multiple signatures for the same key is not -// a concern. -func (pk PubKeyMultisigThreshold) VerifyBytes(msg []byte, marshalledSig []byte) bool { - var sig Multisignature - err := cdc.UnmarshalBinaryBare(marshalledSig, &sig) - if err != nil { - return false - } - size := sig.BitArray.Size() - // ensure bit array is the correct size - if len(pk.PubKeys) != size { - return false - } - // ensure size of signature list - if len(sig.Sigs) < int(pk.K) || len(sig.Sigs) > size { - return false - } - // ensure at least k signatures are set - if sig.BitArray.NumTrueBitsBefore(size) < int(pk.K) { - return false - } - // index in the list of signatures which we are concerned with. - sigIndex := 0 - for i := 0; i < size; i++ { - if sig.BitArray.GetIndex(i) { - if !pk.PubKeys[i].VerifyBytes(msg, sig.Sigs[sigIndex]) { - return false - } - sigIndex++ - } - } - return true -} - -// Bytes returns the amino encoded version of the PubKeyMultisigThreshold -func (pk PubKeyMultisigThreshold) Bytes() []byte { - return cdc.MustMarshalBinaryBare(pk) -} - -// Address returns tmhash(PubKeyMultisigThreshold.Bytes()) -func (pk PubKeyMultisigThreshold) Address() crypto.Address { - return crypto.AddressHash(pk.Bytes()) -} - -// Equals returns true iff pk and other both have the same number of keys, and -// all constituent keys are the same, and in the same order. 
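
A usage sketch for the threshold key (import paths as in this pre-refactor tree, which this patch removes): VerifyBytes fails below the threshold and succeeds once k distinct keys have signed.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/crypto/multisig"
)

func main() {
	msg := []byte("tx bytes")
	privs := []crypto.PrivKey{ed25519.GenPrivKey(), ed25519.GenPrivKey(), ed25519.GenPrivKey()}
	pubs := make([]crypto.PubKey, len(privs))
	for i, p := range privs {
		pubs[i] = p.PubKey()
	}

	multisigKey := multisig.NewPubKeyMultisigThreshold(2, pubs)
	sig := multisig.NewMultisig(len(pubs))

	s0, _ := privs[0].Sign(msg)
	if err := sig.AddSignatureFromPubKey(s0, pubs[0], pubs); err != nil {
		panic(err)
	}
	fmt.Println(multisigKey.VerifyBytes(msg, sig.Marshal())) // false: only 1 of k=2

	s2, _ := privs[2].Sign(msg)
	if err := sig.AddSignatureFromPubKey(s2, pubs[2], pubs); err != nil {
		panic(err)
	}
	fmt.Println(multisigKey.VerifyBytes(msg, sig.Marshal())) // true: 2 of 3 signed
}
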
-func (pk PubKeyMultisigThreshold) Equals(other crypto.PubKey) bool { - otherKey, sameType := other.(PubKeyMultisigThreshold) - if !sameType { - return false - } - if pk.K != otherKey.K || len(pk.PubKeys) != len(otherKey.PubKeys) { - return false - } - for i := 0; i < len(pk.PubKeys); i++ { - if !pk.PubKeys[i].Equals(otherKey.PubKeys[i]) { - return false - } - } - return true -} diff --git a/crypto/multisig/threshold_pubkey_test.go b/crypto/multisig/threshold_pubkey_test.go deleted file mode 100644 index 34b6a4773..000000000 --- a/crypto/multisig/threshold_pubkey_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package multisig - -import ( - "math/rand" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/secp256k1" - "github.com/tendermint/tendermint/crypto/sr25519" -) - -// This tests multisig functionality, but it expects the first k signatures to be valid -// TODO: Adapt it to give more flexibility about first k signatures being valid -func TestThresholdMultisigValidCases(t *testing.T) { - pkSet1, sigSet1 := generatePubKeysAndSignatures(5, []byte{1, 2, 3, 4}) - cases := []struct { - msg []byte - k int - pubkeys []crypto.PubKey - signingIndices []int - // signatures should be the same size as signingIndices. - signatures [][]byte - passAfterKSignatures []bool - }{ - { - msg: []byte{1, 2, 3, 4}, - k: 2, - pubkeys: pkSet1, - signingIndices: []int{0, 3, 1}, - signatures: sigSet1, - passAfterKSignatures: []bool{false}, - }, - } - for tcIndex, tc := range cases { - multisigKey := NewPubKeyMultisigThreshold(tc.k, tc.pubkeys) - multisignature := NewMultisig(len(tc.pubkeys)) - - for i := 0; i < tc.k-1; i++ { - signingIndex := tc.signingIndices[i] - require.NoError( - t, - multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys), - ) - require.False( - t, - multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), - "multisig passed when i < k, tc %d, i %d", tcIndex, i, - ) - require.NoError( - t, - multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys), - ) - require.Equal( - t, - i+1, - len(multisignature.Sigs), - "adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex, - ) - } - - require.False( - t, - multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), - "multisig passed with k - 1 sigs, tc %d", tcIndex, - ) - require.NoError( - t, - multisignature.AddSignatureFromPubKey( - tc.signatures[tc.signingIndices[tc.k]], - tc.pubkeys[tc.signingIndices[tc.k]], - tc.pubkeys, - ), - ) - require.True( - t, - multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), - "multisig failed after k good signatures, tc %d", tcIndex, - ) - - for i := tc.k + 1; i < len(tc.signingIndices); i++ { - signingIndex := tc.signingIndices[i] - - require.NoError( - t, - multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys), - ) - require.Equal( - t, - tc.passAfterKSignatures[i-tc.k-1], - multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), - "multisig didn't verify as expected after k sigs, tc %d, i %d", tcIndex, i, - ) - require.NoError( - t, - multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys), - ) - require.Equal( - t, - i+1, - len(multisignature.Sigs), - "adding a signature for the same pubkey twice increased signature count by 2, tc 
%d", tcIndex, - ) - } - } -} - -// TODO: Fully replace this test with table driven tests -func TestThresholdMultisigDuplicateSignatures(t *testing.T) { - msg := []byte{1, 2, 3, 4, 5} - pubkeys, sigs := generatePubKeysAndSignatures(5, msg) - multisigKey := NewPubKeyMultisigThreshold(2, pubkeys) - multisignature := NewMultisig(5) - require.False(t, multisigKey.VerifyBytes(msg, multisignature.Marshal())) - multisignature.AddSignatureFromPubKey(sigs[0], pubkeys[0], pubkeys) - // Add second signature manually - multisignature.Sigs = append(multisignature.Sigs, sigs[0]) - require.False(t, multisigKey.VerifyBytes(msg, multisignature.Marshal())) -} - -// TODO: Fully replace this test with table driven tests -func TestMultiSigPubKeyEquality(t *testing.T) { - msg := []byte{1, 2, 3, 4} - pubkeys, _ := generatePubKeysAndSignatures(5, msg) - multisigKey := NewPubKeyMultisigThreshold(2, pubkeys) - var unmarshalledMultisig PubKeyMultisigThreshold - cdc.MustUnmarshalBinaryBare(multisigKey.Bytes(), &unmarshalledMultisig) - require.True(t, multisigKey.Equals(unmarshalledMultisig)) - - // Ensure that reordering pubkeys is treated as a different pubkey - pubkeysCpy := make([]crypto.PubKey, 5) - copy(pubkeysCpy, pubkeys) - pubkeysCpy[4] = pubkeys[3] - pubkeysCpy[3] = pubkeys[4] - multisigKey2 := NewPubKeyMultisigThreshold(2, pubkeysCpy) - require.False(t, multisigKey.Equals(multisigKey2)) -} - -func TestAddress(t *testing.T) { - msg := []byte{1, 2, 3, 4} - pubkeys, _ := generatePubKeysAndSignatures(5, msg) - multisigKey := NewPubKeyMultisigThreshold(2, pubkeys) - require.Len(t, multisigKey.Address().Bytes(), 20) -} - -func TestPubKeyMultisigThresholdAminoToIface(t *testing.T) { - msg := []byte{1, 2, 3, 4} - pubkeys, _ := generatePubKeysAndSignatures(5, msg) - multisigKey := NewPubKeyMultisigThreshold(2, pubkeys) - - ab, err := cdc.MarshalBinaryLengthPrefixed(multisigKey) - require.NoError(t, err) - // like other crypto.Pubkey implementations (e.g. 
ed25519.PubKeyEd25519), - // PubKeyMultisigThreshold should be deserializable into a crypto.PubKey: - var pubKey crypto.PubKey - err = cdc.UnmarshalBinaryLengthPrefixed(ab, &pubKey) - require.NoError(t, err) - - require.Equal(t, multisigKey, pubKey) -} - -func generatePubKeysAndSignatures(n int, msg []byte) (pubkeys []crypto.PubKey, signatures [][]byte) { - pubkeys = make([]crypto.PubKey, n) - signatures = make([][]byte, n) - for i := 0; i < n; i++ { - var privkey crypto.PrivKey - switch rand.Int63() % 3 { - case 0: - privkey = ed25519.GenPrivKey() - case 1: - privkey = secp256k1.GenPrivKey() - case 2: - privkey = sr25519.GenPrivKey() - } - pubkeys[i] = privkey.PubKey() - signatures[i], _ = privkey.Sign(msg) - } - return -} diff --git a/crypto/secp256k1/bench_test.go b/crypto/secp256k1/bench_test.go deleted file mode 100644 index 4f651b762..000000000 --- a/crypto/secp256k1/bench_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package secp256k1 - -import ( - "io" - "testing" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/internal/benchmarking" -) - -func BenchmarkKeyGeneration(b *testing.B) { - benchmarkKeygenWrapper := func(reader io.Reader) crypto.PrivKey { - return genPrivKey(reader) - } - benchmarking.BenchmarkKeyGeneration(b, benchmarkKeygenWrapper) -} - -func BenchmarkSigning(b *testing.B) { - priv := GenPrivKey() - benchmarking.BenchmarkSigning(b, priv) -} - -func BenchmarkVerification(b *testing.B) { - priv := GenPrivKey() - benchmarking.BenchmarkVerification(b, priv) -} diff --git a/crypto/secp256k1/internal/secp256k1/.gitignore b/crypto/secp256k1/internal/secp256k1/.gitignore deleted file mode 100644 index 802b6744a..000000000 --- a/crypto/secp256k1/internal/secp256k1/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -*~ diff --git a/crypto/secp256k1/internal/secp256k1/LICENSE b/crypto/secp256k1/internal/secp256k1/LICENSE deleted file mode 100644 index f9090e142..000000000 --- a/crypto/secp256k1/internal/secp256k1/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Copyright (c) 2010 The Go Authors. All rights reserved. -Copyright (c) 2011 ThePiachu. All rights reserved. -Copyright (c) 2015 Jeffrey Wilcke. All rights reserved. -Copyright (c) 2015 Felix Lange. All rights reserved. -Copyright (c) 2015 Gustav Simonsson. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of the copyright holder. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/crypto/secp256k1/internal/secp256k1/README.md b/crypto/secp256k1/internal/secp256k1/README.md deleted file mode 100644 index d899ca270..000000000 --- a/crypto/secp256k1/internal/secp256k1/README.md +++ /dev/null @@ -1,3 +0,0 @@ -This package is copied from https://github.com/ethereum/go-ethereum/tree/729bf365b5f17325be9107b63b233da54100eec6/crypto/secp256k1 - -Unlike the rest of go-ethereum it is MIT licensed so compatible with our Apache2.0 license. We opt to copy in here rather than depend on go-ethereum to avoid issues with vendoring of the GPL parts of that repository by downstream. diff --git a/crypto/secp256k1/internal/secp256k1/curve.go b/crypto/secp256k1/internal/secp256k1/curve.go deleted file mode 100644 index 7a2387365..000000000 --- a/crypto/secp256k1/internal/secp256k1/curve.go +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Copyright 2011 ThePiachu. All rights reserved. -// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// * The name of ThePiachu may not be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -// nolint:gocritic -package secp256k1 - -import ( - "crypto/elliptic" - "math/big" - "unsafe" -) - -/* -#include "libsecp256k1/include/secp256k1.h" -extern int secp256k1_ext_scalar_mul(const secp256k1_context* ctx, - const unsigned char *point, - const unsigned char *scalar); -*/ -import "C" - -const ( - // number of bits in a big.Word - wordBits = 32 << (uint64(^big.Word(0)) >> 63) - // number of bytes in a big.Word - wordBytes = wordBits / 8 -) - -// readBits encodes the absolute value of bigint as big-endian bytes. Callers -// must ensure that buf has enough space. If buf is too short the result will -// be incomplete. -func readBits(bigint *big.Int, buf []byte) { - i := len(buf) - for _, d := range bigint.Bits() { - for j := 0; j < wordBytes && i > 0; j++ { - i-- - buf[i] = byte(d) - d >>= 8 - } - } -} - -// This code is from https://github.com/ThePiachu/GoBit and implements -// several Koblitz elliptic curves over prime fields. -// -// The curve methods, internally, on Jacobian coordinates. For a given -// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, -// z1) where x = x1/z1² and y = y1/z1³. The greatest speedups come -// when the whole calculation can be performed within the transform -// (as in ScalarMult and ScalarBaseMult). But even for Add and Double, -// it's faster to apply and reverse the transform than to operate in -// affine coordinates. - -// A BitCurve represents a Koblitz Curve with a=0. -// See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html -type BitCurve struct { - P *big.Int // the order of the underlying field - N *big.Int // the order of the base point - B *big.Int // the constant of the BitCurve equation - Gx, Gy *big.Int // (x,y) of the base point - BitSize int // the size of the underlying field -} - -func (BitCurve *BitCurve) Params() *elliptic.CurveParams { - return &elliptic.CurveParams{ - P: BitCurve.P, - N: BitCurve.N, - B: BitCurve.B, - Gx: BitCurve.Gx, - Gy: BitCurve.Gy, - BitSize: BitCurve.BitSize, - } -} - -// IsOnCurve returns true if the given (x,y) lies on the BitCurve. -func (BitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool { - // y² = x³ + b - y2 := new(big.Int).Mul(y, y) //y² - y2.Mod(y2, BitCurve.P) //y²%P - - x3 := new(big.Int).Mul(x, x) //x² - x3.Mul(x3, x) //x³ - - x3.Add(x3, BitCurve.B) //x³+B - x3.Mod(x3, BitCurve.P) //(x³+B)%P - - return x3.Cmp(y2) == 0 -} - -//TODO: double check if the function is okay -// affineFromJacobian reverses the Jacobian transform. See the comment at the -// top of the file. -func (BitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) { - zinv := new(big.Int).ModInverse(z, BitCurve.P) - zinvsq := new(big.Int).Mul(zinv, zinv) - - xOut = new(big.Int).Mul(x, zinvsq) - xOut.Mod(xOut, BitCurve.P) - zinvsq.Mul(zinvsq, zinv) - yOut = new(big.Int).Mul(y, zinvsq) - yOut.Mod(yOut, BitCurve.P) - return -} - -// Add returns the sum of (x1,y1) and (x2,y2) -func (BitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { - z := new(big.Int).SetInt64(1) - return BitCurve.affineFromJacobian(BitCurve.addJacobian(x1, y1, z, x2, y2, z)) -} - -// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and -// (x2, y2, z2) and returns their sum, also in Jacobian form. 
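
A toy check of the Jacobian-to-affine relation described above, x = X/Z² and y = Y/Z³, over a small stand-in prime rather than the secp256k1 field:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := big.NewInt(101) // toy prime standing in for the field order
	x, y := big.NewInt(3), big.NewInt(5)
	z := big.NewInt(2)

	// Project (x, y) into Jacobian coordinates: X = x*z^2, Y = y*z^3.
	X := new(big.Int).Mul(x, new(big.Int).Mul(z, z))
	Y := new(big.Int).Mul(y, new(big.Int).Exp(z, big.NewInt(3), nil))

	// Reverse the transform as affineFromJacobian does: multiply by
	// the modular inverses of z^2 and z^3.
	zinv := new(big.Int).ModInverse(z, p)
	zinv2 := new(big.Int).Mul(zinv, zinv)
	zinv3 := new(big.Int).Mul(zinv2, zinv)
	xOut := new(big.Int).Mod(new(big.Int).Mul(X, zinv2), p)
	yOut := new(big.Int).Mod(new(big.Int).Mul(Y, zinv3), p)

	fmt.Println(xOut, yOut) // 3 5: the original affine point
}
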
-func (BitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) { - // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl - z1z1 := new(big.Int).Mul(z1, z1) - z1z1.Mod(z1z1, BitCurve.P) - z2z2 := new(big.Int).Mul(z2, z2) - z2z2.Mod(z2z2, BitCurve.P) - - u1 := new(big.Int).Mul(x1, z2z2) - u1.Mod(u1, BitCurve.P) - u2 := new(big.Int).Mul(x2, z1z1) - u2.Mod(u2, BitCurve.P) - h := new(big.Int).Sub(u2, u1) - if h.Sign() == -1 { - h.Add(h, BitCurve.P) - } - i := new(big.Int).Lsh(h, 1) - i.Mul(i, i) - j := new(big.Int).Mul(h, i) - - s1 := new(big.Int).Mul(y1, z2) - s1.Mul(s1, z2z2) - s1.Mod(s1, BitCurve.P) - s2 := new(big.Int).Mul(y2, z1) - s2.Mul(s2, z1z1) - s2.Mod(s2, BitCurve.P) - r := new(big.Int).Sub(s2, s1) - if r.Sign() == -1 { - r.Add(r, BitCurve.P) - } - r.Lsh(r, 1) - v := new(big.Int).Mul(u1, i) - - x3 := new(big.Int).Set(r) - x3.Mul(x3, x3) - x3.Sub(x3, j) - x3.Sub(x3, v) - x3.Sub(x3, v) - x3.Mod(x3, BitCurve.P) - - y3 := new(big.Int).Set(r) - v.Sub(v, x3) - y3.Mul(y3, v) - s1.Mul(s1, j) - s1.Lsh(s1, 1) - y3.Sub(y3, s1) - y3.Mod(y3, BitCurve.P) - - z3 := new(big.Int).Add(z1, z2) - z3.Mul(z3, z3) - z3.Sub(z3, z1z1) - if z3.Sign() == -1 { - z3.Add(z3, BitCurve.P) - } - z3.Sub(z3, z2z2) - if z3.Sign() == -1 { - z3.Add(z3, BitCurve.P) - } - z3.Mul(z3, h) - z3.Mod(z3, BitCurve.P) - - return x3, y3, z3 -} - -// Double returns 2*(x,y) -func (BitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { - z1 := new(big.Int).SetInt64(1) - return BitCurve.affineFromJacobian(BitCurve.doubleJacobian(x1, y1, z1)) -} - -// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and -// returns its double, also in Jacobian form. -func (BitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) { - // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l - - a := new(big.Int).Mul(x, x) //X1² - b := new(big.Int).Mul(y, y) //Y1² - c := new(big.Int).Mul(b, b) //B² - - d := new(big.Int).Add(x, b) //X1+B - d.Mul(d, d) //(X1+B)² - d.Sub(d, a) //(X1+B)²-A - d.Sub(d, c) //(X1+B)²-A-C - d.Mul(d, big.NewInt(2)) //2*((X1+B)²-A-C) - - e := new(big.Int).Mul(big.NewInt(3), a) //3*A - f := new(big.Int).Mul(e, e) //E² - - x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D - x3.Sub(f, x3) //F-2*D - x3.Mod(x3, BitCurve.P) - - y3 := new(big.Int).Sub(d, x3) //D-X3 - y3.Mul(e, y3) //E*(D-X3) - y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C - y3.Mod(y3, BitCurve.P) - - z3 := new(big.Int).Mul(y, z) //Y1*Z1 - z3.Mul(big.NewInt(2), z3) //3*Y1*Z1 - z3.Mod(z3, BitCurve.P) - - return x3, y3, z3 -} - -func (BitCurve *BitCurve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) { - // Ensure scalar is exactly 32 bytes. We pad always, even if - // scalar is 32 bytes long, to avoid a timing side channel. - if len(scalar) > 32 { - panic("can't handle scalars > 256 bits") - } - // NOTE: potential timing issue - padded := make([]byte, 32) - copy(padded[32-len(scalar):], scalar) - scalar = padded - - // Do the multiplication in C, updating point. - point := make([]byte, 64) - readBits(Bx, point[:32]) - readBits(By, point[32:]) - - pointPtr := (*C.uchar)(unsafe.Pointer(&point[0])) - scalarPtr := (*C.uchar)(unsafe.Pointer(&scalar[0])) - res := C.secp256k1_ext_scalar_mul(context, pointPtr, scalarPtr) - - // Unpack the result and clear temporaries. 
-	x := new(big.Int).SetBytes(point[:32])
-	y := new(big.Int).SetBytes(point[32:])
-	for i := range point {
-		point[i] = 0
-	}
-	for i := range scalar {
-		scalar[i] = 0
-	}
-	if res != 1 {
-		return nil, nil
-	}
-	return x, y
-}
-
-// ScalarBaseMult returns k*G, where G is the base point of the group and k is
-// an integer in big-endian form.
-func (BitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
-	return BitCurve.ScalarMult(BitCurve.Gx, BitCurve.Gy, k)
-}
-
-// Marshal converts a point into the form specified in section 4.3.6 of ANSI
-// X9.62.
-func (BitCurve *BitCurve) Marshal(x, y *big.Int) []byte {
-	byteLen := (BitCurve.BitSize + 7) >> 3
-	ret := make([]byte, 1+2*byteLen)
-	ret[0] = 4 // uncompressed point flag
-	readBits(x, ret[1:1+byteLen])
-	readBits(y, ret[1+byteLen:])
-	return ret
-}
-
-// Unmarshal converts a point, serialised by Marshal, into an x, y pair. On
-// error, x = nil.
-func (BitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) {
-	byteLen := (BitCurve.BitSize + 7) >> 3
-	if len(data) != 1+2*byteLen {
-		return
-	}
-	if data[0] != 4 { // uncompressed form
-		return
-	}
-	x = new(big.Int).SetBytes(data[1 : 1+byteLen])
-	y = new(big.Int).SetBytes(data[1+byteLen:])
-	return
-}
-
-var theCurve = new(BitCurve)
-
-func init() {
-	// See SEC 2 section 2.7.1
-	// curve parameters taken from:
-	// http://www.secg.org/sec2-v2.pdf
-	theCurve.P, _ = new(big.Int).SetString("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 0)
-	theCurve.N, _ = new(big.Int).SetString("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 0)
-	theCurve.B, _ = new(big.Int).SetString("0x0000000000000000000000000000000000000000000000000000000000000007", 0)
-	theCurve.Gx, _ = new(big.Int).SetString("0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 0)
-	theCurve.Gy, _ = new(big.Int).SetString("0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 0)
-	theCurve.BitSize = 256
-}
-
-// S256 returns a BitCurve which implements secp256k1.
-func S256() *BitCurve {
-	return theCurve
-}
diff --git a/crypto/secp256k1/internal/secp256k1/ext.h b/crypto/secp256k1/internal/secp256k1/ext.h
deleted file mode 100644
index e422fe4b4..000000000
--- a/crypto/secp256k1/internal/secp256k1/ext.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be found in
-// the LICENSE file.
-
-// secp256k1_context_create_sign_verify creates a context for signing and signature verification.
-static secp256k1_context* secp256k1_context_create_sign_verify() {
-    return secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
-}
-
-// secp256k1_ext_ecdsa_recover recovers the public key of an encoded compact signature.
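-// (Annotation: per the parameter list below, the 65th input byte sigdata[64]
-// carries the recovery id, and the recovered key is written back in the
-// 65-byte uncompressed form selected by SECP256K1_EC_UNCOMPRESSED.)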
-// -// Returns: 1: recovery was successful -// 0: recovery was not successful -// Args: ctx: pointer to a context object (cannot be NULL) -// Out: pubkey_out: the serialized 65-byte public key of the signer (cannot be NULL) -// In: sigdata: pointer to a 65-byte signature with the recovery id at the end (cannot be NULL) -// msgdata: pointer to a 32-byte message (cannot be NULL) -static int secp256k1_ext_ecdsa_recover( - const secp256k1_context* ctx, - unsigned char *pubkey_out, - const unsigned char *sigdata, - const unsigned char *msgdata -) { - secp256k1_ecdsa_recoverable_signature sig; - secp256k1_pubkey pubkey; - - if (!secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &sig, sigdata, (int)sigdata[64])) { - return 0; - } - if (!secp256k1_ecdsa_recover(ctx, &pubkey, &sig, msgdata)) { - return 0; - } - size_t outputlen = 65; - return secp256k1_ec_pubkey_serialize(ctx, pubkey_out, &outputlen, &pubkey, SECP256K1_EC_UNCOMPRESSED); -} - -// secp256k1_ext_ecdsa_verify verifies an encoded compact signature. -// -// Returns: 1: signature is valid -// 0: signature is invalid -// Args: ctx: pointer to a context object (cannot be NULL) -// In: sigdata: pointer to a 64-byte signature (cannot be NULL) -// msgdata: pointer to a 32-byte message (cannot be NULL) -// pubkeydata: pointer to public key data (cannot be NULL) -// pubkeylen: length of pubkeydata -static int secp256k1_ext_ecdsa_verify( - const secp256k1_context* ctx, - const unsigned char *sigdata, - const unsigned char *msgdata, - const unsigned char *pubkeydata, - size_t pubkeylen -) { - secp256k1_ecdsa_signature sig; - secp256k1_pubkey pubkey; - - if (!secp256k1_ecdsa_signature_parse_compact(ctx, &sig, sigdata)) { - return 0; - } - if (!secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeydata, pubkeylen)) { - return 0; - } - return secp256k1_ecdsa_verify(ctx, &sig, msgdata, &pubkey); -} - -// secp256k1_ext_reencode_pubkey decodes then encodes a public key. It can be used to -// convert between public key formats. The input/output formats are chosen depending on the -// length of the input/output buffers. -// -// Returns: 1: conversion successful -// 0: conversion unsuccessful -// Args: ctx: pointer to a context object (cannot be NULL) -// Out: out: output buffer that will contain the reencoded key (cannot be NULL) -// In: outlen: length of out (33 for compressed keys, 65 for uncompressed keys) -// pubkeydata: the input public key (cannot be NULL) -// pubkeylen: length of pubkeydata -static int secp256k1_ext_reencode_pubkey( - const secp256k1_context* ctx, - unsigned char *out, - size_t outlen, - const unsigned char *pubkeydata, - size_t pubkeylen -) { - secp256k1_pubkey pubkey; - - if (!secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeydata, pubkeylen)) { - return 0; - } - unsigned int flag = (outlen == 33) ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED; - return secp256k1_ec_pubkey_serialize(ctx, out, &outlen, &pubkey, flag); -} - -// secp256k1_ext_scalar_mul multiplies a point by a scalar in constant time. -// -// Returns: 1: multiplication was successful -// 0: scalar was invalid (zero or overflow) -// Args: ctx: pointer to a context object (cannot be NULL) -// Out: point: the multiplied point (usually secret) -// In: point: pointer to a 64-byte public point, -// encoded as two 256bit big-endian numbers. 
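-//             (x coordinate first, then y; an annotation: this is exactly the
-//             buffer layout the Go ScalarMult wrapper above packs via readBits)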
-// scalar: a 32-byte scalar with which to multiply the point -int secp256k1_ext_scalar_mul(const secp256k1_context* ctx, unsigned char *point, const unsigned char *scalar) { - int ret = 0; - int overflow = 0; - secp256k1_fe feX, feY; - secp256k1_gej res; - secp256k1_ge ge; - secp256k1_scalar s; - ARG_CHECK(point != NULL); - ARG_CHECK(scalar != NULL); - (void)ctx; - - secp256k1_fe_set_b32(&feX, point); - secp256k1_fe_set_b32(&feY, point+32); - secp256k1_ge_set_xy(&ge, &feX, &feY); - secp256k1_scalar_set_b32(&s, scalar, &overflow); - if (overflow || secp256k1_scalar_is_zero(&s)) { - ret = 0; - } else { - secp256k1_ecmult_const(&res, &ge, &s); - secp256k1_ge_set_gej(&ge, &res); - /* Note: can't use secp256k1_pubkey_save here because it is not constant time. */ - secp256k1_fe_normalize(&ge.x); - secp256k1_fe_normalize(&ge.y); - secp256k1_fe_get_b32(point, &ge.x); - secp256k1_fe_get_b32(point+32, &ge.y); - ret = 1; - } - secp256k1_scalar_clear(&s); - return ret; -} diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/.gitignore b/crypto/secp256k1/internal/secp256k1/libsecp256k1/.gitignore deleted file mode 100644 index 87fea161b..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/.gitignore +++ /dev/null @@ -1,49 +0,0 @@ -bench_inv -bench_ecdh -bench_sign -bench_verify -bench_schnorr_verify -bench_recover -bench_internal -tests -exhaustive_tests -gen_context -*.exe -*.so -*.a -!.gitignore - -Makefile -configure -.libs/ -Makefile.in -aclocal.m4 -autom4te.cache/ -config.log -config.status -*.tar.gz -*.la -libtool -.deps/ -.dirstamp -*.lo -*.o -*~ -src/libsecp256k1-config.h -src/libsecp256k1-config.h.in -src/ecmult_static_context.h -build-aux/config.guess -build-aux/config.sub -build-aux/depcomp -build-aux/install-sh -build-aux/ltmain.sh -build-aux/m4/libtool.m4 -build-aux/m4/lt~obsolete.m4 -build-aux/m4/ltoptions.m4 -build-aux/m4/ltsugar.m4 -build-aux/m4/ltversion.m4 -build-aux/missing -build-aux/compile -build-aux/test-driver -src/stamp-h1 -libsecp256k1.pc diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/.travis.yml b/crypto/secp256k1/internal/secp256k1/libsecp256k1/.travis.yml deleted file mode 100644 index 243952924..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/.travis.yml +++ /dev/null @@ -1,69 +0,0 @@ -language: c -sudo: false -addons: - apt: - packages: libgmp-dev -compiler: - - clang - - gcc -cache: - directories: - - src/java/guava/ -env: - global: - - FIELD=auto BIGNUM=auto SCALAR=auto ENDOMORPHISM=no STATICPRECOMPUTATION=yes ASM=no BUILD=check EXTRAFLAGS= HOST= ECDH=no RECOVERY=no EXPERIMENTAL=no - - GUAVA_URL=https://search.maven.org/remotecontent?filepath=com/google/guava/guava/18.0/guava-18.0.jar GUAVA_JAR=src/java/guava/guava-18.0.jar - matrix: - - SCALAR=32bit RECOVERY=yes - - SCALAR=32bit FIELD=32bit ECDH=yes EXPERIMENTAL=yes - - SCALAR=64bit - - FIELD=64bit RECOVERY=yes - - FIELD=64bit ENDOMORPHISM=yes - - FIELD=64bit ENDOMORPHISM=yes ECDH=yes EXPERIMENTAL=yes - - FIELD=64bit ASM=x86_64 - - FIELD=64bit ENDOMORPHISM=yes ASM=x86_64 - - FIELD=32bit ENDOMORPHISM=yes - - BIGNUM=no - - BIGNUM=no ENDOMORPHISM=yes RECOVERY=yes EXPERIMENTAL=yes - - BIGNUM=no STATICPRECOMPUTATION=no - - BUILD=distcheck - - EXTRAFLAGS=CPPFLAGS=-DDETERMINISTIC - - EXTRAFLAGS=CFLAGS=-O0 - - BUILD=check-java ECDH=yes EXPERIMENTAL=yes -matrix: - fast_finish: true - include: - - compiler: clang - env: HOST=i686-linux-gnu ENDOMORPHISM=yes - addons: - apt: - packages: - - gcc-multilib - - libgmp-dev:i386 - - compiler: clang - env: HOST=i686-linux-gnu - 
addons: - apt: - packages: - - gcc-multilib - - compiler: gcc - env: HOST=i686-linux-gnu ENDOMORPHISM=yes - addons: - apt: - packages: - - gcc-multilib - - compiler: gcc - env: HOST=i686-linux-gnu - addons: - apt: - packages: - - gcc-multilib - - libgmp-dev:i386 -before_install: mkdir -p `dirname $GUAVA_JAR` -install: if [ ! -f $GUAVA_JAR ]; then wget $GUAVA_URL -O $GUAVA_JAR; fi -before_script: ./autogen.sh -script: - - if [ -n "$HOST" ]; then export USE_HOST="--host=$HOST"; fi - - if [ "x$HOST" = "xi686-linux-gnu" ]; then export CC="$CC -m32"; fi - - ./configure --enable-experimental=$EXPERIMENTAL --enable-endomorphism=$ENDOMORPHISM --with-field=$FIELD --with-bignum=$BIGNUM --with-scalar=$SCALAR --enable-ecmult-static-precomputation=$STATICPRECOMPUTATION --enable-module-ecdh=$ECDH --enable-module-recovery=$RECOVERY $EXTRAFLAGS $USE_HOST && make -j2 $BUILD -os: linux diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/COPYING b/crypto/secp256k1/internal/secp256k1/libsecp256k1/COPYING deleted file mode 100644 index 4522a5990..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/COPYING +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2013 Pieter Wuille - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
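The autotools machinery deleted below is one way to configure these sources; the Go wrapper removed earlier takes another route and compiles the library through cgo. As a hedged sketch only (not the project's actual build file; the include paths, the portable 32-bit choices, and the amalgamated include of secp256k1.c are assumptions), a cgo preamble can pin the same USE_* macros that the configure.ac further below selects with AC_DEFINE:

package secp256k1

/*
#cgo CFLAGS: -I./libsecp256k1 -I./libsecp256k1/src
#define USE_NUM_NONE
#define USE_FIELD_10X26
#define USE_FIELD_INV_BUILTIN
#define USE_SCALAR_8X32
#define USE_SCALAR_INV_BUILTIN
#include "./libsecp256k1/src/secp256k1.c"
*/
import "C"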
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/Makefile.am b/crypto/secp256k1/internal/secp256k1/libsecp256k1/Makefile.am deleted file mode 100644 index c071fbe27..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/Makefile.am +++ /dev/null @@ -1,177 +0,0 @@ -ACLOCAL_AMFLAGS = -I build-aux/m4 - -lib_LTLIBRARIES = libsecp256k1.la -if USE_JNI -JNI_LIB = libsecp256k1_jni.la -noinst_LTLIBRARIES = $(JNI_LIB) -else -JNI_LIB = -endif -include_HEADERS = include/secp256k1.h -noinst_HEADERS = -noinst_HEADERS += src/scalar.h -noinst_HEADERS += src/scalar_4x64.h -noinst_HEADERS += src/scalar_8x32.h -noinst_HEADERS += src/scalar_low.h -noinst_HEADERS += src/scalar_impl.h -noinst_HEADERS += src/scalar_4x64_impl.h -noinst_HEADERS += src/scalar_8x32_impl.h -noinst_HEADERS += src/scalar_low_impl.h -noinst_HEADERS += src/group.h -noinst_HEADERS += src/group_impl.h -noinst_HEADERS += src/num_gmp.h -noinst_HEADERS += src/num_gmp_impl.h -noinst_HEADERS += src/ecdsa.h -noinst_HEADERS += src/ecdsa_impl.h -noinst_HEADERS += src/eckey.h -noinst_HEADERS += src/eckey_impl.h -noinst_HEADERS += src/ecmult.h -noinst_HEADERS += src/ecmult_impl.h -noinst_HEADERS += src/ecmult_const.h -noinst_HEADERS += src/ecmult_const_impl.h -noinst_HEADERS += src/ecmult_gen.h -noinst_HEADERS += src/ecmult_gen_impl.h -noinst_HEADERS += src/num.h -noinst_HEADERS += src/num_impl.h -noinst_HEADERS += src/field_10x26.h -noinst_HEADERS += src/field_10x26_impl.h -noinst_HEADERS += src/field_5x52.h -noinst_HEADERS += src/field_5x52_impl.h -noinst_HEADERS += src/field_5x52_int128_impl.h -noinst_HEADERS += src/field_5x52_asm_impl.h -noinst_HEADERS += src/java/org_bitcoin_NativeSecp256k1.h -noinst_HEADERS += src/java/org_bitcoin_Secp256k1Context.h -noinst_HEADERS += src/util.h -noinst_HEADERS += src/testrand.h -noinst_HEADERS += src/testrand_impl.h -noinst_HEADERS += src/hash.h -noinst_HEADERS += src/hash_impl.h -noinst_HEADERS += src/field.h -noinst_HEADERS += src/field_impl.h -noinst_HEADERS += src/bench.h -noinst_HEADERS += contrib/lax_der_parsing.h -noinst_HEADERS += contrib/lax_der_parsing.c -noinst_HEADERS += contrib/lax_der_privatekey_parsing.h -noinst_HEADERS += contrib/lax_der_privatekey_parsing.c - -if USE_EXTERNAL_ASM -COMMON_LIB = libsecp256k1_common.la -noinst_LTLIBRARIES = $(COMMON_LIB) -else -COMMON_LIB = -endif - -pkgconfigdir = $(libdir)/pkgconfig -pkgconfig_DATA = libsecp256k1.pc - -if USE_EXTERNAL_ASM -if USE_ASM_ARM -libsecp256k1_common_la_SOURCES = src/asm/field_10x26_arm.s -endif -endif - -libsecp256k1_la_SOURCES = src/secp256k1.c -libsecp256k1_la_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES) -libsecp256k1_la_LIBADD = $(JNI_LIB) $(SECP_LIBS) $(COMMON_LIB) - -libsecp256k1_jni_la_SOURCES = src/java/org_bitcoin_NativeSecp256k1.c src/java/org_bitcoin_Secp256k1Context.c -libsecp256k1_jni_la_CPPFLAGS = -DSECP256K1_BUILD $(JNI_INCLUDES) - -noinst_PROGRAMS = -if USE_BENCHMARK -noinst_PROGRAMS += bench_verify bench_sign bench_internal -bench_verify_SOURCES = src/bench_verify.c -bench_verify_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB) -bench_sign_SOURCES = src/bench_sign.c -bench_sign_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB) -bench_internal_SOURCES = src/bench_internal.c -bench_internal_LDADD = $(SECP_LIBS) $(COMMON_LIB) -bench_internal_CPPFLAGS = -DSECP256K1_BUILD $(SECP_INCLUDES) -endif - -TESTS = -if USE_TESTS -noinst_PROGRAMS += tests -tests_SOURCES = src/tests.c -tests_CPPFLAGS = -DSECP256K1_BUILD 
-I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES) -if !ENABLE_COVERAGE -tests_CPPFLAGS += -DVERIFY -endif -tests_LDADD = $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB) -tests_LDFLAGS = -static -TESTS += tests -endif - -if USE_EXHAUSTIVE_TESTS -noinst_PROGRAMS += exhaustive_tests -exhaustive_tests_SOURCES = src/tests_exhaustive.c -exhaustive_tests_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/src $(SECP_INCLUDES) -if !ENABLE_COVERAGE -exhaustive_tests_CPPFLAGS += -DVERIFY -endif -exhaustive_tests_LDADD = $(SECP_LIBS) -exhaustive_tests_LDFLAGS = -static -TESTS += exhaustive_tests -endif - -JAVAROOT=src/java -JAVAORG=org/bitcoin -JAVA_GUAVA=$(srcdir)/$(JAVAROOT)/guava/guava-18.0.jar -CLASSPATH_ENV=CLASSPATH=$(JAVA_GUAVA) -JAVA_FILES= \ - $(JAVAROOT)/$(JAVAORG)/NativeSecp256k1.java \ - $(JAVAROOT)/$(JAVAORG)/NativeSecp256k1Test.java \ - $(JAVAROOT)/$(JAVAORG)/NativeSecp256k1Util.java \ - $(JAVAROOT)/$(JAVAORG)/Secp256k1Context.java - -if USE_JNI - -$(JAVA_GUAVA): - @echo Guava is missing. Fetch it via: \ - wget https://search.maven.org/remotecontent?filepath=com/google/guava/guava/18.0/guava-18.0.jar -O $(@) - @false - -.stamp-java: $(JAVA_FILES) - @echo Compiling $^ - $(AM_V_at)$(CLASSPATH_ENV) javac $^ - @touch $@ - -if USE_TESTS - -check-java: libsecp256k1.la $(JAVA_GUAVA) .stamp-java - $(AM_V_at)java -Djava.library.path="./:./src:./src/.libs:.libs/" -cp "$(JAVA_GUAVA):$(JAVAROOT)" $(JAVAORG)/NativeSecp256k1Test - -endif -endif - -if USE_ECMULT_STATIC_PRECOMPUTATION -CPPFLAGS_FOR_BUILD +=-I$(top_srcdir) -CFLAGS_FOR_BUILD += -Wall -Wextra -Wno-unused-function - -gen_context_OBJECTS = gen_context.o -gen_context_BIN = gen_context$(BUILD_EXEEXT) -gen_%.o: src/gen_%.c - $(CC_FOR_BUILD) $(CPPFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) -c $< -o $@ - -$(gen_context_BIN): $(gen_context_OBJECTS) - $(CC_FOR_BUILD) $^ -o $@ - -$(libsecp256k1_la_OBJECTS): src/ecmult_static_context.h -$(tests_OBJECTS): src/ecmult_static_context.h -$(bench_internal_OBJECTS): src/ecmult_static_context.h - -src/ecmult_static_context.h: $(gen_context_BIN) - ./$(gen_context_BIN) - -CLEANFILES = $(gen_context_BIN) src/ecmult_static_context.h $(JAVAROOT)/$(JAVAORG)/*.class .stamp-java -endif - -EXTRA_DIST = autogen.sh src/gen_context.c src/basic-config.h $(JAVA_FILES) - -if ENABLE_MODULE_ECDH -include src/modules/ecdh/Makefile.am.include -endif - -if ENABLE_MODULE_RECOVERY -include src/modules/recovery/Makefile.am.include -endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/README.md b/crypto/secp256k1/internal/secp256k1/libsecp256k1/README.md deleted file mode 100644 index 8cd344ea8..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/README.md +++ /dev/null @@ -1,61 +0,0 @@ -libsecp256k1 -============ - -[![Build Status](https://travis-ci.org/bitcoin-core/secp256k1.svg?branch=master)](https://travis-ci.org/bitcoin-core/secp256k1) - -Optimized C library for EC operations on curve secp256k1. - -This library is a work in progress and is being used to research best practices. Use at your own risk. - -Features: -* secp256k1 ECDSA signing/verification and key generation. -* Adding/multiplying private/public keys. -* Serialization/parsing of private keys, public keys, signatures. -* Constant time, constant memory access signing and pubkey generation. -* Derandomized DSA (via RFC6979 or with a caller provided function.) -* Very efficient implementation. - -Implementation details ----------------------- - -* General - * No runtime heap allocation. 
- * Extensive testing infrastructure. - * Structured to facilitate review and analysis. - * Intended to be portable to any system with a C89 compiler and uint64_t support. - * Expose only higher level interfaces to minimize the API surface and improve application security. ("Be difficult to use insecurely.") -* Field operations - * Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1). - * Using 5 52-bit limbs (including hand-optimized assembly for x86_64, by Diederik Huys). - * Using 10 26-bit limbs. - * Field inverses and square roots using a sliding window over blocks of 1s (by Peter Dettman). -* Scalar operations - * Optimized implementation without data-dependent branches of arithmetic modulo the curve's order. - * Using 4 64-bit limbs (relying on __int128 support in the compiler). - * Using 8 32-bit limbs. -* Group operations - * Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7). - * Use addition between points in Jacobian and affine coordinates where possible. - * Use a unified addition/doubling formula where necessary to avoid data-dependent branches. - * Point/x comparison without a field inversion by comparison in the Jacobian coordinate space. -* Point multiplication for verification (a*P + b*G). - * Use wNAF notation for point multiplicands. - * Use a much larger window for multiples of G, using precomputed multiples. - * Use Shamir's trick to do the multiplication with the public key and the generator simultaneously. - * Optionally (off by default) use secp256k1's efficiently-computable endomorphism to split the P multiplicand into 2 half-sized ones. -* Point multiplication for signing - * Use a precomputed table of multiples of powers of 16 multiplied with the generator, so general multiplication becomes a series of additions. - * Access the table with branch-free conditional moves so memory access is uniform. - * No data-dependent branches - * The precomputed tables add and eventually subtract points for which no known scalar (private key) is known, preventing even an attacker with control over the private key used to control the data internally. - -Build steps ------------ - -libsecp256k1 is built using autotools: - - $ ./autogen.sh - $ ./configure - $ make - $ ./tests - $ sudo make install # optional diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/TODO b/crypto/secp256k1/internal/secp256k1/libsecp256k1/TODO deleted file mode 100644 index a300e1c5e..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/TODO +++ /dev/null @@ -1,3 +0,0 @@ -* Unit tests for fieldelem/groupelem, including ones intended to - trigger fieldelem's boundary cases. 
-* Complete constant-time operations for signing/keygen diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/autogen.sh b/crypto/secp256k1/internal/secp256k1/libsecp256k1/autogen.sh deleted file mode 100755 index 65286b935..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/autogen.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh -set -e -autoreconf -if --warnings=all diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/build-aux/m4/ax_jni_include_dir.m4 b/crypto/secp256k1/internal/secp256k1/libsecp256k1/build-aux/m4/ax_jni_include_dir.m4 deleted file mode 100644 index 1fc362761..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/build-aux/m4/ax_jni_include_dir.m4 +++ /dev/null @@ -1,140 +0,0 @@ -# =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_jni_include_dir.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_JNI_INCLUDE_DIR -# -# DESCRIPTION -# -# AX_JNI_INCLUDE_DIR finds include directories needed for compiling -# programs using the JNI interface. -# -# JNI include directories are usually in the Java distribution. This is -# deduced from the value of $JAVA_HOME, $JAVAC, or the path to "javac", in -# that order. When this macro completes, a list of directories is left in -# the variable JNI_INCLUDE_DIRS. -# -# Example usage follows: -# -# AX_JNI_INCLUDE_DIR -# -# for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS -# do -# CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR" -# done -# -# If you want to force a specific compiler: -# -# - at the configure.in level, set JAVAC=yourcompiler before calling -# AX_JNI_INCLUDE_DIR -# -# - at the configure level, setenv JAVAC -# -# Note: This macro can work with the autoconf M4 macros for Java programs. -# This particular macro is not part of the original set of macros. -# -# LICENSE -# -# Copyright (c) 2008 Don Anderson -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 10 - -AU_ALIAS([AC_JNI_INCLUDE_DIR], [AX_JNI_INCLUDE_DIR]) -AC_DEFUN([AX_JNI_INCLUDE_DIR],[ - -JNI_INCLUDE_DIRS="" - -if test "x$JAVA_HOME" != x; then - _JTOPDIR="$JAVA_HOME" -else - if test "x$JAVAC" = x; then - JAVAC=javac - fi - AC_PATH_PROG([_ACJNI_JAVAC], [$JAVAC], [no]) - if test "x$_ACJNI_JAVAC" = xno; then - AC_MSG_WARN([cannot find JDK; try setting \$JAVAC or \$JAVA_HOME]) - fi - _ACJNI_FOLLOW_SYMLINKS("$_ACJNI_JAVAC") - _JTOPDIR=`echo "$_ACJNI_FOLLOWED" | sed -e 's://*:/:g' -e 's:/[[^/]]*$::'` -fi - -case "$host_os" in - darwin*) _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[[^/]]*$::'` - _JINC="$_JTOPDIR/Headers";; - *) _JINC="$_JTOPDIR/include";; -esac -_AS_ECHO_LOG([_JTOPDIR=$_JTOPDIR]) -_AS_ECHO_LOG([_JINC=$_JINC]) - -# On Mac OS X 10.6.4, jni.h is a symlink: -# /System/Library/Frameworks/JavaVM.framework/Versions/Current/Headers/jni.h -# -> ../../CurrentJDK/Headers/jni.h. 
-
-AC_CACHE_CHECK(jni headers, ac_cv_jni_header_path,
-[
-if test -f "$_JINC/jni.h"; then
-  ac_cv_jni_header_path="$_JINC"
-  JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $ac_cv_jni_header_path"
-else
-  _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[[^/]]*$::'`
-  if test -f "$_JTOPDIR/include/jni.h"; then
-    ac_cv_jni_header_path="$_JTOPDIR/include"
-    JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $ac_cv_jni_header_path"
-  else
-    ac_cv_jni_header_path=none
-  fi
-fi
-])
-
-
-
-# get the likely subdirectories for system specific java includes
-case "$host_os" in
-bsdi*)          _JNI_INC_SUBDIRS="bsdos";;
-darwin*)        _JNI_INC_SUBDIRS="darwin";;
-freebsd*)       _JNI_INC_SUBDIRS="freebsd";;
-linux*)         _JNI_INC_SUBDIRS="linux genunix";;
-osf*)           _JNI_INC_SUBDIRS="alpha";;
-solaris*)       _JNI_INC_SUBDIRS="solaris";;
-mingw*)         _JNI_INC_SUBDIRS="win32";;
-cygwin*)        _JNI_INC_SUBDIRS="win32";;
-*)              _JNI_INC_SUBDIRS="genunix";;
-esac
-
-if test "x$ac_cv_jni_header_path" != "xnone"; then
-  # add any subdirectories that are present
-  for JINCSUBDIR in $_JNI_INC_SUBDIRS
-  do
-    if test -d "$_JTOPDIR/include/$JINCSUBDIR"; then
-      JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JTOPDIR/include/$JINCSUBDIR"
-    fi
-  done
-fi
-])
-
-# _ACJNI_FOLLOW_SYMLINKS <path>
-# Follows symbolic links on <path>,
-# finally setting variable _ACJNI_FOLLOWED
-# ----------------------------------------
-AC_DEFUN([_ACJNI_FOLLOW_SYMLINKS],[
-# find the include directory relative to the javac executable
-_cur="$1"
-while ls -ld "$_cur" 2>/dev/null | grep " -> " >/dev/null; do
-        AC_MSG_CHECKING([symlink for $_cur])
-        _slink=`ls -ld "$_cur" | sed 's/.* -> //'`
-        case "$_slink" in
-        /*) _cur="$_slink";;
-        # 'X' avoids triggering unwanted echo options.
-        *) _cur=`echo "X$_cur" | sed -e 's/^X//' -e 's:[[^/]]*$::'`"$_slink";;
-        esac
-        AC_MSG_RESULT([$_cur])
-done
-_ACJNI_FOLLOWED="$_cur"
-])# _ACJNI
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/build-aux/m4/ax_prog_cc_for_build.m4 b/crypto/secp256k1/internal/secp256k1/libsecp256k1/build-aux/m4/ax_prog_cc_for_build.m4
deleted file mode 100644
index 77fd346a7..000000000
--- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/build-aux/m4/ax_prog_cc_for_build.m4
+++ /dev/null
@@ -1,125 +0,0 @@
-# ===========================================================================
-#   http://www.gnu.org/software/autoconf-archive/ax_prog_cc_for_build.html
-# ===========================================================================
-#
-# SYNOPSIS
-#
-#   AX_PROG_CC_FOR_BUILD
-#
-# DESCRIPTION
-#
-#   This macro searches for a C compiler that generates native executables,
-#   that is a C compiler that surely is not a cross-compiler. This can be
-#   useful if you have to generate source code at compile-time like for
-#   example GCC does.
-#
-#   The macro sets the CC_FOR_BUILD and CPP_FOR_BUILD macros to anything
-#   needed to compile or link (CC_FOR_BUILD) and preprocess (CPP_FOR_BUILD).
-#   The value of these variables can be overridden by the user by specifying
-#   a compiler with an environment variable (like you do for standard CC).
-#
-#   It also sets BUILD_EXEEXT and BUILD_OBJEXT to the executable and object
-#   file extensions for the build platform, and GCC_FOR_BUILD to `yes' if
-#   the compiler we found is GCC. All these variables but GCC_FOR_BUILD are
-#   substituted in the Makefile.
-#
-# LICENSE
-#
-#   Copyright (c) 2008 Paolo Bonzini
-#
-#   Copying and distribution of this file, with or without modification, are
-#   permitted in any medium without royalty provided the copyright notice
-#   and this notice are preserved. 
This file is offered as-is, without any
-#   warranty.
-
-#serial 8
-
-AU_ALIAS([AC_PROG_CC_FOR_BUILD], [AX_PROG_CC_FOR_BUILD])
-AC_DEFUN([AX_PROG_CC_FOR_BUILD], [dnl
-AC_REQUIRE([AC_PROG_CC])dnl
-AC_REQUIRE([AC_PROG_CPP])dnl
-AC_REQUIRE([AC_EXEEXT])dnl
-AC_REQUIRE([AC_CANONICAL_HOST])dnl
-
-dnl Use the standard macros, but make them use other variable names
-dnl
-pushdef([ac_cv_prog_CPP], ac_cv_build_prog_CPP)dnl
-pushdef([ac_cv_prog_gcc], ac_cv_build_prog_gcc)dnl
-pushdef([ac_cv_prog_cc_works], ac_cv_build_prog_cc_works)dnl
-pushdef([ac_cv_prog_cc_cross], ac_cv_build_prog_cc_cross)dnl
-pushdef([ac_cv_prog_cc_g], ac_cv_build_prog_cc_g)dnl
-pushdef([ac_cv_exeext], ac_cv_build_exeext)dnl
-pushdef([ac_cv_objext], ac_cv_build_objext)dnl
-pushdef([ac_exeext], ac_build_exeext)dnl
-pushdef([ac_objext], ac_build_objext)dnl
-pushdef([CC], CC_FOR_BUILD)dnl
-pushdef([CPP], CPP_FOR_BUILD)dnl
-pushdef([CFLAGS], CFLAGS_FOR_BUILD)dnl
-pushdef([CPPFLAGS], CPPFLAGS_FOR_BUILD)dnl
-pushdef([LDFLAGS], LDFLAGS_FOR_BUILD)dnl
-pushdef([host], build)dnl
-pushdef([host_alias], build_alias)dnl
-pushdef([host_cpu], build_cpu)dnl
-pushdef([host_vendor], build_vendor)dnl
-pushdef([host_os], build_os)dnl
-pushdef([ac_cv_host], ac_cv_build)dnl
-pushdef([ac_cv_host_alias], ac_cv_build_alias)dnl
-pushdef([ac_cv_host_cpu], ac_cv_build_cpu)dnl
-pushdef([ac_cv_host_vendor], ac_cv_build_vendor)dnl
-pushdef([ac_cv_host_os], ac_cv_build_os)dnl
-pushdef([ac_cpp], ac_build_cpp)dnl
-pushdef([ac_compile], ac_build_compile)dnl
-pushdef([ac_link], ac_build_link)dnl
-
-save_cross_compiling=$cross_compiling
-save_ac_tool_prefix=$ac_tool_prefix
-cross_compiling=no
-ac_tool_prefix=
-
-AC_PROG_CC
-AC_PROG_CPP
-AC_EXEEXT
-
-ac_tool_prefix=$save_ac_tool_prefix
-cross_compiling=$save_cross_compiling
-
-dnl Restore the old definitions
-dnl
-popdef([ac_link])dnl
-popdef([ac_compile])dnl
-popdef([ac_cpp])dnl
-popdef([ac_cv_host_os])dnl
-popdef([ac_cv_host_vendor])dnl
-popdef([ac_cv_host_cpu])dnl
-popdef([ac_cv_host_alias])dnl
-popdef([ac_cv_host])dnl
-popdef([host_os])dnl
-popdef([host_vendor])dnl
-popdef([host_cpu])dnl
-popdef([host_alias])dnl
-popdef([host])dnl
-popdef([LDFLAGS])dnl
-popdef([CPPFLAGS])dnl
-popdef([CFLAGS])dnl
-popdef([CPP])dnl
-popdef([CC])dnl
-popdef([ac_objext])dnl
-popdef([ac_exeext])dnl
-popdef([ac_cv_objext])dnl
-popdef([ac_cv_exeext])dnl
-popdef([ac_cv_prog_cc_g])dnl
-popdef([ac_cv_prog_cc_cross])dnl
-popdef([ac_cv_prog_cc_works])dnl
-popdef([ac_cv_prog_gcc])dnl
-popdef([ac_cv_prog_CPP])dnl
-
-dnl Finally, set Makefile variables
-dnl
-BUILD_EXEEXT=$ac_build_exeext
-BUILD_OBJEXT=$ac_build_objext
-AC_SUBST(BUILD_EXEEXT)dnl
-AC_SUBST(BUILD_OBJEXT)dnl
-AC_SUBST([CFLAGS_FOR_BUILD])dnl
-AC_SUBST([CPPFLAGS_FOR_BUILD])dnl
-AC_SUBST([LDFLAGS_FOR_BUILD])dnl
-])
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/build-aux/m4/bitcoin_secp.m4 b/crypto/secp256k1/internal/secp256k1/libsecp256k1/build-aux/m4/bitcoin_secp.m4
deleted file mode 100644
index b74acb8c1..000000000
--- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/build-aux/m4/bitcoin_secp.m4
+++ /dev/null
@@ -1,69 +0,0 @@
-dnl libsecp256k1 helper checks
-AC_DEFUN([SECP_INT128_CHECK],[
-has_int128=$ac_cv_type___int128
-])
-
-dnl escape "$0x" below using the m4 quadrigaph @S|@, and escape it again with a \ for the shell.
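-dnl (Annotation: @S|@ is the Autoconf quadrigraph for a literal $, so after m4
-dnl and shell processing the instruction emitted below reads
-dnl "movq $0x100000000,%rsi", operand %1 being the "S"-constrained tmp.)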
-AC_DEFUN([SECP_64BIT_ASM_CHECK],[
-AC_MSG_CHECKING(for x86_64 assembly availability)
-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
-  #include <stdint.h>]],[[
-  uint64_t a = 11, tmp;
-  __asm__ __volatile__("movq \@S|@0x100000000,%1; mulq %%rsi" : "+a"(a) : "S"(tmp) : "cc", "%rdx");
-  ]])],[has_64bit_asm=yes],[has_64bit_asm=no])
-AC_MSG_RESULT([$has_64bit_asm])
-])
-
-dnl
-AC_DEFUN([SECP_OPENSSL_CHECK],[
-  has_libcrypto=no
-  m4_ifdef([PKG_CHECK_MODULES],[
-    PKG_CHECK_MODULES([CRYPTO], [libcrypto], [has_libcrypto=yes],[has_libcrypto=no])
-    if test x"$has_libcrypto" = x"yes"; then
-      TEMP_LIBS="$LIBS"
-      LIBS="$LIBS $CRYPTO_LIBS"
-      AC_CHECK_LIB(crypto, main,[AC_DEFINE(HAVE_LIBCRYPTO,1,[Define this symbol if libcrypto is installed])],[has_libcrypto=no])
-      LIBS="$TEMP_LIBS"
-    fi
-  ])
-  if test x$has_libcrypto = xno; then
-    AC_CHECK_HEADER(openssl/crypto.h,[
-    AC_CHECK_LIB(crypto, main,[
-      has_libcrypto=yes
-      CRYPTO_LIBS=-lcrypto
-      AC_DEFINE(HAVE_LIBCRYPTO,1,[Define this symbol if libcrypto is installed])
-    ])
-  ])
-    LIBS=
-  fi
-if test x"$has_libcrypto" = x"yes" && test x"$has_openssl_ec" = x; then
-  AC_MSG_CHECKING(for EC functions in libcrypto)
-  AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
-    #include <openssl/ec.h>
-    #include <openssl/ecdsa.h>
-    #include <openssl/obj_mac.h>]],[[
-    EC_KEY *eckey = EC_KEY_new_by_curve_name(NID_secp256k1);
-    ECDSA_sign(0, NULL, 0, NULL, NULL, eckey);
-    ECDSA_verify(0, NULL, 0, NULL, 0, eckey);
-    EC_KEY_free(eckey);
-    ECDSA_SIG *sig_openssl;
-    sig_openssl = ECDSA_SIG_new();
-    (void)sig_openssl->r;
-    ECDSA_SIG_free(sig_openssl);
-  ]])],[has_openssl_ec=yes],[has_openssl_ec=no])
-  AC_MSG_RESULT([$has_openssl_ec])
-fi
-])
-
-dnl
-AC_DEFUN([SECP_GMP_CHECK],[
-if test x"$has_gmp" != x"yes"; then
-  CPPFLAGS_TEMP="$CPPFLAGS"
-  CPPFLAGS="$GMP_CPPFLAGS $CPPFLAGS"
-  LIBS_TEMP="$LIBS"
-  LIBS="$GMP_LIBS $LIBS"
-  AC_CHECK_HEADER(gmp.h,[AC_CHECK_LIB(gmp, __gmpz_init,[has_gmp=yes; GMP_LIBS="$GMP_LIBS -lgmp"; AC_DEFINE(HAVE_LIBGMP,1,[Define this symbol if libgmp is installed])])])
-  CPPFLAGS="$CPPFLAGS_TEMP"
-  LIBS="$LIBS_TEMP"
-fi
-])
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/configure.ac b/crypto/secp256k1/internal/secp256k1/libsecp256k1/configure.ac
deleted file mode 100644
index e5fcbcb4e..000000000
--- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/configure.ac
+++ /dev/null
@@ -1,493 +0,0 @@
-AC_PREREQ([2.60])
-AC_INIT([libsecp256k1],[0.1])
-AC_CONFIG_AUX_DIR([build-aux])
-AC_CONFIG_MACRO_DIR([build-aux/m4])
-AC_CANONICAL_HOST
-AH_TOP([#ifndef LIBSECP256K1_CONFIG_H])
-AH_TOP([#define LIBSECP256K1_CONFIG_H])
-AH_BOTTOM([#endif /*LIBSECP256K1_CONFIG_H*/])
-AM_INIT_AUTOMAKE([foreign subdir-objects])
-LT_INIT
-
-dnl make the compilation flags quiet unless V=1 is used
-m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
-
-PKG_PROG_PKG_CONFIG
-
-AC_PATH_TOOL(AR, ar)
-AC_PATH_TOOL(RANLIB, ranlib)
-AC_PATH_TOOL(STRIP, strip)
-AX_PROG_CC_FOR_BUILD
-
-if test "x$CFLAGS" = "x"; then
-  CFLAGS="-g"
-fi
-
-AM_PROG_CC_C_O
-
-AC_PROG_CC_C89
-if test x"$ac_cv_prog_cc_c89" = x"no"; then
-  AC_MSG_ERROR([c89 compiler support required])
-fi
-AM_PROG_AS
-
-case $host_os in
-  *darwin*)
-     if test x$cross_compiling != xyes; then
-       AC_PATH_PROG([BREW],brew,)
-       if test x$BREW != x; then
-         dnl These Homebrew packages may be keg-only, meaning that they won't be found
-         dnl in expected paths because they may conflict with system files. Ask
-         dnl Homebrew where each one is located, then adjust paths accordingly.
- - openssl_prefix=`$BREW --prefix openssl 2>/dev/null` - gmp_prefix=`$BREW --prefix gmp 2>/dev/null` - if test x$openssl_prefix != x; then - PKG_CONFIG_PATH="$openssl_prefix/lib/pkgconfig:$PKG_CONFIG_PATH" - export PKG_CONFIG_PATH - fi - if test x$gmp_prefix != x; then - GMP_CPPFLAGS="-I$gmp_prefix/include" - GMP_LIBS="-L$gmp_prefix/lib" - fi - else - AC_PATH_PROG([PORT],port,) - dnl if homebrew isn't installed and macports is, add the macports default paths - dnl as a last resort. - if test x$PORT != x; then - CPPFLAGS="$CPPFLAGS -isystem /opt/local/include" - LDFLAGS="$LDFLAGS -L/opt/local/lib" - fi - fi - fi - ;; -esac - -CFLAGS="$CFLAGS -W" - -warn_CFLAGS="-std=c89 -pedantic -Wall -Wextra -Wcast-align -Wnested-externs -Wshadow -Wstrict-prototypes -Wno-unused-function -Wno-long-long -Wno-overlength-strings" -saved_CFLAGS="$CFLAGS" -CFLAGS="$CFLAGS $warn_CFLAGS" -AC_MSG_CHECKING([if ${CC} supports ${warn_CFLAGS}]) -AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], - [ AC_MSG_RESULT([yes]) ], - [ AC_MSG_RESULT([no]) - CFLAGS="$saved_CFLAGS" - ]) - -saved_CFLAGS="$CFLAGS" -CFLAGS="$CFLAGS -fvisibility=hidden" -AC_MSG_CHECKING([if ${CC} supports -fvisibility=hidden]) -AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], - [ AC_MSG_RESULT([yes]) ], - [ AC_MSG_RESULT([no]) - CFLAGS="$saved_CFLAGS" - ]) - -AC_ARG_ENABLE(benchmark, - AS_HELP_STRING([--enable-benchmark],[compile benchmark (default is no)]), - [use_benchmark=$enableval], - [use_benchmark=no]) - -AC_ARG_ENABLE(coverage, - AS_HELP_STRING([--enable-coverage],[enable compiler flags to support kcov coverage analysis]), - [enable_coverage=$enableval], - [enable_coverage=no]) - -AC_ARG_ENABLE(tests, - AS_HELP_STRING([--enable-tests],[compile tests (default is yes)]), - [use_tests=$enableval], - [use_tests=yes]) - -AC_ARG_ENABLE(openssl_tests, - AS_HELP_STRING([--enable-openssl-tests],[enable OpenSSL tests, if OpenSSL is available (default is auto)]), - [enable_openssl_tests=$enableval], - [enable_openssl_tests=auto]) - -AC_ARG_ENABLE(experimental, - AS_HELP_STRING([--enable-experimental],[allow experimental configure options (default is no)]), - [use_experimental=$enableval], - [use_experimental=no]) - -AC_ARG_ENABLE(exhaustive_tests, - AS_HELP_STRING([--enable-exhaustive-tests],[compile exhaustive tests (default is yes)]), - [use_exhaustive_tests=$enableval], - [use_exhaustive_tests=yes]) - -AC_ARG_ENABLE(endomorphism, - AS_HELP_STRING([--enable-endomorphism],[enable endomorphism (default is no)]), - [use_endomorphism=$enableval], - [use_endomorphism=no]) - -AC_ARG_ENABLE(ecmult_static_precomputation, - AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing (default is yes)]), - [use_ecmult_static_precomputation=$enableval], - [use_ecmult_static_precomputation=auto]) - -AC_ARG_ENABLE(module_ecdh, - AS_HELP_STRING([--enable-module-ecdh],[enable ECDH shared secret computation (experimental)]), - [enable_module_ecdh=$enableval], - [enable_module_ecdh=no]) - -AC_ARG_ENABLE(module_recovery, - AS_HELP_STRING([--enable-module-recovery],[enable ECDSA pubkey recovery module (default is no)]), - [enable_module_recovery=$enableval], - [enable_module_recovery=no]) - -AC_ARG_ENABLE(jni, - AS_HELP_STRING([--enable-jni],[enable libsecp256k1_jni (default is auto)]), - [use_jni=$enableval], - [use_jni=auto]) - -AC_ARG_WITH([field], [AS_HELP_STRING([--with-field=64bit|32bit|auto], -[Specify Field Implementation. 
Default is auto])],[req_field=$withval], [req_field=auto])
-
-AC_ARG_WITH([bignum], [AS_HELP_STRING([--with-bignum=gmp|no|auto],
-[Specify Bignum Implementation. Default is auto])],[req_bignum=$withval], [req_bignum=auto])
-
-AC_ARG_WITH([scalar], [AS_HELP_STRING([--with-scalar=64bit|32bit|auto],
-[Specify scalar implementation. Default is auto])],[req_scalar=$withval], [req_scalar=auto])
-
-AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto],
-[Specify assembly optimizations to use. Default is auto (experimental: arm)])],[req_asm=$withval], [req_asm=auto])
-
-AC_CHECK_TYPES([__int128])
-
-AC_MSG_CHECKING([for __builtin_expect])
-AC_COMPILE_IFELSE([AC_LANG_SOURCE([[void myfunc() {__builtin_expect(0,0);}]])],
-    [ AC_MSG_RESULT([yes]);AC_DEFINE(HAVE_BUILTIN_EXPECT,1,[Define this symbol if __builtin_expect is available]) ],
-    [ AC_MSG_RESULT([no])
-    ])
-
-if test x"$enable_coverage" = x"yes"; then
-    AC_DEFINE(COVERAGE, 1, [Define this symbol to compile out all VERIFY code])
-    CFLAGS="$CFLAGS -O0 --coverage"
-    LDFLAGS="--coverage"
-else
-    CFLAGS="$CFLAGS -O3"
-fi
-
-if test x"$use_ecmult_static_precomputation" != x"no"; then
-  save_cross_compiling=$cross_compiling
-  cross_compiling=no
-  TEMP_CC="$CC"
-  CC="$CC_FOR_BUILD"
-  AC_MSG_CHECKING([native compiler: ${CC_FOR_BUILD}])
-  AC_RUN_IFELSE(
-    [AC_LANG_PROGRAM([], [return 0])],
-    [working_native_cc=yes],
-    [working_native_cc=no],[dnl])
-  CC="$TEMP_CC"
-  cross_compiling=$save_cross_compiling
-
-  if test x"$working_native_cc" = x"no"; then
-    set_precomp=no
-    if test x"$use_ecmult_static_precomputation" = x"yes"; then
-      AC_MSG_ERROR([${CC_FOR_BUILD} does not produce working binaries. Please set CC_FOR_BUILD])
-    else
-      AC_MSG_RESULT([${CC_FOR_BUILD} does not produce working binaries. Please set CC_FOR_BUILD])
-    fi
-  else
-    AC_MSG_RESULT([ok])
-    set_precomp=yes
-  fi
-else
-  set_precomp=no
-fi
-
-if test x"$req_asm" = x"auto"; then
-  SECP_64BIT_ASM_CHECK
-  if test x"$has_64bit_asm" = x"yes"; then
-    set_asm=x86_64
-  fi
-  if test x"$set_asm" = x; then
-    set_asm=no
-  fi
-else
-  set_asm=$req_asm
-  case $set_asm in
-  x86_64)
-    SECP_64BIT_ASM_CHECK
-    if test x"$has_64bit_asm" != x"yes"; then
-      AC_MSG_ERROR([x86_64 assembly optimization requested but not available])
-    fi
-    ;;
-  arm)
-    ;;
-  no)
-    ;;
-  *)
-    AC_MSG_ERROR([invalid assembly optimization selection])
-    ;;
-  esac
-fi
-
-if test x"$req_field" = x"auto"; then
-  if test x"$set_asm" = x"x86_64"; then
-    set_field=64bit
-  fi
-  if test x"$set_field" = x; then
-    SECP_INT128_CHECK
-    if test x"$has_int128" = x"yes"; then
-      set_field=64bit
-    fi
-  fi
-  if test x"$set_field" = x; then
-    set_field=32bit
-  fi
-else
-  set_field=$req_field
-  case $set_field in
-  64bit)
-    if test x"$set_asm" != x"x86_64"; then
-      SECP_INT128_CHECK
-      if test x"$has_int128" != x"yes"; then
-        AC_MSG_ERROR([64bit field explicitly requested but neither __int128 support or x86_64 assembly available])
-      fi
-    fi
-    ;;
-  32bit)
-    ;;
-  *)
-    AC_MSG_ERROR([invalid field implementation selection])
-    ;;
-  esac
-fi
-
-if test x"$req_scalar" = x"auto"; then
-  SECP_INT128_CHECK
-  if test x"$has_int128" = x"yes"; then
-    set_scalar=64bit
-  fi
-  if test x"$set_scalar" = x; then
-    set_scalar=32bit
-  fi
-else
-  set_scalar=$req_scalar
-  case $set_scalar in
-  64bit)
-    SECP_INT128_CHECK
-    if test x"$has_int128" != x"yes"; then
-      AC_MSG_ERROR([64bit scalar explicitly requested but __int128 support not available])
-    fi
-    ;;
-  32bit)
-    ;;
-  *)
-    AC_MSG_ERROR([invalid scalar implementation selected])
-    ;;
-  esac
-fi
-
-if test x"$req_bignum" = 
x"auto"; then - SECP_GMP_CHECK - if test x"$has_gmp" = x"yes"; then - set_bignum=gmp - fi - - if test x"$set_bignum" = x; then - set_bignum=no - fi -else - set_bignum=$req_bignum - case $set_bignum in - gmp) - SECP_GMP_CHECK - if test x"$has_gmp" != x"yes"; then - AC_MSG_ERROR([gmp bignum explicitly requested but libgmp not available]) - fi - ;; - no) - ;; - *) - AC_MSG_ERROR([invalid bignum implementation selection]) - ;; - esac -fi - -# select assembly optimization -use_external_asm=no - -case $set_asm in -x86_64) - AC_DEFINE(USE_ASM_X86_64, 1, [Define this symbol to enable x86_64 assembly optimizations]) - ;; -arm) - use_external_asm=yes - ;; -no) - ;; -*) - AC_MSG_ERROR([invalid assembly optimizations]) - ;; -esac - -# select field implementation -case $set_field in -64bit) - AC_DEFINE(USE_FIELD_5X52, 1, [Define this symbol to use the FIELD_5X52 implementation]) - ;; -32bit) - AC_DEFINE(USE_FIELD_10X26, 1, [Define this symbol to use the FIELD_10X26 implementation]) - ;; -*) - AC_MSG_ERROR([invalid field implementation]) - ;; -esac - -# select bignum implementation -case $set_bignum in -gmp) - AC_DEFINE(HAVE_LIBGMP, 1, [Define this symbol if libgmp is installed]) - AC_DEFINE(USE_NUM_GMP, 1, [Define this symbol to use the gmp implementation for num]) - AC_DEFINE(USE_FIELD_INV_NUM, 1, [Define this symbol to use the num-based field inverse implementation]) - AC_DEFINE(USE_SCALAR_INV_NUM, 1, [Define this symbol to use the num-based scalar inverse implementation]) - ;; -no) - AC_DEFINE(USE_NUM_NONE, 1, [Define this symbol to use no num implementation]) - AC_DEFINE(USE_FIELD_INV_BUILTIN, 1, [Define this symbol to use the native field inverse implementation]) - AC_DEFINE(USE_SCALAR_INV_BUILTIN, 1, [Define this symbol to use the native scalar inverse implementation]) - ;; -*) - AC_MSG_ERROR([invalid bignum implementation]) - ;; -esac - -#select scalar implementation -case $set_scalar in -64bit) - AC_DEFINE(USE_SCALAR_4X64, 1, [Define this symbol to use the 4x64 scalar implementation]) - ;; -32bit) - AC_DEFINE(USE_SCALAR_8X32, 1, [Define this symbol to use the 8x32 scalar implementation]) - ;; -*) - AC_MSG_ERROR([invalid scalar implementation]) - ;; -esac - -if test x"$use_tests" = x"yes"; then - SECP_OPENSSL_CHECK - if test x"$has_openssl_ec" = x"yes"; then - if test x"$enable_openssl_tests" != x"no"; then - AC_DEFINE(ENABLE_OPENSSL_TESTS, 1, [Define this symbol if OpenSSL EC functions are available]) - SECP_TEST_INCLUDES="$SSL_CFLAGS $CRYPTO_CFLAGS" - SECP_TEST_LIBS="$CRYPTO_LIBS" - - case $host in - *mingw*) - SECP_TEST_LIBS="$SECP_TEST_LIBS -lgdi32" - ;; - esac - fi - else - if test x"$enable_openssl_tests" = x"yes"; then - AC_MSG_ERROR([OpenSSL tests requested but OpenSSL with EC support is not available]) - fi - fi -else - if test x"$enable_openssl_tests" = x"yes"; then - AC_MSG_ERROR([OpenSSL tests requested but tests are not enabled]) - fi -fi - -if test x"$use_jni" != x"no"; then - AX_JNI_INCLUDE_DIR - have_jni_dependencies=yes - if test x"$enable_module_ecdh" = x"no"; then - have_jni_dependencies=no - fi - if test "x$JNI_INCLUDE_DIRS" = "x"; then - have_jni_dependencies=no - fi - if test "x$have_jni_dependencies" = "xno"; then - if test x"$use_jni" = x"yes"; then - AC_MSG_ERROR([jni support explicitly requested but headers/dependencies were not found. Enable ECDH and try again.]) - fi - AC_MSG_WARN([jni headers/dependencies not found. 
jni support disabled]) - use_jni=no - else - use_jni=yes - for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS; do - JNI_INCLUDES="$JNI_INCLUDES -I$JNI_INCLUDE_DIR" - done - fi -fi - -if test x"$set_bignum" = x"gmp"; then - SECP_LIBS="$SECP_LIBS $GMP_LIBS" - SECP_INCLUDES="$SECP_INCLUDES $GMP_CPPFLAGS" -fi - -if test x"$use_endomorphism" = x"yes"; then - AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism optimization]) -fi - -if test x"$set_precomp" = x"yes"; then - AC_DEFINE(USE_ECMULT_STATIC_PRECOMPUTATION, 1, [Define this symbol to use a statically generated ecmult table]) -fi - -if test x"$enable_module_ecdh" = x"yes"; then - AC_DEFINE(ENABLE_MODULE_ECDH, 1, [Define this symbol to enable the ECDH module]) -fi - -if test x"$enable_module_recovery" = x"yes"; then - AC_DEFINE(ENABLE_MODULE_RECOVERY, 1, [Define this symbol to enable the ECDSA pubkey recovery module]) -fi - -AC_C_BIGENDIAN() - -if test x"$use_external_asm" = x"yes"; then - AC_DEFINE(USE_EXTERNAL_ASM, 1, [Define this symbol if an external (non-inline) assembly implementation is used]) -fi - -AC_MSG_NOTICE([Using static precomputation: $set_precomp]) -AC_MSG_NOTICE([Using assembly optimizations: $set_asm]) -AC_MSG_NOTICE([Using field implementation: $set_field]) -AC_MSG_NOTICE([Using bignum implementation: $set_bignum]) -AC_MSG_NOTICE([Using scalar implementation: $set_scalar]) -AC_MSG_NOTICE([Using endomorphism optimizations: $use_endomorphism]) -AC_MSG_NOTICE([Building for coverage analysis: $enable_coverage]) -AC_MSG_NOTICE([Building ECDH module: $enable_module_ecdh]) -AC_MSG_NOTICE([Building ECDSA pubkey recovery module: $enable_module_recovery]) -AC_MSG_NOTICE([Using jni: $use_jni]) - -if test x"$enable_experimental" = x"yes"; then - AC_MSG_NOTICE([******]) - AC_MSG_NOTICE([WARNING: experimental build]) - AC_MSG_NOTICE([Experimental features do not have stable APIs or properties, and may not be safe for production use.]) - AC_MSG_NOTICE([Building ECDH module: $enable_module_ecdh]) - AC_MSG_NOTICE([******]) -else - if test x"$enable_module_ecdh" = x"yes"; then - AC_MSG_ERROR([ECDH module is experimental. Use --enable-experimental to allow.]) - fi - if test x"$set_asm" = x"arm"; then - AC_MSG_ERROR([ARM assembly optimization is experimental. 
Use --enable-experimental to allow.])
-  fi
-fi
-
-AC_CONFIG_HEADERS([src/libsecp256k1-config.h])
-AC_CONFIG_FILES([Makefile libsecp256k1.pc])
-AC_SUBST(JNI_INCLUDES)
-AC_SUBST(SECP_INCLUDES)
-AC_SUBST(SECP_LIBS)
-AC_SUBST(SECP_TEST_LIBS)
-AC_SUBST(SECP_TEST_INCLUDES)
-AM_CONDITIONAL([ENABLE_COVERAGE], [test x"$enable_coverage" = x"yes"])
-AM_CONDITIONAL([USE_TESTS], [test x"$use_tests" != x"no"])
-AM_CONDITIONAL([USE_EXHAUSTIVE_TESTS], [test x"$use_exhaustive_tests" != x"no"])
-AM_CONDITIONAL([USE_BENCHMARK], [test x"$use_benchmark" = x"yes"])
-AM_CONDITIONAL([USE_ECMULT_STATIC_PRECOMPUTATION], [test x"$set_precomp" = x"yes"])
-AM_CONDITIONAL([ENABLE_MODULE_ECDH], [test x"$enable_module_ecdh" = x"yes"])
-AM_CONDITIONAL([ENABLE_MODULE_RECOVERY], [test x"$enable_module_recovery" = x"yes"])
-AM_CONDITIONAL([USE_JNI], [test x"$use_jni" = x"yes"])
-AM_CONDITIONAL([USE_EXTERNAL_ASM], [test x"$use_external_asm" = x"yes"])
-AM_CONDITIONAL([USE_ASM_ARM], [test x"$set_asm" = x"arm"])
-
-dnl make sure nothing new is exported so that we don't break the cache
-PKGCONFIG_PATH_TEMP="$PKG_CONFIG_PATH"
-unset PKG_CONFIG_PATH
-PKG_CONFIG_PATH="$PKGCONFIG_PATH_TEMP"
-
-AC_OUTPUT
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/contrib/lax_der_parsing.c b/crypto/secp256k1/internal/secp256k1/libsecp256k1/contrib/lax_der_parsing.c
deleted file mode 100644
index 5b141a994..000000000
--- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/contrib/lax_der_parsing.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/**********************************************************************
- * Copyright (c) 2015 Pieter Wuille                                   *
- * Distributed under the MIT software license, see the accompanying   *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
- **********************************************************************/
-
-#include <string.h>
-#include <stdlib.h>
-
-#include "lax_der_parsing.h"
-
-int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
-    size_t rpos, rlen, spos, slen;
-    size_t pos = 0;
-    size_t lenbyte;
-    unsigned char tmpsig[64] = {0};
-    int overflow = 0;
-
-    /* Hack to initialize sig with a correctly-parsed but invalid signature. 
*/ - secp256k1_ecdsa_signature_parse_compact(ctx, sig, tmpsig); - - /* Sequence tag byte */ - if (pos == inputlen || input[pos] != 0x30) { - return 0; - } - pos++; - - /* Sequence length bytes */ - if (pos == inputlen) { - return 0; - } - lenbyte = input[pos++]; - if (lenbyte & 0x80) { - lenbyte -= 0x80; - if (pos + lenbyte > inputlen) { - return 0; - } - pos += lenbyte; - } - - /* Integer tag byte for R */ - if (pos == inputlen || input[pos] != 0x02) { - return 0; - } - pos++; - - /* Integer length for R */ - if (pos == inputlen) { - return 0; - } - lenbyte = input[pos++]; - if (lenbyte & 0x80) { - lenbyte -= 0x80; - if (pos + lenbyte > inputlen) { - return 0; - } - while (lenbyte > 0 && input[pos] == 0) { - pos++; - lenbyte--; - } - if (lenbyte >= sizeof(size_t)) { - return 0; - } - rlen = 0; - while (lenbyte > 0) { - rlen = (rlen << 8) + input[pos]; - pos++; - lenbyte--; - } - } else { - rlen = lenbyte; - } - if (rlen > inputlen - pos) { - return 0; - } - rpos = pos; - pos += rlen; - - /* Integer tag byte for S */ - if (pos == inputlen || input[pos] != 0x02) { - return 0; - } - pos++; - - /* Integer length for S */ - if (pos == inputlen) { - return 0; - } - lenbyte = input[pos++]; - if (lenbyte & 0x80) { - lenbyte -= 0x80; - if (pos + lenbyte > inputlen) { - return 0; - } - while (lenbyte > 0 && input[pos] == 0) { - pos++; - lenbyte--; - } - if (lenbyte >= sizeof(size_t)) { - return 0; - } - slen = 0; - while (lenbyte > 0) { - slen = (slen << 8) + input[pos]; - pos++; - lenbyte--; - } - } else { - slen = lenbyte; - } - if (slen > inputlen - pos) { - return 0; - } - spos = pos; - pos += slen; - - /* Ignore leading zeroes in R */ - while (rlen > 0 && input[rpos] == 0) { - rlen--; - rpos++; - } - /* Copy R value */ - if (rlen > 32) { - overflow = 1; - } else { - memcpy(tmpsig + 32 - rlen, input + rpos, rlen); - } - - /* Ignore leading zeroes in S */ - while (slen > 0 && input[spos] == 0) { - slen--; - spos++; - } - /* Copy S value */ - if (slen > 32) { - overflow = 1; - } else { - memcpy(tmpsig + 64 - slen, input + spos, slen); - } - - if (!overflow) { - overflow = !secp256k1_ecdsa_signature_parse_compact(ctx, sig, tmpsig); - } - if (overflow) { - memset(tmpsig, 0, 64); - secp256k1_ecdsa_signature_parse_compact(ctx, sig, tmpsig); - } - return 1; -} - diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/contrib/lax_der_parsing.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/contrib/lax_der_parsing.h deleted file mode 100644 index 6d27871a7..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/contrib/lax_der_parsing.h +++ /dev/null @@ -1,91 +0,0 @@ -/********************************************************************** - * Copyright (c) 2015 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -/**** - * Please do not link this file directly. It is not part of the libsecp256k1 - * project and does not promise any stability in its API, functionality or - * presence. Projects which use this code should instead copy this header - * and its accompanying .c file directly into their codebase. - ****/ - -/* This file defines a function that parses DER with various errors and - * violations. This is not a part of the library itself, because the allowed - * violations are chosen arbitrarily and do not follow or establish any - * standard. 
- *
- * In many places it matters that different implementations do not only accept
- * the same set of valid signatures, but also reject the same set of signatures.
- * The only means to accomplish that is by strictly obeying a standard, and not
- * accepting anything else.
- *
- * Nonetheless, sometimes there is a need for compatibility with systems that
- * use signatures which do not strictly obey DER. The snippet below shows how
- * certain violations are easily supported. You may need to adapt it.
- *
- * Do not use this for new systems. Use well-defined DER or compact signatures
- * instead if you have the choice (see secp256k1_ecdsa_signature_parse_der and
- * secp256k1_ecdsa_signature_parse_compact).
- *
- * The supported violations are:
- * - All numbers are parsed as nonnegative integers, even though X.690-0207
- *   section 8.3.3 specifies that integers are always encoded as two's
- *   complement.
- * - Integers can have length 0, even though section 8.3.1 says they can't.
- * - Integers with overly long padding are accepted, violating section
- *   8.3.2.
- * - 127-byte long length descriptors are accepted, even though section
- *   8.1.3.5.c says that they are not.
- * - Trailing garbage data inside or after the signature is ignored.
- * - The length descriptor of the sequence is ignored.
- *
- * Compared to for example OpenSSL, many violations are NOT supported:
- * - Using overly long tag descriptors for the sequence or integers inside,
- *   violating section 8.1.2.2.
- * - Encoding primitive integers as constructed values, violating section
- *   8.3.1.
- */
-
-#ifndef _SECP256K1_CONTRIB_LAX_DER_PARSING_H_
-#define _SECP256K1_CONTRIB_LAX_DER_PARSING_H_
-
-#include <secp256k1.h>
-
-# ifdef __cplusplus
-extern "C" {
-# endif
-
-/** Parse a signature in "lax DER" format
- *
- * Returns: 1 when the signature could be parsed, 0 otherwise.
- * Args: ctx:      a secp256k1 context object
- * Out:  sig:      a pointer to a signature object
- * In:   input:    a pointer to the signature to be parsed
- *       inputlen: the length of the array pointed to by input
- *
- * This function will accept any valid DER encoded signature, even if the
- * encoded numbers are out of range. In addition, it will accept signatures
- * which violate the DER spec in various ways. Its purpose is to allow
- * validation of the Bitcoin blockchain, which includes non-DER signatures
- * from before the network rules were updated to enforce DER. Note that
- * the set of supported violations is a strict subset of what OpenSSL will
- * accept.
- *
- * After the call, sig will always be initialized. If parsing failed or the
- * encoded numbers are out of range, signature validation with it is
- * guaranteed to fail for every message and public key.
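- *
- * As a hedged illustration (an annotation, not from the original header): a
- * strict DER parser rejects an R value encoded as 02 02 00 7F, because the
- * leading zero octet is unnecessary padding; the lax parser in the
- * accompanying .c file strips the zeroes and accepts it as R = 0x7F.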
- */
-int ecdsa_signature_parse_der_lax(
-    const secp256k1_context* ctx,
-    secp256k1_ecdsa_signature* sig,
-    const unsigned char *input,
-    size_t inputlen
-) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/contrib/lax_der_privatekey_parsing.c b/crypto/secp256k1/internal/secp256k1/libsecp256k1/contrib/lax_der_privatekey_parsing.c
deleted file mode 100644
index c2e63b4b8..000000000
--- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/contrib/lax_der_privatekey_parsing.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/**********************************************************************
- * Copyright (c) 2014, 2015 Pieter Wuille                             *
- * Distributed under the MIT software license, see the accompanying   *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
- **********************************************************************/
-
-#include <string.h>
-#include <stdlib.h>
-
-#include "lax_der_privatekey_parsing.h"
-
-int ec_privkey_import_der(const secp256k1_context* ctx, unsigned char *out32, const unsigned char *privkey, size_t privkeylen) {
-    const unsigned char *end = privkey + privkeylen;
-    int lenb = 0;
-    int len = 0;
-    memset(out32, 0, 32);
-    /* sequence header */
-    if (end < privkey+1 || *privkey != 0x30) {
-        return 0;
-    }
-    privkey++;
-    /* sequence length constructor */
-    if (end < privkey+1 || !(*privkey & 0x80)) {
-        return 0;
-    }
-    lenb = *privkey & ~0x80; privkey++;
-    if (lenb < 1 || lenb > 2) {
-        return 0;
-    }
-    if (end < privkey+lenb) {
-        return 0;
-    }
-    /* sequence length */
-    len = privkey[lenb-1] | (lenb > 1 ? privkey[lenb-2] << 8 : 0);
-    privkey += lenb;
-    if (end < privkey+len) {
-        return 0;
-    }
-    /* sequence element 0: version number (=1) */
-    if (end < privkey+3 || privkey[0] != 0x02 || privkey[1] != 0x01 || privkey[2] != 0x01) {
-        return 0;
-    }
-    privkey += 3;
-    /* sequence element 1: octet string, up to 32 bytes */
-    if (end < privkey+2 || privkey[0] != 0x04 || privkey[1] > 0x20 || end < privkey+2+privkey[1]) {
-        return 0;
-    }
-    memcpy(out32 + 32 - privkey[1], privkey + 2, privkey[1]);
-    if (!secp256k1_ec_seckey_verify(ctx, out32)) {
-        memset(out32, 0, 32);
-        return 0;
-    }
-    return 1;
-}
-
-int ec_privkey_export_der(const secp256k1_context *ctx, unsigned char *privkey, size_t *privkeylen, const unsigned char *key32, int compressed) {
-    secp256k1_pubkey pubkey;
-    size_t pubkeylen = 0;
-    if (!secp256k1_ec_pubkey_create(ctx, &pubkey, key32)) {
-        *privkeylen = 0;
-        return 0;
-    }
-    if (compressed) {
-        static const unsigned char begin[] = {
-            0x30,0x81,0xD3,0x02,0x01,0x01,0x04,0x20
-        };
-        static const unsigned char middle[] = {
-            0xA0,0x81,0x85,0x30,0x81,0x82,0x02,0x01,0x01,0x30,0x2C,0x06,0x07,0x2A,0x86,0x48,
-            0xCE,0x3D,0x01,0x01,0x02,0x21,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
-            0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
-            0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F,0x30,0x06,0x04,0x01,0x00,0x04,0x01,0x07,0x04,
-            0x21,0x02,0x79,0xBE,0x66,0x7E,0xF9,0xDC,0xBB,0xAC,0x55,0xA0,0x62,0x95,0xCE,0x87,
-            0x0B,0x07,0x02,0x9B,0xFC,0xDB,0x2D,0xCE,0x28,0xD9,0x59,0xF2,0x81,0x5B,0x16,0xF8,
-            0x17,0x98,0x02,0x21,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
-            0xFF,0xFF,0xFF,0xFF,0xFE,0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,0xBF,0xD2,0x5E,
-            0x8C,0xD0,0x36,0x41,0x41,0x02,0x01,0x01,0xA1,0x24,0x03,0x22,0x00
-        };
-        unsigned char *ptr = privkey;
-        memcpy(ptr, begin, sizeof(begin)); ptr += sizeof(begin);
-        memcpy(ptr, key32, 
32); ptr += 32; - memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle); - pubkeylen = 33; - secp256k1_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED); - ptr += pubkeylen; - *privkeylen = ptr - privkey; - } else { - static const unsigned char begin[] = { - 0x30,0x82,0x01,0x13,0x02,0x01,0x01,0x04,0x20 - }; - static const unsigned char middle[] = { - 0xA0,0x81,0xA5,0x30,0x81,0xA2,0x02,0x01,0x01,0x30,0x2C,0x06,0x07,0x2A,0x86,0x48, - 0xCE,0x3D,0x01,0x01,0x02,0x21,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F,0x30,0x06,0x04,0x01,0x00,0x04,0x01,0x07,0x04, - 0x41,0x04,0x79,0xBE,0x66,0x7E,0xF9,0xDC,0xBB,0xAC,0x55,0xA0,0x62,0x95,0xCE,0x87, - 0x0B,0x07,0x02,0x9B,0xFC,0xDB,0x2D,0xCE,0x28,0xD9,0x59,0xF2,0x81,0x5B,0x16,0xF8, - 0x17,0x98,0x48,0x3A,0xDA,0x77,0x26,0xA3,0xC4,0x65,0x5D,0xA4,0xFB,0xFC,0x0E,0x11, - 0x08,0xA8,0xFD,0x17,0xB4,0x48,0xA6,0x85,0x54,0x19,0x9C,0x47,0xD0,0x8F,0xFB,0x10, - 0xD4,0xB8,0x02,0x21,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFE,0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,0xBF,0xD2,0x5E, - 0x8C,0xD0,0x36,0x41,0x41,0x02,0x01,0x01,0xA1,0x44,0x03,0x42,0x00 - }; - unsigned char *ptr = privkey; - memcpy(ptr, begin, sizeof(begin)); ptr += sizeof(begin); - memcpy(ptr, key32, 32); ptr += 32; - memcpy(ptr, middle, sizeof(middle)); ptr += sizeof(middle); - pubkeylen = 65; - secp256k1_ec_pubkey_serialize(ctx, ptr, &pubkeylen, &pubkey, SECP256K1_EC_UNCOMPRESSED); - ptr += pubkeylen; - *privkeylen = ptr - privkey; - } - return 1; -} diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/contrib/lax_der_privatekey_parsing.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/contrib/lax_der_privatekey_parsing.h deleted file mode 100644 index 2fd088f8a..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/contrib/lax_der_privatekey_parsing.h +++ /dev/null @@ -1,90 +0,0 @@ -/********************************************************************** - * Copyright (c) 2014, 2015 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -/**** - * Please do not link this file directly. It is not part of the libsecp256k1 - * project and does not promise any stability in its API, functionality or - * presence. Projects which use this code should instead copy this header - * and its accompanying .c file directly into their codebase. - ****/ - -/* This file contains code snippets that parse DER private keys with - * various errors and violations. This is not a part of the library - * itself, because the allowed violations are chosen arbitrarily and - * do not follow or establish any standard. - * - * It also contains code to serialize private keys in a compatible - * manner. - * - * These functions are meant for compatibility with applications - * that require BER encoded keys. When working with secp256k1-specific - * code, the simple 32-byte private keys normally used by the - * library are sufficient. - */ - -#ifndef _SECP256K1_CONTRIB_BER_PRIVATEKEY_H_ -#define _SECP256K1_CONTRIB_BER_PRIVATEKEY_H_ - -#include - -# ifdef __cplusplus -extern "C" { -# endif - -/** Export a private key in DER format. - * - * Returns: 1 if the private key was valid. 
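As a round-trip sketch for the two contrib functions (assumptions: a signing-capable context and a valid 32-byte key named seckey; the 279-byte buffer is the documented worst-case size):

unsigned char der[279];   /* documented maximum DER size */
size_t derlen = sizeof(der);
unsigned char recovered[32];
secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);

/* seckey: assumed to be a valid 32-byte secret key provided by the caller. */
if (ec_privkey_export_der(ctx, der, &derlen, seckey, 1) &&
    ec_privkey_import_der(ctx, recovered, der, derlen)) {
    /* recovered now holds the same 32 bytes as seckey. */
}
secp256k1_context_destroy(ctx);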
- * Args: ctx: pointer to a context object, initialized for signing (cannot - * be NULL) - * Out: privkey: pointer to an array for storing the private key in BER. - * Should have space for 279 bytes, and cannot be NULL. - * privkeylen: Pointer to an int where the length of the private key in - * privkey will be stored. - * In: seckey: pointer to a 32-byte secret key to export. - * compressed: 1 if the key should be exported in - * compressed format, 0 otherwise - * - * This function is purely meant for compatibility with applications that - * require BER encoded keys. When working with secp256k1-specific code, the - * simple 32-byte private keys are sufficient. - * - * Note that this function does not guarantee correct DER output. It is - * guaranteed to be parsable by secp256k1_ec_privkey_import_der. - */ -SECP256K1_WARN_UNUSED_RESULT int ec_privkey_export_der( - const secp256k1_context* ctx, - unsigned char *privkey, - size_t *privkeylen, - const unsigned char *seckey, - int compressed -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); - -/** Import a private key in DER format. - * Returns: 1 if a private key was extracted. - * Args: ctx: pointer to a context object (cannot be NULL). - * Out: seckey: pointer to a 32-byte array for storing the private key. - * (cannot be NULL). - * In: privkey: pointer to a private key in DER format (cannot be NULL). - * privkeylen: length of the DER private key pointed to by privkey. - * - * This function will accept more than just strict DER, and even allow some BER - * violations. The public key stored inside the DER-encoded private key is not - * verified for correctness, nor are the curve parameters. Use this function - * only if you know in advance it is supposed to contain a secp256k1 private - * key. - */ -SECP256K1_WARN_UNUSED_RESULT int ec_privkey_import_der( - const secp256k1_context* ctx, - unsigned char *seckey, - const unsigned char *privkey, - size_t privkeylen -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/include/secp256k1.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/include/secp256k1.h deleted file mode 100644 index f268e309d..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/include/secp256k1.h +++ /dev/null @@ -1,577 +0,0 @@ -#ifndef _SECP256K1_ -# define _SECP256K1_ - -# ifdef __cplusplus -extern "C" { -# endif - -#include <stddef.h> - -/* These rules specify the order of arguments in API calls: - * - * 1. Context pointers go first, followed by output arguments, combined - * output/input arguments, and finally input-only arguments. - * 2. Array lengths always immediately follow the argument whose length - * they describe, even if this violates rule 1. - * 3. Within the OUT/OUTIN/IN groups, pointers to data that is typically generated - * later go first. This means: signatures, public nonces, private nonces, - * messages, public keys, secret keys, tweaks. - * 4. Arguments that are not data pointers go last, from more complex to less - * complex: function pointers, algorithm names, messages, void pointers, - * counts, flags, booleans. - * 5. Opaque data pointers follow the function pointer they are to be passed to. - */ - -/** Opaque data structure that holds context information (precomputed tables etc.). 
- * - * The purpose of context structures is to cache large precomputed data tables - * that are expensive to construct, and also to maintain the randomization data - * for blinding. - * - * Do not create a new context object for each operation, as construction is - * far slower than all other API calls (~100 times slower than an ECDSA - * verification). - * - * A constructed context can safely be used from multiple threads - * simultaneously, but API calls that take a non-const pointer to a context - * need exclusive access to it. In particular this is the case for - * secp256k1_context_destroy and secp256k1_context_randomize. - * - * Regarding randomization, either do it once at creation time (in which case - * you do not need any locking for the other calls), or use a read-write lock. - */ -typedef struct secp256k1_context_struct secp256k1_context; - -/** Opaque data structure that holds a parsed and valid public key. - * - * The exact representation of data inside is implementation defined and not - * guaranteed to be portable between different platforms or versions. It is - * however guaranteed to be 64 bytes in size, and can be safely copied/moved. - * If you need to convert to a format suitable for storage, transmission, or - * comparison, use secp256k1_ec_pubkey_serialize and secp256k1_ec_pubkey_parse. - */ -typedef struct { - unsigned char data[64]; -} secp256k1_pubkey; - -/** Opaque data structure that holds a parsed ECDSA signature. - * - * The exact representation of data inside is implementation defined and not - * guaranteed to be portable between different platforms or versions. It is - * however guaranteed to be 64 bytes in size, and can be safely copied/moved. - * If you need to convert to a format suitable for storage, transmission, or - * comparison, use the secp256k1_ecdsa_signature_serialize_* and - * secp256k1_ecdsa_signature_parse_* functions. - */ -typedef struct { - unsigned char data[64]; -} secp256k1_ecdsa_signature; - -/** A pointer to a function to deterministically generate a nonce. - * - * Returns: 1 if a nonce was successfully generated. 0 will cause signing to fail. - * Out: nonce32: pointer to a 32-byte array to be filled by the function. - * In: msg32: the 32-byte message hash being signed (will not be NULL) - * key32: pointer to a 32-byte secret key (will not be NULL) - * algo16: pointer to a 16-byte array describing the signature - * algorithm (will be NULL for ECDSA for compatibility). - * data: Arbitrary data pointer that is passed through. - * attempt: how many iterations we have tried to find a nonce. - * This will almost always be 0, but different attempt values - * are required to result in a different nonce. - * - * Except for test cases, this function should compute some cryptographic hash of - * the message, the algorithm, the key and the attempt. 
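A conforming callback can be as simple as a wrapper that delegates to the RFC6979 implementation exported further below in this header and bounds the number of attempts. A sketch (the retry limit is an arbitrary illustration, not a library requirement); it would be passed as the noncefp argument of secp256k1_ecdsa_sign:

/* Sketch of a user-defined nonce callback matching the typedef below. */
static int my_nonce_fn(unsigned char *nonce32, const unsigned char *msg32,
                       const unsigned char *key32, const unsigned char *algo16,
                       void *data, unsigned int attempt) {
    if (attempt > 100) {
        return 0; /* returning 0 makes signing fail */
    }
    /* Delegate to the library's deterministic RFC6979 generator. */
    return secp256k1_nonce_function_rfc6979(nonce32, msg32, key32, algo16,
                                            data, attempt);
}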
- */ -typedef int (*secp256k1_nonce_function)( - unsigned char *nonce32, - const unsigned char *msg32, - const unsigned char *key32, - const unsigned char *algo16, - void *data, - unsigned int attempt -); - -# if !defined(SECP256K1_GNUC_PREREQ) -# if defined(__GNUC__)&&defined(__GNUC_MINOR__) -# define SECP256K1_GNUC_PREREQ(_maj,_min) \ - ((__GNUC__<<16)+__GNUC_MINOR__>=((_maj)<<16)+(_min)) -# else -# define SECP256K1_GNUC_PREREQ(_maj,_min) 0 -# endif -# endif - -# if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) ) -# if SECP256K1_GNUC_PREREQ(2,7) -# define SECP256K1_INLINE __inline__ -# elif (defined(_MSC_VER)) -# define SECP256K1_INLINE __inline -# else -# define SECP256K1_INLINE -# endif -# else -# define SECP256K1_INLINE inline -# endif - -#ifndef SECP256K1_API -# if defined(_WIN32) -# ifdef SECP256K1_BUILD -# define SECP256K1_API __declspec(dllexport) -# else -# define SECP256K1_API -# endif -# elif defined(__GNUC__) && defined(SECP256K1_BUILD) -# define SECP256K1_API __attribute__ ((visibility ("default"))) -# else -# define SECP256K1_API -# endif -#endif - -/**Warning attributes - * NONNULL is not used if SECP256K1_BUILD is set to avoid the compiler optimizing out - * some paranoid null checks. */ -# if defined(__GNUC__) && SECP256K1_GNUC_PREREQ(3, 4) -# define SECP256K1_WARN_UNUSED_RESULT __attribute__ ((__warn_unused_result__)) -# else -# define SECP256K1_WARN_UNUSED_RESULT -# endif -# if !defined(SECP256K1_BUILD) && defined(__GNUC__) && SECP256K1_GNUC_PREREQ(3, 4) -# define SECP256K1_ARG_NONNULL(_x) __attribute__ ((__nonnull__(_x))) -# else -# define SECP256K1_ARG_NONNULL(_x) -# endif - -/** All flags' lower 8 bits indicate what they're for. Do not use directly. */ -#define SECP256K1_FLAGS_TYPE_MASK ((1 << 8) - 1) -#define SECP256K1_FLAGS_TYPE_CONTEXT (1 << 0) -#define SECP256K1_FLAGS_TYPE_COMPRESSION (1 << 1) -/** The higher bits contain the actual data. Do not use directly. */ -#define SECP256K1_FLAGS_BIT_CONTEXT_VERIFY (1 << 8) -#define SECP256K1_FLAGS_BIT_CONTEXT_SIGN (1 << 9) -#define SECP256K1_FLAGS_BIT_COMPRESSION (1 << 8) - -/** Flags to pass to secp256k1_context_create. */ -#define SECP256K1_CONTEXT_VERIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) -#define SECP256K1_CONTEXT_SIGN (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_SIGN) -#define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT) - -/** Flag to pass to secp256k1_ec_pubkey_serialize and secp256k1_ec_privkey_export. */ -#define SECP256K1_EC_COMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION | SECP256K1_FLAGS_BIT_COMPRESSION) -#define SECP256K1_EC_UNCOMPRESSED (SECP256K1_FLAGS_TYPE_COMPRESSION) - -/** Create a secp256k1 context object. - * - * Returns: a newly created context object. - * In: flags: which parts of the context to initialize. - */ -SECP256K1_API secp256k1_context* secp256k1_context_create( - unsigned int flags -) SECP256K1_WARN_UNUSED_RESULT; - -/** Copies a secp256k1 context object. - * - * Returns: a newly created context object. - * Args: ctx: an existing context to copy (cannot be NULL) - */ -SECP256K1_API secp256k1_context* secp256k1_context_clone( - const secp256k1_context* ctx -) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT; - -/** Destroy a secp256k1 context object. - * - * The context pointer may not be used afterwards. 
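Putting the flags and lifecycle calls above together, a minimal sketch (error handling elided; the second context for a worker thread is an assumption for illustration):

secp256k1_context *ctx = secp256k1_context_create(
    SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
/* A clone gives a second thread its own writable context. */
secp256k1_context *worker = secp256k1_context_clone(ctx);
/* ... use ctx and worker from their respective threads ... */
secp256k1_context_destroy(worker);
secp256k1_context_destroy(ctx);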
- * Args: ctx: an existing context to destroy (cannot be NULL) - */ -SECP256K1_API void secp256k1_context_destroy( - secp256k1_context* ctx -); - -/** Set a callback function to be called when an illegal argument is passed to - * an API call. It will only trigger for violations that are mentioned - * explicitly in the header. - * - * The philosophy is that these shouldn't be dealt with through a - * specific return value, as calling code should not have branches to deal with - * the case that this code itself is broken. - * - * On the other hand, during debug stage, one would want to be informed about - * such mistakes, and the default (crashing) may be inadvisable. - * When this callback is triggered, the API function called is guaranteed not - * to cause a crash, though its return value and output arguments are - * undefined. - * - * Args: ctx: an existing context object (cannot be NULL) - * In: fun: a pointer to a function to call when an illegal argument is - * passed to the API, taking a message and an opaque pointer - * (NULL restores a default handler that calls abort). - * data: the opaque pointer to pass to fun above. - */ -SECP256K1_API void secp256k1_context_set_illegal_callback( - secp256k1_context* ctx, - void (*fun)(const char* message, void* data), - const void* data -) SECP256K1_ARG_NONNULL(1); - -/** Set a callback function to be called when an internal consistency check - * fails. The default is crashing. - * - * This can only trigger in case of a hardware failure, miscompilation, - * memory corruption, serious bug in the library, or other error that would - * otherwise result in undefined behaviour. It will not trigger due to mere - * incorrect usage of the API (see secp256k1_context_set_illegal_callback - * for that). After this callback returns, anything may happen, including - * crashing. - * - * Args: ctx: an existing context object (cannot be NULL) - * In: fun: a pointer to a function to call when an internal error occurs, - * taking a message and an opaque pointer (NULL restores a default - * handler that calls abort). - * data: the opaque pointer to pass to fun above. - */ -SECP256K1_API void secp256k1_context_set_error_callback( - secp256k1_context* ctx, - void (*fun)(const char* message, void* data), - const void* data -) SECP256K1_ARG_NONNULL(1); - -/** Parse a variable-length public key into the pubkey object. - * - * Returns: 1 if the public key was fully valid. - * 0 if the public key could not be parsed or is invalid. - * Args: ctx: a secp256k1 context object. - * Out: pubkey: pointer to a pubkey object. If 1 is returned, it is set to a - * parsed version of input. If not, its value is undefined. - * In: input: pointer to a serialized public key - * inputlen: length of the array pointed to by input - * - * This function supports parsing compressed (33 bytes, header byte 0x02 or - * 0x03), uncompressed (65 bytes, header byte 0x04), or hybrid (65 bytes, header - * byte 0x06 or 0x07) format public keys. - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_parse( - const secp256k1_context* ctx, - secp256k1_pubkey* pubkey, - const unsigned char *input, - size_t inputlen -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); - -/** Serialize a pubkey object into a serialized byte sequence. - * - * Returns: 1 always. - * Args: ctx: a secp256k1 context object. - * Out: output: a pointer to a 65-byte (if compressed==0) or 33-byte (if - * compressed==1) byte array to place the serialized key - * in. 
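A sketch of the parse/serialize pair (the 33-byte compressed input and the ctx object are assumptions supplied by the caller):

unsigned char in33[33];   /* assumed: 0x02/0x03 header byte plus X coordinate */
unsigned char out65[65];
size_t outlen = sizeof(out65);
secp256k1_pubkey pubkey;

if (secp256k1_ec_pubkey_parse(ctx, &pubkey, in33, sizeof(in33))) {
    /* Re-serialize the same key in uncompressed form; outlen becomes 65. */
    secp256k1_ec_pubkey_serialize(ctx, out65, &outlen, &pubkey,
                                  SECP256K1_EC_UNCOMPRESSED);
}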
- * In/Out: outputlen: a pointer to an integer which is initially set to the - * size of output, and is overwritten with the written - * size. - * In: pubkey: a pointer to a secp256k1_pubkey containing an - * initialized public key. - * flags: SECP256K1_EC_COMPRESSED if serialization should be in - * compressed format, otherwise SECP256K1_EC_UNCOMPRESSED. - */ -SECP256K1_API int secp256k1_ec_pubkey_serialize( - const secp256k1_context* ctx, - unsigned char *output, - size_t *outputlen, - const secp256k1_pubkey* pubkey, - unsigned int flags -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); - -/** Parse an ECDSA signature in compact (64 bytes) format. - * - * Returns: 1 when the signature could be parsed, 0 otherwise. - * Args: ctx: a secp256k1 context object - * Out: sig: a pointer to a signature object - * In: input64: a pointer to the 64-byte array to parse - * - * The signature must consist of a 32-byte big endian R value, followed by a - * 32-byte big endian S value. If R or S fall outside of [0..order-1], the - * encoding is invalid. R and S with value 0 are allowed in the encoding. - * - * After the call, sig will always be initialized. If parsing failed or R or - * S are zero, the resulting sig value is guaranteed to fail validation for any - * message and public key. - */ -SECP256K1_API int secp256k1_ecdsa_signature_parse_compact( - const secp256k1_context* ctx, - secp256k1_ecdsa_signature* sig, - const unsigned char *input64 -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); - -/** Parse a DER ECDSA signature. - * - * Returns: 1 when the signature could be parsed, 0 otherwise. - * Args: ctx: a secp256k1 context object - * Out: sig: a pointer to a signature object - * In: input: a pointer to the signature to be parsed - * inputlen: the length of the array pointed to be input - * - * This function will accept any valid DER encoded signature, even if the - * encoded numbers are out of range. - * - * After the call, sig will always be initialized. If parsing failed or the - * encoded numbers are out of range, signature validation with it is - * guaranteed to fail for every message and public key. - */ -SECP256K1_API int secp256k1_ecdsa_signature_parse_der( - const secp256k1_context* ctx, - secp256k1_ecdsa_signature* sig, - const unsigned char *input, - size_t inputlen -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); - -/** Serialize an ECDSA signature in DER format. - * - * Returns: 1 if enough space was available to serialize, 0 otherwise - * Args: ctx: a secp256k1 context object - * Out: output: a pointer to an array to store the DER serialization - * In/Out: outputlen: a pointer to a length integer. Initially, this integer - * should be set to the length of output. After the call - * it will be set to the length of the serialization (even - * if 0 was returned). - * In: sig: a pointer to an initialized signature object - */ -SECP256K1_API int secp256k1_ecdsa_signature_serialize_der( - const secp256k1_context* ctx, - unsigned char *output, - size_t *outputlen, - const secp256k1_ecdsa_signature* sig -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); - -/** Serialize an ECDSA signature in compact (64 byte) format. 
- * - * Returns: 1 - * Args: ctx: a secp256k1 context object - * Out: output64: a pointer to a 64-byte array to store the compact serialization - * In: sig: a pointer to an initialized signature object - * - * See secp256k1_ecdsa_signature_parse_compact for details about the encoding. - */ -SECP256K1_API int secp256k1_ecdsa_signature_serialize_compact( - const secp256k1_context* ctx, - unsigned char *output64, - const secp256k1_ecdsa_signature* sig -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); - -/** Verify an ECDSA signature. - * - * Returns: 1: correct signature - * 0: incorrect or unparseable signature - * Args: ctx: a secp256k1 context object, initialized for verification. - * In: sig: the signature being verified (cannot be NULL) - * msg32: the 32-byte message hash being verified (cannot be NULL) - * pubkey: pointer to an initialized public key to verify with (cannot be NULL) - * - * To avoid accepting malleable signatures, only ECDSA signatures in lower-S - * form are accepted. - * - * If you need to accept ECDSA signatures from sources that do not obey this - * rule, apply secp256k1_ecdsa_signature_normalize to the signature prior to - * validation, but be aware that doing so results in malleable signatures. - * - * For details, see the comments for that function. - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdsa_verify( - const secp256k1_context* ctx, - const secp256k1_ecdsa_signature *sig, - const unsigned char *msg32, - const secp256k1_pubkey *pubkey -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); - -/** Convert a signature to a normalized lower-S form. - * - * Returns: 1 if sigin was not normalized, 0 if it already was. - * Args: ctx: a secp256k1 context object - * Out: sigout: a pointer to a signature to fill with the normalized form, - * or copy if the input was already normalized. (can be NULL if - * you're only interested in whether the input was already - * normalized). - * In: sigin: a pointer to a signature to check/normalize (cannot be NULL, - * can be identical to sigout) - * - * With ECDSA a third-party can forge a second distinct signature of the same - * message, given a single initial signature, but without knowing the key. This - * is done by negating the S value modulo the order of the curve, 'flipping' - * the sign of the random point R which is not included in the signature. - * - * Forgery of the same message isn't universally problematic, but in systems - * where message malleability or uniqueness of signatures is important this can - * cause issues. This forgery can be blocked by all verifiers forcing signers - * to use a normalized form. - * - * The lower-S form reduces the size of signatures slightly on average when - * variable length encodings (such as DER) are used and is cheap to verify, - * making it a good choice. Security of always using lower-S is assured because - * anyone can trivially modify a signature after the fact to enforce this - * property anyway. - * - * The lower S value is always between 0x1 and - * 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0, - * inclusive. - * - * No other forms of ECDSA malleability are known and none seem likely, but - * there is no formal proof that ECDSA, even with this additional restriction, - * is free of other malleability. 
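The acceptance pattern described here can be sketched as follows (sig, msg32 and pubkey are assumed to exist already; this deliberately accepts malleable encodings, as the surrounding text warns):

secp256k1_ecdsa_signature sig_norm;
/* sig: parsed via secp256k1_ecdsa_signature_parse_compact or _der. */
secp256k1_ecdsa_signature_normalize(ctx, &sig_norm, &sig);
if (secp256k1_ecdsa_verify(ctx, &sig_norm, msg32, &pubkey)) {
    /* valid once reduced to lower-S form */
}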
Commonly used serialization schemes will also - * accept various non-unique encodings, so care should be taken when this - * property is required for an application. - * - * The secp256k1_ecdsa_sign function will by default create signatures in the - * lower-S form, and secp256k1_ecdsa_verify will not accept others. In case - * signatures come from a system that cannot enforce this property, - * secp256k1_ecdsa_signature_normalize must be called before verification. - */ -SECP256K1_API int secp256k1_ecdsa_signature_normalize( - const secp256k1_context* ctx, - secp256k1_ecdsa_signature *sigout, - const secp256k1_ecdsa_signature *sigin -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(3); - -/** An implementation of RFC6979 (using HMAC-SHA256) as nonce generation function. - * If a data pointer is passed, it is assumed to be a pointer to 32 bytes of - * extra entropy. - */ -SECP256K1_API extern const secp256k1_nonce_function secp256k1_nonce_function_rfc6979; - -/** A default safe nonce generation function (currently equal to secp256k1_nonce_function_rfc6979). */ -SECP256K1_API extern const secp256k1_nonce_function secp256k1_nonce_function_default; - -/** Create an ECDSA signature. - * - * Returns: 1: signature created - * 0: the nonce generation function failed, or the private key was invalid. - * Args: ctx: pointer to a context object, initialized for signing (cannot be NULL) - * Out: sig: pointer to an array where the signature will be placed (cannot be NULL) - * In: msg32: the 32-byte message hash being signed (cannot be NULL) - * seckey: pointer to a 32-byte secret key (cannot be NULL) - * noncefp: pointer to a nonce generation function. If NULL, secp256k1_nonce_function_default is used - * ndata: pointer to arbitrary data used by the nonce generation function (can be NULL) - * - * The created signature is always in lower-S form. See - * secp256k1_ecdsa_signature_normalize for more details. - */ -SECP256K1_API int secp256k1_ecdsa_sign( - const secp256k1_context* ctx, - secp256k1_ecdsa_signature *sig, - const unsigned char *msg32, - const unsigned char *seckey, - secp256k1_nonce_function noncefp, - const void *ndata -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); - -/** Verify an ECDSA secret key. - * - * Returns: 1: secret key is valid - * 0: secret key is invalid - * Args: ctx: pointer to a context object (cannot be NULL) - * In: seckey: pointer to a 32-byte secret key (cannot be NULL) - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_seckey_verify( - const secp256k1_context* ctx, - const unsigned char *seckey -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); - -/** Compute the public key for a secret key. - * - * Returns: 1: secret was valid, public key stored - * 0: secret was invalid, try again - * Args: ctx: pointer to a context object, initialized for signing (cannot be NULL) - * Out: pubkey: pointer to the created public key (cannot be NULL) - * In: seckey: pointer to a 32-byte private key (cannot be NULL) - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_create( - const secp256k1_context* ctx, - secp256k1_pubkey *pubkey, - const unsigned char *seckey -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); - -/** Tweak a private key by adding tweak to it. 
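An end-to-end sketch of the signing path documented above (ctx is assumed to have been created with SIGN | VERIFY; msg32 and seckey are assumed valid caller inputs):

secp256k1_ecdsa_signature sig;
secp256k1_pubkey pubkey;

if (secp256k1_ec_seckey_verify(ctx, seckey) &&
    secp256k1_ec_pubkey_create(ctx, &pubkey, seckey) &&
    secp256k1_ecdsa_sign(ctx, &sig, msg32, seckey, NULL, NULL)) {
    /* NULL noncefp selects secp256k1_nonce_function_default. */
    int ok = secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkey);
    (void)ok; /* 1: the lower-S signature verifies against the derived key */
}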
- * Returns: 0 if the tweak was out of range (chance of around 1 in 2^128 for - * uniformly random 32-byte arrays), or if the resulting private key - * would be invalid (only when the tweak is the complement of the - * private key). 1 otherwise. - * Args: ctx: pointer to a context object (cannot be NULL). - * In/Out: seckey: pointer to a 32-byte private key. - * In: tweak: pointer to a 32-byte tweak. - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_tweak_add( - const secp256k1_context* ctx, - unsigned char *seckey, - const unsigned char *tweak -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); - -/** Tweak a public key by adding tweak times the generator to it. - * Returns: 0 if the tweak was out of range (chance of around 1 in 2^128 for - * uniformly random 32-byte arrays), or if the resulting public key - * would be invalid (only when the tweak is the complement of the - * corresponding private key). 1 otherwise. - * Args: ctx: pointer to a context object initialized for validation - * (cannot be NULL). - * In/Out: pubkey: pointer to a public key object. - * In: tweak: pointer to a 32-byte tweak. - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_add( - const secp256k1_context* ctx, - secp256k1_pubkey *pubkey, - const unsigned char *tweak -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); - -/** Tweak a private key by multiplying it by a tweak. - * Returns: 0 if the tweak was out of range (chance of around 1 in 2^128 for - * uniformly random 32-byte arrays), or equal to zero. 1 otherwise. - * Args: ctx: pointer to a context object (cannot be NULL). - * In/Out: seckey: pointer to a 32-byte private key. - * In: tweak: pointer to a 32-byte tweak. - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_privkey_tweak_mul( - const secp256k1_context* ctx, - unsigned char *seckey, - const unsigned char *tweak -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); - -/** Tweak a public key by multiplying it by a tweak value. - * Returns: 0 if the tweak was out of range (chance of around 1 in 2^128 for - * uniformly random 32-byte arrays), or equal to zero. 1 otherwise. - * Args: ctx: pointer to a context object initialized for validation - * (cannot be NULL). - * In/Out: pubkey: pointer to a public key object. - * In: tweak: pointer to a 32-byte tweak. - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_mul( - const secp256k1_context* ctx, - secp256k1_pubkey *pubkey, - const unsigned char *tweak -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); - -/** Updates the context randomization. - * Returns: 1: randomization successfully updated - * 0: error - * Args: ctx: pointer to a context object (cannot be NULL) - * In: seed32: pointer to a 32-byte random seed (NULL resets to initial state) - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_context_randomize( - secp256k1_context* ctx, - const unsigned char *seed32 -) SECP256K1_ARG_NONNULL(1); - -/** Add a number of public keys together. - * Returns: 1: the sum of the public keys is valid. - * 0: the sum of the public keys is not valid. 
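The additive tweak calls above are designed to keep a key pair consistent: tweaking the secret key and the matching public key with the same 32 bytes yields a matching pair again. A sketch under that reading (ctx with both flags, seckey and tweak32 are assumed caller inputs; <string.h> is needed for memcpy/memcmp):

unsigned char tweaked[32];
unsigned char a33[33], b33[33];
size_t alen = sizeof(a33), blen = sizeof(b33);
secp256k1_pubkey pubkey, recomputed;

memcpy(tweaked, seckey, 32);
if (secp256k1_ec_pubkey_create(ctx, &pubkey, seckey) &&
    secp256k1_ec_privkey_tweak_add(ctx, tweaked, tweak32) &&
    secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, tweak32) &&
    secp256k1_ec_pubkey_create(ctx, &recomputed, tweaked)) {
    /* Compare serialized forms, since the in-memory representation is
     * implementation defined (see the pubkey documentation above). */
    secp256k1_ec_pubkey_serialize(ctx, a33, &alen, &pubkey, SECP256K1_EC_COMPRESSED);
    secp256k1_ec_pubkey_serialize(ctx, b33, &blen, &recomputed, SECP256K1_EC_COMPRESSED);
    /* memcmp(a33, b33, 33) == 0 is the expected outcome. */
}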
- * Args: ctx: pointer to a context object - * Out: out: pointer to a public key object for placing the resulting public key - * (cannot be NULL) - * In: ins: pointer to array of pointers to public keys (cannot be NULL) - * n: the number of public keys to add together (must be at least 1) - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_combine( - const secp256k1_context* ctx, - secp256k1_pubkey *out, - const secp256k1_pubkey * const * ins, - size_t n -) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); - -# ifdef __cplusplus -} -# endif - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/include/secp256k1_ecdh.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/include/secp256k1_ecdh.h deleted file mode 100644 index 4b84d7a96..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/include/secp256k1_ecdh.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _SECP256K1_ECDH_ -# define _SECP256K1_ECDH_ - -# include "secp256k1.h" - -# ifdef __cplusplus -extern "C" { -# endif - -/** Compute an EC Diffie-Hellman secret in constant time - * Returns: 1: exponentiation was successful - * 0: scalar was invalid (zero or overflow) - * Args: ctx: pointer to a context object (cannot be NULL) - * Out: result: a 32-byte array which will be populated by an ECDH - * secret computed from the point and scalar - * In: pubkey: a pointer to a secp256k1_pubkey containing an - * initialized public key - * privkey: a 32-byte scalar with which to multiply the point - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdh( - const secp256k1_context* ctx, - unsigned char *result, - const secp256k1_pubkey *pubkey, - const unsigned char *privkey -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); - -# ifdef __cplusplus -} -# endif - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/include/secp256k1_recovery.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/include/secp256k1_recovery.h deleted file mode 100644 index 055379725..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/include/secp256k1_recovery.h +++ /dev/null @@ -1,110 +0,0 @@ -#ifndef _SECP256K1_RECOVERY_ -# define _SECP256K1_RECOVERY_ - -# include "secp256k1.h" - -# ifdef __cplusplus -extern "C" { -# endif - -/** Opaque data structure that holds a parsed ECDSA signature, - * supporting pubkey recovery. - * - * The exact representation of data inside is implementation defined and not - * guaranteed to be portable between different platforms or versions. It is - * however guaranteed to be 65 bytes in size, and can be safely copied/moved. - * If you need to convert to a format suitable for storage or transmission, use - * the secp256k1_ecdsa_signature_serialize_* and - * secp256k1_ecdsa_signature_parse_* functions. - * - * Furthermore, it is guaranteed that identical signatures (including their - * recoverability) will have identical representation, so they can be - * memcmp'ed. - */ -typedef struct { - unsigned char data[65]; -} secp256k1_ecdsa_recoverable_signature; - -/** Parse a compact ECDSA signature (64 bytes + recovery id). 
- * - * Returns: 1 when the signature could be parsed, 0 otherwise - * Args: ctx: a secp256k1 context object - * Out: sig: a pointer to a signature object - * In: input64: a pointer to a 64-byte compact signature - * recid: the recovery id (0, 1, 2 or 3) - */ -SECP256K1_API int secp256k1_ecdsa_recoverable_signature_parse_compact( - const secp256k1_context* ctx, - secp256k1_ecdsa_recoverable_signature* sig, - const unsigned char *input64, - int recid -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); - -/** Convert a recoverable signature into a normal signature. - * - * Returns: 1 - * Out: sig: a pointer to a normal signature (cannot be NULL). - * In: sigin: a pointer to a recoverable signature (cannot be NULL). - */ -SECP256K1_API int secp256k1_ecdsa_recoverable_signature_convert( - const secp256k1_context* ctx, - secp256k1_ecdsa_signature* sig, - const secp256k1_ecdsa_recoverable_signature* sigin -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); - -/** Serialize an ECDSA signature in compact format (64 bytes + recovery id). - * - * Returns: 1 - * Args: ctx: a secp256k1 context object - * Out: output64: a pointer to a 64-byte array of the compact signature (cannot be NULL) - * recid: a pointer to an integer to hold the recovery id (can be NULL). - * In: sig: a pointer to an initialized signature object (cannot be NULL) - */ -SECP256K1_API int secp256k1_ecdsa_recoverable_signature_serialize_compact( - const secp256k1_context* ctx, - unsigned char *output64, - int *recid, - const secp256k1_ecdsa_recoverable_signature* sig -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); - -/** Create a recoverable ECDSA signature. - * - * Returns: 1: signature created - * 0: the nonce generation function failed, or the private key was invalid. - * Args: ctx: pointer to a context object, initialized for signing (cannot be NULL) - * Out: sig: pointer to an array where the signature will be placed (cannot be NULL) - * In: msg32: the 32-byte message hash being signed (cannot be NULL) - * seckey: pointer to a 32-byte secret key (cannot be NULL) - * noncefp:pointer to a nonce generation function. If NULL, secp256k1_nonce_function_default is used - * ndata: pointer to arbitrary data used by the nonce generation function (can be NULL) - */ -SECP256K1_API int secp256k1_ecdsa_sign_recoverable( - const secp256k1_context* ctx, - secp256k1_ecdsa_recoverable_signature *sig, - const unsigned char *msg32, - const unsigned char *seckey, - secp256k1_nonce_function noncefp, - const void *ndata -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); - -/** Recover an ECDSA public key from a signature. - * - * Returns: 1: public key successfully recovered (which guarantees a correct signature). - * 0: otherwise. 
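A sketch of the full recovery round trip using the functions in this header (ctx assumed created with SIGN | VERIFY; msg32 and seckey assumed valid caller inputs):

secp256k1_ecdsa_recoverable_signature rsig;
unsigned char compact64[64];
int recid;
secp256k1_pubkey recovered;

if (secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, seckey, NULL, NULL)) {
    secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, compact64,
                                                            &recid, &rsig);
    /* ... transmit compact64 plus recid; on the receiving side: ... */
    secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig,
                                                        compact64, recid);
    if (secp256k1_ecdsa_recover(ctx, &recovered, &rsig, msg32)) {
        /* recovered equals the signer's public key */
    }
}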
- * Args: ctx: pointer to a context object, initialized for verification (cannot be NULL) - * Out: pubkey: pointer to the recovered public key (cannot be NULL) - * In: sig: pointer to initialized signature that supports pubkey recovery (cannot be NULL) - * msg32: the 32-byte message hash assumed to be signed (cannot be NULL) - */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ecdsa_recover( - const secp256k1_context* ctx, - secp256k1_pubkey *pubkey, - const secp256k1_ecdsa_recoverable_signature *sig, - const unsigned char *msg32 -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); - -# ifdef __cplusplus -} -# endif - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/libsecp256k1.pc.in b/crypto/secp256k1/internal/secp256k1/libsecp256k1/libsecp256k1.pc.in deleted file mode 100644 index a0d006f11..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/libsecp256k1.pc.in +++ /dev/null @@ -1,13 +0,0 @@ -prefix=@prefix@ -exec_prefix=@exec_prefix@ -libdir=@libdir@ -includedir=@includedir@ - -Name: libsecp256k1 -Description: Optimized C library for EC operations on curve secp256k1 -URL: https://github.com/bitcoin-core/secp256k1 -Version: @PACKAGE_VERSION@ -Cflags: -I${includedir} -Libs.private: @SECP_LIBS@ -Libs: -L${libdir} -lsecp256k1 - diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/obj/.gitignore b/crypto/secp256k1/internal/secp256k1/libsecp256k1/obj/.gitignore deleted file mode 100644 index e69de29bb..000000000 diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/sage/group_prover.sage b/crypto/secp256k1/internal/secp256k1/libsecp256k1/sage/group_prover.sage deleted file mode 100644 index ab580c5b2..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/sage/group_prover.sage +++ /dev/null @@ -1,322 +0,0 @@ -# This code supports verifying group implementations which have branches -# or conditional statements (like cmovs), by allowing each execution path -# to independently set assumptions on input or intermediary variables. -# -# The general approach is: -# * A constraint is a tuple of two sets of symbolic expressions: -# the first of which are required to evaluate to zero, the second of which -# are required to evaluate to nonzero. -# - A constraint is said to be conflicting if any of its nonzero expressions -# is in the ideal with basis the zero expressions (in other words: when the -# zero expressions imply that one of the nonzero expressions is zero). -# * There is a list of laws that describe the intended behaviour, including -# laws for addition and doubling. Each law is called with the symbolic point -# coordinates as arguments, and returns: -# - A constraint describing the assumptions under which it is applicable, -# called "assumeLaw" -# - A constraint describing the requirements of the law, called "require" -# * Implementations are transliterated into functions that operate as well on -# algebraic input points, and are called once per combination of branches -# executed. 
Each execution returns: -# - A constraint describing the assumptions this implementation requires -# (such as Z1=1), called "assumeFormula" -# - A constraint describing the assumptions this specific branch requires, -# but which is by construction guaranteed to cover the entire space by -# merging the results from all branches, called "assumeBranch" -# - The result of the computation -# * All combinations of laws with implementation branches are tried, and: -# - If the combination of assumeLaw, assumeFormula, and assumeBranch results -# in a conflict, it means this law does not apply to this branch, and it is -# skipped. -# - For others, we try to prove the require constraints hold, assuming the -# information in assumeLaw + assumeFormula + assumeBranch, and if this does -# not succeed, we fail. -# + To prove an expression is zero, we check whether it belongs to the -# ideal with the assumed zero expressions as basis. This test is exact. -# + To prove an expression is nonzero, we check whether each of its -# factors is contained in the set of nonzero assumptions' factors. -# This test is not exact, so various combinations of original and -# reduced expressions' factors are tried. -# - If we succeed, we print out the assumptions from assumeFormula that -# weren't implied by assumeLaw already. Those from assumeBranch are skipped, -# as we assume that all constraints in it are complementary with each other. -# -# Based on the sage verification scripts used in the Explicit-Formulas Database -# by Tanja Lange and others, see http://hyperelliptic.org/EFD - -class fastfrac: - """Fractions over rings.""" - - def __init__(self,R,top,bot=1): - """Construct a fractional, given a ring, a numerator, and denominator.""" - self.R = R - if parent(top) == ZZ or parent(top) == R: - self.top = R(top) - self.bot = R(bot) - elif top.__class__ == fastfrac: - self.top = top.top - self.bot = top.bot * bot - else: - self.top = R(numerator(top)) - self.bot = R(denominator(top)) * bot - - def iszero(self,I): - """Return whether this fraction is zero given an ideal.""" - return self.top in I and self.bot not in I - - def reduce(self,assumeZero): - zero = self.R.ideal(map(numerator, assumeZero)) - return fastfrac(self.R, zero.reduce(self.top)) / fastfrac(self.R, zero.reduce(self.bot)) - - def __add__(self,other): - """Add two fractions.""" - if parent(other) == ZZ: - return fastfrac(self.R,self.top + self.bot * other,self.bot) - if other.__class__ == fastfrac: - return fastfrac(self.R,self.top * other.bot + self.bot * other.top,self.bot * other.bot) - return NotImplemented - - def __sub__(self,other): - """Subtract two fractions.""" - if parent(other) == ZZ: - return fastfrac(self.R,self.top - self.bot * other,self.bot) - if other.__class__ == fastfrac: - return fastfrac(self.R,self.top * other.bot - self.bot * other.top,self.bot * other.bot) - return NotImplemented - - def __neg__(self): - """Return the negation of a fraction.""" - return fastfrac(self.R,-self.top,self.bot) - - def __mul__(self,other): - """Multiply two fractions.""" - if parent(other) == ZZ: - return fastfrac(self.R,self.top * other,self.bot) - if other.__class__ == fastfrac: - return fastfrac(self.R,self.top * other.top,self.bot * other.bot) - return NotImplemented - - def __rmul__(self,other): - """Multiply something else with a fraction.""" - return self.__mul__(other) - - def __div__(self,other): - """Divide two fractions.""" - if parent(other) == ZZ: - return fastfrac(self.R,self.top,self.bot * other) - if other.__class__ == fastfrac: - return 
fastfrac(self.R,self.top * other.bot,self.bot * other.top) - return NotImplemented - - def __pow__(self,other): - """Compute a power of a fraction.""" - if parent(other) == ZZ: - if other < 0: - # Negative powers require flipping top and bottom - return fastfrac(self.R,self.bot ^ (-other),self.top ^ (-other)) - else: - return fastfrac(self.R,self.top ^ other,self.bot ^ other) - return NotImplemented - - def __str__(self): - return "fastfrac((" + str(self.top) + ") / (" + str(self.bot) + "))" - def __repr__(self): - return "%s" % self - - def numerator(self): - return self.top - -class constraints: - """A set of constraints, consisting of zero and nonzero expressions. - - Constraints can either be used to express knowledge or a requirement. - - Both the fields zero and nonzero are maps from expressions to description - strings. The expressions that are the keys in zero are required to be zero, - and the expressions that are the keys in nonzero are required to be nonzero. - - Note that (a != 0) and (b != 0) is the same as (a*b != 0), so all keys in - nonzero could be multiplied into a single key. This is often much less - efficient to work with though, so we keep them separate inside the - constraints. This allows higher-level code to do fast checks on the individual - nonzero elements, or combine them if needed for stronger checks. - - We can't multiply the different zero elements, as it would suffice for one of - the factors to be zero, instead of all of them. Instead, the zero elements are - typically combined into an ideal first. - """ - - def __init__(self, **kwargs): - if 'zero' in kwargs: - self.zero = dict(kwargs['zero']) - else: - self.zero = dict() - if 'nonzero' in kwargs: - self.nonzero = dict(kwargs['nonzero']) - else: - self.nonzero = dict() - - def negate(self): - return constraints(zero=self.nonzero, nonzero=self.zero) - - def __add__(self, other): - zero = self.zero.copy() - zero.update(other.zero) - nonzero = self.nonzero.copy() - nonzero.update(other.nonzero) - return constraints(zero=zero, nonzero=nonzero) - - def __str__(self): - return "constraints(zero=%s,nonzero=%s)" % (self.zero, self.nonzero) - - def __repr__(self): - return "%s" % self - - -def conflicts(R, con): - """Check whether any of the passed non-zero assumptions is implied by the zero assumptions""" - zero = R.ideal(map(numerator, con.zero)) - if 1 in zero: - return True - # First a cheap check whether any of the individual nonzero terms conflict on - # their own. - for nonzero in con.nonzero: - if nonzero.iszero(zero): - return True - # It can be the case that entries in the nonzero set do not individually - # conflict with the zero set, but their combination does. For example, knowing - # that either x or y is zero is equivalent to having x*y in the zero set. - # Having x or y individually in the nonzero set is not a conflict, but both - # simultaneously is, so that is the right thing to check for. 
- if reduce(lambda a,b: a * b, con.nonzero, fastfrac(R, 1)).iszero(zero): - return True - return False - - -def get_nonzero_set(R, assume): - """Calculate a simple set of nonzero expressions""" - zero = R.ideal(map(numerator, assume.zero)) - nonzero = set() - for nz in map(numerator, assume.nonzero): - for (f,n) in nz.factor(): - nonzero.add(f) - rnz = zero.reduce(nz) - for (f,n) in rnz.factor(): - nonzero.add(f) - return nonzero - - -def prove_nonzero(R, exprs, assume): - """Check whether an expression is provably nonzero, given assumptions""" - zero = R.ideal(map(numerator, assume.zero)) - nonzero = get_nonzero_set(R, assume) - expl = set() - ok = True - for expr in exprs: - if numerator(expr) in zero: - return (False, [exprs[expr]]) - allexprs = reduce(lambda a,b: numerator(a)*numerator(b), exprs, 1) - for (f, n) in allexprs.factor(): - if f not in nonzero: - ok = False - if ok: - return (True, None) - ok = True - for (f, n) in zero.reduce(numerator(allexprs)).factor(): - if f not in nonzero: - ok = False - if ok: - return (True, None) - ok = True - for expr in exprs: - for (f,n) in numerator(expr).factor(): - if f not in nonzero: - ok = False - if ok: - return (True, None) - ok = True - for expr in exprs: - for (f,n) in zero.reduce(numerator(expr)).factor(): - if f not in nonzero: - expl.add(exprs[expr]) - if expl: - return (False, list(expl)) - else: - return (True, None) - - -def prove_zero(R, exprs, assume): - """Check whether all of the passed expressions are provably zero, given assumptions""" - r, e = prove_nonzero(R, dict(map(lambda x: (fastfrac(R, x.bot, 1), exprs[x]), exprs)), assume) - if not r: - return (False, map(lambda x: "Possibly zero denominator: %s" % x, e)) - zero = R.ideal(map(numerator, assume.zero)) - nonzero = prod(x for x in assume.nonzero) - expl = [] - for expr in exprs: - if not expr.iszero(zero): - expl.append(exprs[expr]) - if not expl: - return (True, None) - return (False, expl) - - -def describe_extra(R, assume, assumeExtra): - """Describe what assumptions are added, given existing assumptions""" - zerox = assume.zero.copy() - zerox.update(assumeExtra.zero) - zero = R.ideal(map(numerator, assume.zero)) - zeroextra = R.ideal(map(numerator, zerox)) - nonzero = get_nonzero_set(R, assume) - ret = set() - # Iterate over the extra zero expressions - for base in assumeExtra.zero: - if base not in zero: - add = [] - for (f, n) in numerator(base).factor(): - if f not in nonzero: - add += ["%s" % f] - if add: - ret.add((" * ".join(add)) + " = 0 [%s]" % assumeExtra.zero[base]) - # Iterate over the extra nonzero expressions - for nz in assumeExtra.nonzero: - nzr = zeroextra.reduce(numerator(nz)) - if nzr not in zeroextra: - for (f,n) in nzr.factor(): - if zeroextra.reduce(f) not in nonzero: - ret.add("%s != 0" % zeroextra.reduce(f)) - return ", ".join(x for x in ret) - - -def check_symbolic(R, assumeLaw, assumeAssert, assumeBranch, require): - """Check a set of zero and nonzero requirements, given a set of zero and nonzero assumptions""" - assume = assumeLaw + assumeAssert + assumeBranch - - if conflicts(R, assume): - # This formula does not apply - return None - - describe = describe_extra(R, assumeLaw + assumeBranch, assumeAssert) - - ok, msg = prove_zero(R, require.zero, assume) - if not ok: - return "FAIL, %s fails (assuming %s)" % (str(msg), describe) - - res, expl = prove_nonzero(R, require.nonzero, assume) - if not res: - return "FAIL, %s fails (assuming %s)" % (str(expl), describe) - - if describe != "": - return "OK (assuming %s)" % describe - else: - 
return "OK" - - -def concrete_verify(c): - for k in c.zero: - if k != 0: - return (False, c.zero[k]) - for k in c.nonzero: - if k == 0: - return (False, c.nonzero[k]) - return (True, None) diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/sage/secp256k1.sage b/crypto/secp256k1/internal/secp256k1/libsecp256k1/sage/secp256k1.sage deleted file mode 100644 index a97e732f7..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/sage/secp256k1.sage +++ /dev/null @@ -1,306 +0,0 @@ -# Test libsecp256k1' group operation implementations using prover.sage - -import sys - -load("group_prover.sage") -load("weierstrass_prover.sage") - -def formula_secp256k1_gej_double_var(a): - """libsecp256k1's secp256k1_gej_double_var, used by various addition functions""" - rz = a.Z * a.Y - rz = rz * 2 - t1 = a.X^2 - t1 = t1 * 3 - t2 = t1^2 - t3 = a.Y^2 - t3 = t3 * 2 - t4 = t3^2 - t4 = t4 * 2 - t3 = t3 * a.X - rx = t3 - rx = rx * 4 - rx = -rx - rx = rx + t2 - t2 = -t2 - t3 = t3 * 6 - t3 = t3 + t2 - ry = t1 * t3 - t2 = -t4 - ry = ry + t2 - return jacobianpoint(rx, ry, rz) - -def formula_secp256k1_gej_add_var(branch, a, b): - """libsecp256k1's secp256k1_gej_add_var""" - if branch == 0: - return (constraints(), constraints(nonzero={a.Infinity : 'a_infinite'}), b) - if branch == 1: - return (constraints(), constraints(zero={a.Infinity : 'a_finite'}, nonzero={b.Infinity : 'b_infinite'}), a) - z22 = b.Z^2 - z12 = a.Z^2 - u1 = a.X * z22 - u2 = b.X * z12 - s1 = a.Y * z22 - s1 = s1 * b.Z - s2 = b.Y * z12 - s2 = s2 * a.Z - h = -u1 - h = h + u2 - i = -s1 - i = i + s2 - if branch == 2: - r = formula_secp256k1_gej_double_var(a) - return (constraints(), constraints(zero={h : 'h=0', i : 'i=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}), r) - if branch == 3: - return (constraints(), constraints(zero={h : 'h=0', a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={i : 'i!=0'}), point_at_infinity()) - i2 = i^2 - h2 = h^2 - h3 = h2 * h - h = h * b.Z - rz = a.Z * h - t = u1 * h2 - rx = t - rx = rx * 2 - rx = rx + h3 - rx = -rx - rx = rx + i2 - ry = -rx - ry = ry + t - ry = ry * i - h3 = h3 * s1 - h3 = -h3 - ry = ry + h3 - return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) - -def formula_secp256k1_gej_add_ge_var(branch, a, b): - """libsecp256k1's secp256k1_gej_add_ge_var, which assume bz==1""" - if branch == 0: - return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(nonzero={a.Infinity : 'a_infinite'}), b) - if branch == 1: - return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite'}, nonzero={b.Infinity : 'b_infinite'}), a) - z12 = a.Z^2 - u1 = a.X - u2 = b.X * z12 - s1 = a.Y - s2 = b.Y * z12 - s2 = s2 * a.Z - h = -u1 - h = h + u2 - i = -s1 - i = i + s2 - if (branch == 2): - r = formula_secp256k1_gej_double_var(a) - return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r) - if (branch == 3): - return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity()) - i2 = i^2 - h2 = h^2 - h3 = h * h2 - rz = a.Z * h - t = u1 * h2 - rx = t - rx = rx * 2 - rx = rx + h3 - rx = -rx - rx = rx + i2 - ry = -rx - ry = ry + t - ry = ry * i - h3 = h3 * s1 - h3 = -h3 - ry = ry + h3 - return (constraints(zero={b.Z - 1 : 'b.z=1'}), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h 
: 'h!=0'}), jacobianpoint(rx, ry, rz)) - -def formula_secp256k1_gej_add_zinv_var(branch, a, b): - """libsecp256k1's secp256k1_gej_add_zinv_var""" - bzinv = b.Z^(-1) - if branch == 0: - return (constraints(), constraints(nonzero={b.Infinity : 'b_infinite'}), a) - if branch == 1: - bzinv2 = bzinv^2 - bzinv3 = bzinv2 * bzinv - rx = b.X * bzinv2 - ry = b.Y * bzinv3 - rz = 1 - return (constraints(), constraints(zero={b.Infinity : 'b_finite'}, nonzero={a.Infinity : 'a_infinite'}), jacobianpoint(rx, ry, rz)) - azz = a.Z * bzinv - z12 = azz^2 - u1 = a.X - u2 = b.X * z12 - s1 = a.Y - s2 = b.Y * z12 - s2 = s2 * azz - h = -u1 - h = h + u2 - i = -s1 - i = i + s2 - if branch == 2: - r = formula_secp256k1_gej_double_var(a) - return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0', i : 'i=0'}), r) - if branch == 3: - return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite', h : 'h=0'}, nonzero={i : 'i!=0'}), point_at_infinity()) - i2 = i^2 - h2 = h^2 - h3 = h * h2 - rz = a.Z - rz = rz * h - t = u1 * h2 - rx = t - rx = rx * 2 - rx = rx + h3 - rx = -rx - rx = rx + i2 - ry = -rx - ry = ry + t - ry = ry * i - h3 = h3 * s1 - h3 = -h3 - ry = ry + h3 - return (constraints(), constraints(zero={a.Infinity : 'a_finite', b.Infinity : 'b_finite'}, nonzero={h : 'h!=0'}), jacobianpoint(rx, ry, rz)) - -def formula_secp256k1_gej_add_ge(branch, a, b): - """libsecp256k1's secp256k1_gej_add_ge""" - zeroes = {} - nonzeroes = {} - a_infinity = False - if (branch & 4) != 0: - nonzeroes.update({a.Infinity : 'a_infinite'}) - a_infinity = True - else: - zeroes.update({a.Infinity : 'a_finite'}) - zz = a.Z^2 - u1 = a.X - u2 = b.X * zz - s1 = a.Y - s2 = b.Y * zz - s2 = s2 * a.Z - t = u1 - t = t + u2 - m = s1 - m = m + s2 - rr = t^2 - m_alt = -u2 - tt = u1 * m_alt - rr = rr + tt - degenerate = (branch & 3) == 3 - if (branch & 1) != 0: - zeroes.update({m : 'm_zero'}) - else: - nonzeroes.update({m : 'm_nonzero'}) - if (branch & 2) != 0: - zeroes.update({rr : 'rr_zero'}) - else: - nonzeroes.update({rr : 'rr_nonzero'}) - rr_alt = s1 - rr_alt = rr_alt * 2 - m_alt = m_alt + u1 - if not degenerate: - rr_alt = rr - m_alt = m - n = m_alt^2 - q = n * t - n = n^2 - if degenerate: - n = m - t = rr_alt^2 - rz = a.Z * m_alt - infinity = False - if (branch & 8) != 0: - if not a_infinity: - infinity = True - zeroes.update({rz : 'r.z=0'}) - else: - nonzeroes.update({rz : 'r.z!=0'}) - rz = rz * 2 - q = -q - t = t + q - rx = t - t = t * 2 - t = t + q - t = t * rr_alt - t = t + n - ry = -t - rx = rx * 4 - ry = ry * 4 - if a_infinity: - rx = b.X - ry = b.Y - rz = 1 - if infinity: - return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zeroes, nonzero=nonzeroes), point_at_infinity()) - return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zeroes, nonzero=nonzeroes), jacobianpoint(rx, ry, rz)) - -def formula_secp256k1_gej_add_ge_old(branch, a, b): - """libsecp256k1's old secp256k1_gej_add_ge, which fails when ay+by=0 but ax!=bx""" - a_infinity = (branch & 1) != 0 - zero = {} - nonzero = {} - if a_infinity: - nonzero.update({a.Infinity : 'a_infinite'}) - else: - zero.update({a.Infinity : 'a_finite'}) - zz = a.Z^2 - u1 = a.X - u2 = b.X * zz - s1 = a.Y - s2 = b.Y * zz - s2 = s2 * a.Z - z = a.Z - t = u1 - t = t + u2 - m = s1 - m = m + s2 - n = m^2 - q = n * t - n = n^2 - rr = t^2 - t = u1 * u2 - t = -t - rr = rr + t - t = rr^2 - rz = m * z - infinity = False - if (branch & 2) != 0: - if not a_infinity: - infinity 
= True - else: - return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(nonzero={z : 'conflict_a'}, zero={z : 'conflict_b'}), point_at_infinity()) - zero.update({rz : 'r.z=0'}) - else: - nonzero.update({rz : 'r.z!=0'}) - rz = rz * (0 if a_infinity else 2) - rx = t - q = -q - rx = rx + q - q = q * 3 - t = t * 2 - t = t + q - t = t * rr - t = t + n - ry = -t - rx = rx * (0 if a_infinity else 4) - ry = ry * (0 if a_infinity else 4) - t = b.X - t = t * (1 if a_infinity else 0) - rx = rx + t - t = b.Y - t = t * (1 if a_infinity else 0) - ry = ry + t - t = (1 if a_infinity else 0) - rz = rz + t - if infinity: - return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zero, nonzero=nonzero), point_at_infinity()) - return (constraints(zero={b.Z - 1 : 'b.z=1', b.Infinity : 'b_finite'}), constraints(zero=zero, nonzero=nonzero), jacobianpoint(rx, ry, rz)) - -if __name__ == "__main__": - check_symbolic_jacobian_weierstrass("secp256k1_gej_add_var", 0, 7, 5, formula_secp256k1_gej_add_var) - check_symbolic_jacobian_weierstrass("secp256k1_gej_add_ge_var", 0, 7, 5, formula_secp256k1_gej_add_ge_var) - check_symbolic_jacobian_weierstrass("secp256k1_gej_add_zinv_var", 0, 7, 5, formula_secp256k1_gej_add_zinv_var) - check_symbolic_jacobian_weierstrass("secp256k1_gej_add_ge", 0, 7, 16, formula_secp256k1_gej_add_ge) - check_symbolic_jacobian_weierstrass("secp256k1_gej_add_ge_old [should fail]", 0, 7, 4, formula_secp256k1_gej_add_ge_old) - - if len(sys.argv) >= 2 and sys.argv[1] == "--exhaustive": - check_exhaustive_jacobian_weierstrass("secp256k1_gej_add_var", 0, 7, 5, formula_secp256k1_gej_add_var, 43) - check_exhaustive_jacobian_weierstrass("secp256k1_gej_add_ge_var", 0, 7, 5, formula_secp256k1_gej_add_ge_var, 43) - check_exhaustive_jacobian_weierstrass("secp256k1_gej_add_zinv_var", 0, 7, 5, formula_secp256k1_gej_add_zinv_var, 43) - check_exhaustive_jacobian_weierstrass("secp256k1_gej_add_ge", 0, 7, 16, formula_secp256k1_gej_add_ge, 43) - check_exhaustive_jacobian_weierstrass("secp256k1_gej_add_ge_old [should fail]", 0, 7, 4, formula_secp256k1_gej_add_ge_old, 43) diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/sage/weierstrass_prover.sage b/crypto/secp256k1/internal/secp256k1/libsecp256k1/sage/weierstrass_prover.sage deleted file mode 100644 index 03ef2ec90..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/sage/weierstrass_prover.sage +++ /dev/null @@ -1,264 +0,0 @@ -# Prover implementation for Weierstrass curves of the form -# y^2 = x^3 + A * x + B, specifically with a = 0 and b = 7, with group laws -# operating on affine and Jacobian coordinates, including the point at infinity -# represented by a 4th variable in coordinates. 
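For reference, the correspondence the provers below rely on, for the curve with a = 0 and b = 7 and with Jacobian triples (X, Y, Z) standing for affine points, can be summarized as:

\[
y^2 = x^3 + 7, \qquad x = \frac{X}{Z^2}, \qquad y = \frac{Y}{Z^3} \qquad (Z \neq 0),
\]

which is exactly the map check_exhaustive_jacobian_weierstrass uses when it lifts an affine point (x, y) to the Jacobian triple (x z^2, y z^3, z).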
- -load("group_prover.sage") - - -class affinepoint: - def __init__(self, x, y, infinity=0): - self.x = x - self.y = y - self.infinity = infinity - def __str__(self): - return "affinepoint(x=%s,y=%s,inf=%s)" % (self.x, self.y, self.infinity) - - -class jacobianpoint: - def __init__(self, x, y, z, infinity=0): - self.X = x - self.Y = y - self.Z = z - self.Infinity = infinity - def __str__(self): - return "jacobianpoint(X=%s,Y=%s,Z=%s,inf=%s)" % (self.X, self.Y, self.Z, self.Infinity) - - -def point_at_infinity(): - return jacobianpoint(1, 1, 1, 1) - - -def negate(p): - if p.__class__ == affinepoint: - return affinepoint(p.x, -p.y) - if p.__class__ == jacobianpoint: - return jacobianpoint(p.X, -p.Y, p.Z) - assert(False) - - -def on_weierstrass_curve(A, B, p): - """Return a set of zero-expressions for an affine point to be on the curve""" - return constraints(zero={p.x^3 + A*p.x + B - p.y^2: 'on_curve'}) - - -def tangential_to_weierstrass_curve(A, B, p12, p3): - """Return a set of zero-expressions for ((x12,y12),(x3,y3)) to be a line that is tangential to the curve at (x12,y12)""" - return constraints(zero={ - (p12.y - p3.y) * (p12.y * 2) - (p12.x^2 * 3 + A) * (p12.x - p3.x): 'tangential_to_curve' - }) - - -def colinear(p1, p2, p3): - """Return a set of zero-expressions for ((x1,y1),(x2,y2),(x3,y3)) to be collinear""" - return constraints(zero={ - (p1.y - p2.y) * (p1.x - p3.x) - (p1.y - p3.y) * (p1.x - p2.x): 'colinear_1', - (p2.y - p3.y) * (p2.x - p1.x) - (p2.y - p1.y) * (p2.x - p3.x): 'colinear_2', - (p3.y - p1.y) * (p3.x - p2.x) - (p3.y - p2.y) * (p3.x - p1.x): 'colinear_3' - }) - - -def good_affine_point(p): - return constraints(nonzero={p.x : 'nonzero_x', p.y : 'nonzero_y'}) - - -def good_jacobian_point(p): - return constraints(nonzero={p.X : 'nonzero_X', p.Y : 'nonzero_Y', p.Z^6 : 'nonzero_Z'}) - - -def good_point(p): - return constraints(nonzero={p.Z^6 : 'nonzero_X'}) - - -def finite(p, *affine_fns): - con = good_point(p) + constraints(zero={p.Infinity : 'finite_point'}) - if p.Z != 0: - return con + reduce(lambda a, b: a + b, (f(affinepoint(p.X / p.Z^2, p.Y / p.Z^3)) for f in affine_fns), con) - else: - return con - -def infinite(p): - return constraints(nonzero={p.Infinity : 'infinite_point'}) - - -def law_jacobian_weierstrass_add(A, B, pa, pb, pA, pB, pC): - """Check whether the passed set of coordinates is a valid Jacobian add, given assumptions""" - assumeLaw = (good_affine_point(pa) + - good_affine_point(pb) + - good_jacobian_point(pA) + - good_jacobian_point(pB) + - on_weierstrass_curve(A, B, pa) + - on_weierstrass_curve(A, B, pb) + - finite(pA) + - finite(pB) + - constraints(nonzero={pa.x - pb.x : 'different_x'})) - require = (finite(pC, lambda pc: on_weierstrass_curve(A, B, pc) + - colinear(pa, pb, negate(pc)))) - return (assumeLaw, require) - - -def law_jacobian_weierstrass_double(A, B, pa, pb, pA, pB, pC): - """Check whether the passed set of coordinates is a valid Jacobian doubling, given assumptions""" - assumeLaw = (good_affine_point(pa) + - good_affine_point(pb) + - good_jacobian_point(pA) + - good_jacobian_point(pB) + - on_weierstrass_curve(A, B, pa) + - on_weierstrass_curve(A, B, pb) + - finite(pA) + - finite(pB) + - constraints(zero={pa.x - pb.x : 'equal_x', pa.y - pb.y : 'equal_y'})) - require = (finite(pC, lambda pc: on_weierstrass_curve(A, B, pc) + - tangential_to_weierstrass_curve(A, B, pa, negate(pc)))) - return (assumeLaw, require) - - -def law_jacobian_weierstrass_add_opposites(A, B, pa, pb, pA, pB, pC): - assumeLaw = (good_affine_point(pa) + - 
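                 # (Annotation added for this review; not in the original file.)
                 # The assumptions below require two well-formed on-curve points
                 # with equal affine x and opposite affine y, so the law demands
                 # that their sum be the point at infinity.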
good_affine_point(pb) + - good_jacobian_point(pA) + - good_jacobian_point(pB) + - on_weierstrass_curve(A, B, pa) + - on_weierstrass_curve(A, B, pb) + - finite(pA) + - finite(pB) + - constraints(zero={pa.x - pb.x : 'equal_x', pa.y + pb.y : 'opposite_y'})) - require = infinite(pC) - return (assumeLaw, require) - - -def law_jacobian_weierstrass_add_infinite_a(A, B, pa, pb, pA, pB, pC): - assumeLaw = (good_affine_point(pa) + - good_affine_point(pb) + - good_jacobian_point(pA) + - good_jacobian_point(pB) + - on_weierstrass_curve(A, B, pb) + - infinite(pA) + - finite(pB)) - require = finite(pC, lambda pc: constraints(zero={pc.x - pb.x : 'c.x=b.x', pc.y - pb.y : 'c.y=b.y'})) - return (assumeLaw, require) - - -def law_jacobian_weierstrass_add_infinite_b(A, B, pa, pb, pA, pB, pC): - assumeLaw = (good_affine_point(pa) + - good_affine_point(pb) + - good_jacobian_point(pA) + - good_jacobian_point(pB) + - on_weierstrass_curve(A, B, pa) + - infinite(pB) + - finite(pA)) - require = finite(pC, lambda pc: constraints(zero={pc.x - pa.x : 'c.x=a.x', pc.y - pa.y : 'c.y=a.y'})) - return (assumeLaw, require) - - -def law_jacobian_weierstrass_add_infinite_ab(A, B, pa, pb, pA, pB, pC): - assumeLaw = (good_affine_point(pa) + - good_affine_point(pb) + - good_jacobian_point(pA) + - good_jacobian_point(pB) + - infinite(pA) + - infinite(pB)) - require = infinite(pC) - return (assumeLaw, require) - - -laws_jacobian_weierstrass = { - 'add': law_jacobian_weierstrass_add, - 'double': law_jacobian_weierstrass_double, - 'add_opposite': law_jacobian_weierstrass_add_opposites, - 'add_infinite_a': law_jacobian_weierstrass_add_infinite_a, - 'add_infinite_b': law_jacobian_weierstrass_add_infinite_b, - 'add_infinite_ab': law_jacobian_weierstrass_add_infinite_ab -} - - -def check_exhaustive_jacobian_weierstrass(name, A, B, branches, formula, p): - """Verify an implementation of addition of Jacobian points on a Weierstrass curve, by executing and validating the result for every possible addition in a prime field""" - F = Integers(p) - print "Formula %s on Z%i:" % (name, p) - points = [] - for x in xrange(0, p): - for y in xrange(0, p): - point = affinepoint(F(x), F(y)) - r, e = concrete_verify(on_weierstrass_curve(A, B, point)) - if r: - points.append(point) - - for za in xrange(1, p): - for zb in xrange(1, p): - for pa in points: - for pb in points: - for ia in xrange(2): - for ib in xrange(2): - pA = jacobianpoint(pa.x * F(za)^2, pa.y * F(za)^3, F(za), ia) - pB = jacobianpoint(pb.x * F(zb)^2, pb.y * F(zb)^3, F(zb), ib) - for branch in xrange(0, branches): - assumeAssert, assumeBranch, pC = formula(branch, pA, pB) - pC.X = F(pC.X) - pC.Y = F(pC.Y) - pC.Z = F(pC.Z) - pC.Infinity = F(pC.Infinity) - r, e = concrete_verify(assumeAssert + assumeBranch) - if r: - match = False - for key in laws_jacobian_weierstrass: - assumeLaw, require = laws_jacobian_weierstrass[key](A, B, pa, pb, pA, pB, pC) - r, e = concrete_verify(assumeLaw) - if r: - if match: - print " multiple branches for (%s,%s,%s,%s) + (%s,%s,%s,%s)" % (pA.X, pA.Y, pA.Z, pA.Infinity, pB.X, pB.Y, pB.Z, pB.Infinity) - else: - match = True - r, e = concrete_verify(require) - if not r: - print " failure in branch %i for (%s,%s,%s,%s) + (%s,%s,%s,%s) = (%s,%s,%s,%s): %s" % (branch, pA.X, pA.Y, pA.Z, pA.Infinity, pB.X, pB.Y, pB.Z, pB.Infinity, pC.X, pC.Y, pC.Z, pC.Infinity, e) - print - - -def check_symbolic_function(R, assumeAssert, assumeBranch, f, A, B, pa, pb, pA, pB, pC): - assumeLaw, require = f(A, B, pa, pb, pA, pB, pC) - return check_symbolic(R, assumeLaw, assumeAssert, 
assumeBranch, require)
-
-def check_symbolic_jacobian_weierstrass(name, A, B, branches, formula):
-  """Verify an implementation of addition of Jacobian points on a Weierstrass curve symbolically"""
-  R.<ax,bx,ay,by,Az,Bz,Ai,Bi> = PolynomialRing(QQ,8,order='invlex')
-  lift = lambda x: fastfrac(R,x)
-  ax = lift(ax)
-  ay = lift(ay)
-  Az = lift(Az)
-  bx = lift(bx)
-  by = lift(by)
-  Bz = lift(Bz)
-  Ai = lift(Ai)
-  Bi = lift(Bi)
-
-  pa = affinepoint(ax, ay, Ai)
-  pb = affinepoint(bx, by, Bi)
-  pA = jacobianpoint(ax * Az^2, ay * Az^3, Az, Ai)
-  pB = jacobianpoint(bx * Bz^2, by * Bz^3, Bz, Bi)
-
-  res = {}
-
-  for key in laws_jacobian_weierstrass:
-    res[key] = []
-
-  print ("Formula " + name + ":")
-  count = 0
-  for branch in xrange(branches):
-    assumeFormula, assumeBranch, pC = formula(branch, pA, pB)
-    pC.X = lift(pC.X)
-    pC.Y = lift(pC.Y)
-    pC.Z = lift(pC.Z)
-    pC.Infinity = lift(pC.Infinity)
-
-    for key in laws_jacobian_weierstrass:
-      res[key].append((check_symbolic_function(R, assumeFormula, assumeBranch, laws_jacobian_weierstrass[key], A, B, pa, pb, pA, pB, pC), branch))
-
-  for key in res:
-    print "  %s:" % key
-    val = res[key]
-    for x in val:
-      if x[0] is not None:
-        print "    branch %i: %s" % (x[1], x[0])
-
-  print
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/asm/field_10x26_arm.s b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/asm/field_10x26_arm.s
deleted file mode 100644
index 5df561f2f..000000000
--- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/asm/field_10x26_arm.s
+++ /dev/null
@@ -1,919 +0,0 @@
-@ vim: set tabstop=8 softtabstop=8 shiftwidth=8 noexpandtab syntax=armasm:
-/**********************************************************************
- * Copyright (c) 2014 Wladimir J. van der Laan                        *
- * Distributed under the MIT software license, see the accompanying   *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
- **********************************************************************/
-/*
-ARM implementation of field_10x26 inner loops.
-
-Note:
-
-- To avoid unnecessary loads and make use of available registers, two
-  'passes' are interleaved each time, with the odd passes accumulating c' and d',
-  which are added to c and d respectively in the even passes
-
-*/
-
-        .syntax unified
-        .arch armv7-a
-        @ eabi attributes - see readelf -A
-        .eabi_attribute 8, 1    @ Tag_ARM_ISA_use = yes
-        .eabi_attribute 9, 0    @ Tag_Thumb_ISA_use = no
-        .eabi_attribute 10, 0   @ Tag_FP_arch = none
-        .eabi_attribute 24, 1   @ Tag_ABI_align_needed = 8-byte
-        .eabi_attribute 25, 1   @ Tag_ABI_align_preserved = 8-byte, except leaf SP
-        .eabi_attribute 30, 2   @ Tag_ABI_optimization_goals = Aggressive Speed
-        .eabi_attribute 34, 1   @ Tag_CPU_unaligned_access = v6
-        .text
-
-        @ Field constants
-        .set field_R0, 0x3d10
-        .set field_R1, 0x400
-        .set field_not_M, 0xfc000000    @ ~M = ~0x3ffffff
-
-        .align 2
-        .global secp256k1_fe_mul_inner
-        .type secp256k1_fe_mul_inner, %function
-        @ Arguments:
-        @  r0  r        Restrict: can overlap with a, not with b
-        @  r1  a
-        @  r2  b
-        @ Stack (total 4+10*4 = 44)
-        @  sp + #0        saved 'r' pointer
-        @  sp + #4 + 4*X  t0,t1,t2,t3,t4,t5,t6,t7,u8,t9
-secp256k1_fe_mul_inner:
-        stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14}
-        sub sp, sp, #48                 @ frame=44 + alignment
-        str r0, [sp, #0]                @ save result address, we need it only at the end
-
-        /******************************************
-         * Main computation code.
- ****************************************** - - Allocation: - r0,r14,r7,r8 scratch - r1 a (pointer) - r2 b (pointer) - r3:r4 c - r5:r6 d - r11:r12 c' - r9:r10 d' - - Note: do not write to r[] here, it may overlap with a[] - */ - - /* A - interleaved with B */ - ldr r7, [r1, #0*4] @ a[0] - ldr r8, [r2, #9*4] @ b[9] - ldr r0, [r1, #1*4] @ a[1] - umull r5, r6, r7, r8 @ d = a[0] * b[9] - ldr r14, [r2, #8*4] @ b[8] - umull r9, r10, r0, r8 @ d' = a[1] * b[9] - ldr r7, [r1, #2*4] @ a[2] - umlal r5, r6, r0, r14 @ d += a[1] * b[8] - ldr r8, [r2, #7*4] @ b[7] - umlal r9, r10, r7, r14 @ d' += a[2] * b[8] - ldr r0, [r1, #3*4] @ a[3] - umlal r5, r6, r7, r8 @ d += a[2] * b[7] - ldr r14, [r2, #6*4] @ b[6] - umlal r9, r10, r0, r8 @ d' += a[3] * b[7] - ldr r7, [r1, #4*4] @ a[4] - umlal r5, r6, r0, r14 @ d += a[3] * b[6] - ldr r8, [r2, #5*4] @ b[5] - umlal r9, r10, r7, r14 @ d' += a[4] * b[6] - ldr r0, [r1, #5*4] @ a[5] - umlal r5, r6, r7, r8 @ d += a[4] * b[5] - ldr r14, [r2, #4*4] @ b[4] - umlal r9, r10, r0, r8 @ d' += a[5] * b[5] - ldr r7, [r1, #6*4] @ a[6] - umlal r5, r6, r0, r14 @ d += a[5] * b[4] - ldr r8, [r2, #3*4] @ b[3] - umlal r9, r10, r7, r14 @ d' += a[6] * b[4] - ldr r0, [r1, #7*4] @ a[7] - umlal r5, r6, r7, r8 @ d += a[6] * b[3] - ldr r14, [r2, #2*4] @ b[2] - umlal r9, r10, r0, r8 @ d' += a[7] * b[3] - ldr r7, [r1, #8*4] @ a[8] - umlal r5, r6, r0, r14 @ d += a[7] * b[2] - ldr r8, [r2, #1*4] @ b[1] - umlal r9, r10, r7, r14 @ d' += a[8] * b[2] - ldr r0, [r1, #9*4] @ a[9] - umlal r5, r6, r7, r8 @ d += a[8] * b[1] - ldr r14, [r2, #0*4] @ b[0] - umlal r9, r10, r0, r8 @ d' += a[9] * b[1] - ldr r7, [r1, #0*4] @ a[0] - umlal r5, r6, r0, r14 @ d += a[9] * b[0] - @ r7,r14 used in B - - bic r0, r5, field_not_M @ t9 = d & M - str r0, [sp, #4 + 4*9] - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - - /* B */ - umull r3, r4, r7, r14 @ c = a[0] * b[0] - adds r5, r5, r9 @ d += d' - adc r6, r6, r10 - - bic r0, r5, field_not_M @ u0 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u0 * R0 - umlal r3, r4, r0, r14 - - bic r14, r3, field_not_M @ t0 = c & M - str r14, [sp, #4 + 0*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u0 * R1 - umlal r3, r4, r0, r14 - - /* C - interleaved with D */ - ldr r7, [r1, #0*4] @ a[0] - ldr r8, [r2, #2*4] @ b[2] - ldr r14, [r2, #1*4] @ b[1] - umull r11, r12, r7, r8 @ c' = a[0] * b[2] - ldr r0, [r1, #1*4] @ a[1] - umlal r3, r4, r7, r14 @ c += a[0] * b[1] - ldr r8, [r2, #0*4] @ b[0] - umlal r11, r12, r0, r14 @ c' += a[1] * b[1] - ldr r7, [r1, #2*4] @ a[2] - umlal r3, r4, r0, r8 @ c += a[1] * b[0] - ldr r14, [r2, #9*4] @ b[9] - umlal r11, r12, r7, r8 @ c' += a[2] * b[0] - ldr r0, [r1, #3*4] @ a[3] - umlal r5, r6, r7, r14 @ d += a[2] * b[9] - ldr r8, [r2, #8*4] @ b[8] - umull r9, r10, r0, r14 @ d' = a[3] * b[9] - ldr r7, [r1, #4*4] @ a[4] - umlal r5, r6, r0, r8 @ d += a[3] * b[8] - ldr r14, [r2, #7*4] @ b[7] - umlal r9, r10, r7, r8 @ d' += a[4] * b[8] - ldr r0, [r1, #5*4] @ a[5] - umlal r5, r6, r7, r14 @ d += a[4] * b[7] - ldr r8, [r2, #6*4] @ b[6] - umlal r9, r10, r0, r14 @ d' += a[5] * b[7] - ldr r7, [r1, #6*4] @ a[6] - umlal r5, r6, r0, r8 @ d += a[5] * b[6] - ldr r14, [r2, #5*4] @ b[5] - umlal r9, r10, r7, r8 @ d' += a[6] * b[6] - ldr r0, [r1, #7*4] @ a[7] - umlal r5, r6, r7, r14 @ d += a[6] * b[5] - ldr r8, [r2, #4*4] @ b[4] - umlal r9, r10, r0, r14 @ d' += a[7] * b[5] - ldr r7, [r1, #8*4] @ a[8] - umlal r5, r6, r0, r8 @ d 
+= a[7] * b[4] - ldr r14, [r2, #3*4] @ b[3] - umlal r9, r10, r7, r8 @ d' += a[8] * b[4] - ldr r0, [r1, #9*4] @ a[9] - umlal r5, r6, r7, r14 @ d += a[8] * b[3] - ldr r8, [r2, #2*4] @ b[2] - umlal r9, r10, r0, r14 @ d' += a[9] * b[3] - umlal r5, r6, r0, r8 @ d += a[9] * b[2] - - bic r0, r5, field_not_M @ u1 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u1 * R0 - umlal r3, r4, r0, r14 - - bic r14, r3, field_not_M @ t1 = c & M - str r14, [sp, #4 + 1*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u1 * R1 - umlal r3, r4, r0, r14 - - /* D */ - adds r3, r3, r11 @ c += c' - adc r4, r4, r12 - adds r5, r5, r9 @ d += d' - adc r6, r6, r10 - - bic r0, r5, field_not_M @ u2 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u2 * R0 - umlal r3, r4, r0, r14 - - bic r14, r3, field_not_M @ t2 = c & M - str r14, [sp, #4 + 2*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u2 * R1 - umlal r3, r4, r0, r14 - - /* E - interleaved with F */ - ldr r7, [r1, #0*4] @ a[0] - ldr r8, [r2, #4*4] @ b[4] - umull r11, r12, r7, r8 @ c' = a[0] * b[4] - ldr r8, [r2, #3*4] @ b[3] - umlal r3, r4, r7, r8 @ c += a[0] * b[3] - ldr r7, [r1, #1*4] @ a[1] - umlal r11, r12, r7, r8 @ c' += a[1] * b[3] - ldr r8, [r2, #2*4] @ b[2] - umlal r3, r4, r7, r8 @ c += a[1] * b[2] - ldr r7, [r1, #2*4] @ a[2] - umlal r11, r12, r7, r8 @ c' += a[2] * b[2] - ldr r8, [r2, #1*4] @ b[1] - umlal r3, r4, r7, r8 @ c += a[2] * b[1] - ldr r7, [r1, #3*4] @ a[3] - umlal r11, r12, r7, r8 @ c' += a[3] * b[1] - ldr r8, [r2, #0*4] @ b[0] - umlal r3, r4, r7, r8 @ c += a[3] * b[0] - ldr r7, [r1, #4*4] @ a[4] - umlal r11, r12, r7, r8 @ c' += a[4] * b[0] - ldr r8, [r2, #9*4] @ b[9] - umlal r5, r6, r7, r8 @ d += a[4] * b[9] - ldr r7, [r1, #5*4] @ a[5] - umull r9, r10, r7, r8 @ d' = a[5] * b[9] - ldr r8, [r2, #8*4] @ b[8] - umlal r5, r6, r7, r8 @ d += a[5] * b[8] - ldr r7, [r1, #6*4] @ a[6] - umlal r9, r10, r7, r8 @ d' += a[6] * b[8] - ldr r8, [r2, #7*4] @ b[7] - umlal r5, r6, r7, r8 @ d += a[6] * b[7] - ldr r7, [r1, #7*4] @ a[7] - umlal r9, r10, r7, r8 @ d' += a[7] * b[7] - ldr r8, [r2, #6*4] @ b[6] - umlal r5, r6, r7, r8 @ d += a[7] * b[6] - ldr r7, [r1, #8*4] @ a[8] - umlal r9, r10, r7, r8 @ d' += a[8] * b[6] - ldr r8, [r2, #5*4] @ b[5] - umlal r5, r6, r7, r8 @ d += a[8] * b[5] - ldr r7, [r1, #9*4] @ a[9] - umlal r9, r10, r7, r8 @ d' += a[9] * b[5] - ldr r8, [r2, #4*4] @ b[4] - umlal r5, r6, r7, r8 @ d += a[9] * b[4] - - bic r0, r5, field_not_M @ u3 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u3 * R0 - umlal r3, r4, r0, r14 - - bic r14, r3, field_not_M @ t3 = c & M - str r14, [sp, #4 + 3*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u3 * R1 - umlal r3, r4, r0, r14 - - /* F */ - adds r3, r3, r11 @ c += c' - adc r4, r4, r12 - adds r5, r5, r9 @ d += d' - adc r6, r6, r10 - - bic r0, r5, field_not_M @ u4 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u4 * R0 - umlal r3, r4, r0, r14 - - bic r14, r3, field_not_M @ t4 = c & M - str r14, [sp, #4 + 4*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u4 * R1 - umlal r3, r4, r0, r14 - - /* G - interleaved with H */ - 
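@ Added annotation, not part of the original file: each odd pass (A, C, E,
@ G, I) accumulates fresh partial products into c':d' while the following
@ even pass (B, D, F, H, J) folds them into the running sums c:d. The low
@ 26 bits peeled off d (the u* values) re-enter c via field_R0/field_R1
@ because 2^260 == 0x1000003D10 (mod p), and indeed
@ field_R1 * 2^26 + field_R0 = 0x400 * 0x4000000 + 0x3D10 = 0x1000003D10.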
ldr r7, [r1, #0*4] @ a[0] - ldr r8, [r2, #6*4] @ b[6] - ldr r14, [r2, #5*4] @ b[5] - umull r11, r12, r7, r8 @ c' = a[0] * b[6] - ldr r0, [r1, #1*4] @ a[1] - umlal r3, r4, r7, r14 @ c += a[0] * b[5] - ldr r8, [r2, #4*4] @ b[4] - umlal r11, r12, r0, r14 @ c' += a[1] * b[5] - ldr r7, [r1, #2*4] @ a[2] - umlal r3, r4, r0, r8 @ c += a[1] * b[4] - ldr r14, [r2, #3*4] @ b[3] - umlal r11, r12, r7, r8 @ c' += a[2] * b[4] - ldr r0, [r1, #3*4] @ a[3] - umlal r3, r4, r7, r14 @ c += a[2] * b[3] - ldr r8, [r2, #2*4] @ b[2] - umlal r11, r12, r0, r14 @ c' += a[3] * b[3] - ldr r7, [r1, #4*4] @ a[4] - umlal r3, r4, r0, r8 @ c += a[3] * b[2] - ldr r14, [r2, #1*4] @ b[1] - umlal r11, r12, r7, r8 @ c' += a[4] * b[2] - ldr r0, [r1, #5*4] @ a[5] - umlal r3, r4, r7, r14 @ c += a[4] * b[1] - ldr r8, [r2, #0*4] @ b[0] - umlal r11, r12, r0, r14 @ c' += a[5] * b[1] - ldr r7, [r1, #6*4] @ a[6] - umlal r3, r4, r0, r8 @ c += a[5] * b[0] - ldr r14, [r2, #9*4] @ b[9] - umlal r11, r12, r7, r8 @ c' += a[6] * b[0] - ldr r0, [r1, #7*4] @ a[7] - umlal r5, r6, r7, r14 @ d += a[6] * b[9] - ldr r8, [r2, #8*4] @ b[8] - umull r9, r10, r0, r14 @ d' = a[7] * b[9] - ldr r7, [r1, #8*4] @ a[8] - umlal r5, r6, r0, r8 @ d += a[7] * b[8] - ldr r14, [r2, #7*4] @ b[7] - umlal r9, r10, r7, r8 @ d' += a[8] * b[8] - ldr r0, [r1, #9*4] @ a[9] - umlal r5, r6, r7, r14 @ d += a[8] * b[7] - ldr r8, [r2, #6*4] @ b[6] - umlal r9, r10, r0, r14 @ d' += a[9] * b[7] - umlal r5, r6, r0, r8 @ d += a[9] * b[6] - - bic r0, r5, field_not_M @ u5 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u5 * R0 - umlal r3, r4, r0, r14 - - bic r14, r3, field_not_M @ t5 = c & M - str r14, [sp, #4 + 5*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u5 * R1 - umlal r3, r4, r0, r14 - - /* H */ - adds r3, r3, r11 @ c += c' - adc r4, r4, r12 - adds r5, r5, r9 @ d += d' - adc r6, r6, r10 - - bic r0, r5, field_not_M @ u6 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u6 * R0 - umlal r3, r4, r0, r14 - - bic r14, r3, field_not_M @ t6 = c & M - str r14, [sp, #4 + 6*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u6 * R1 - umlal r3, r4, r0, r14 - - /* I - interleaved with J */ - ldr r8, [r2, #8*4] @ b[8] - ldr r7, [r1, #0*4] @ a[0] - ldr r14, [r2, #7*4] @ b[7] - umull r11, r12, r7, r8 @ c' = a[0] * b[8] - ldr r0, [r1, #1*4] @ a[1] - umlal r3, r4, r7, r14 @ c += a[0] * b[7] - ldr r8, [r2, #6*4] @ b[6] - umlal r11, r12, r0, r14 @ c' += a[1] * b[7] - ldr r7, [r1, #2*4] @ a[2] - umlal r3, r4, r0, r8 @ c += a[1] * b[6] - ldr r14, [r2, #5*4] @ b[5] - umlal r11, r12, r7, r8 @ c' += a[2] * b[6] - ldr r0, [r1, #3*4] @ a[3] - umlal r3, r4, r7, r14 @ c += a[2] * b[5] - ldr r8, [r2, #4*4] @ b[4] - umlal r11, r12, r0, r14 @ c' += a[3] * b[5] - ldr r7, [r1, #4*4] @ a[4] - umlal r3, r4, r0, r8 @ c += a[3] * b[4] - ldr r14, [r2, #3*4] @ b[3] - umlal r11, r12, r7, r8 @ c' += a[4] * b[4] - ldr r0, [r1, #5*4] @ a[5] - umlal r3, r4, r7, r14 @ c += a[4] * b[3] - ldr r8, [r2, #2*4] @ b[2] - umlal r11, r12, r0, r14 @ c' += a[5] * b[3] - ldr r7, [r1, #6*4] @ a[6] - umlal r3, r4, r0, r8 @ c += a[5] * b[2] - ldr r14, [r2, #1*4] @ b[1] - umlal r11, r12, r7, r8 @ c' += a[6] * b[2] - ldr r0, [r1, #7*4] @ a[7] - umlal r3, r4, r7, r14 @ c += a[6] * b[1] - ldr r8, [r2, #0*4] @ b[0] - umlal r11, r12, r0, r14 @ c' += a[7] * b[1] - ldr r7, [r1, #8*4] @ a[8] - 
umlal r3, r4, r0, r8 @ c += a[7] * b[0] - ldr r14, [r2, #9*4] @ b[9] - umlal r11, r12, r7, r8 @ c' += a[8] * b[0] - ldr r0, [r1, #9*4] @ a[9] - umlal r5, r6, r7, r14 @ d += a[8] * b[9] - ldr r8, [r2, #8*4] @ b[8] - umull r9, r10, r0, r14 @ d' = a[9] * b[9] - umlal r5, r6, r0, r8 @ d += a[9] * b[8] - - bic r0, r5, field_not_M @ u7 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u7 * R0 - umlal r3, r4, r0, r14 - - bic r14, r3, field_not_M @ t7 = c & M - str r14, [sp, #4 + 7*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u7 * R1 - umlal r3, r4, r0, r14 - - /* J */ - adds r3, r3, r11 @ c += c' - adc r4, r4, r12 - adds r5, r5, r9 @ d += d' - adc r6, r6, r10 - - bic r0, r5, field_not_M @ u8 = d & M - str r0, [sp, #4 + 8*4] - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u8 * R0 - umlal r3, r4, r0, r14 - - /****************************************** - * compute and write back result - ****************************************** - Allocation: - r0 r - r3:r4 c - r5:r6 d - r7 t0 - r8 t1 - r9 t2 - r11 u8 - r12 t9 - r1,r2,r10,r14 scratch - - Note: do not read from a[] after here, it may overlap with r[] - */ - ldr r0, [sp, #0] - add r1, sp, #4 + 3*4 @ r[3..7] = t3..7, r11=u8, r12=t9 - ldmia r1, {r2,r7,r8,r9,r10,r11,r12} - add r1, r0, #3*4 - stmia r1, {r2,r7,r8,r9,r10} - - bic r2, r3, field_not_M @ r[8] = c & M - str r2, [r0, #8*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u8 * R1 - umlal r3, r4, r11, r14 - movw r14, field_R0 @ c += d * R0 - umlal r3, r4, r5, r14 - adds r3, r3, r12 @ c += t9 - adc r4, r4, #0 - - add r1, sp, #4 + 0*4 @ r7,r8,r9 = t0,t1,t2 - ldmia r1, {r7,r8,r9} - - ubfx r2, r3, #0, #22 @ r[9] = c & (M >> 4) - str r2, [r0, #9*4] - mov r3, r3, lsr #22 @ c >>= 22 - orr r3, r3, r4, asl #10 - mov r4, r4, lsr #22 - movw r14, field_R1 << 4 @ c += d * (R1 << 4) - umlal r3, r4, r5, r14 - - movw r14, field_R0 >> 4 @ d = c * (R0 >> 4) + t0 (64x64 multiply+add) - umull r5, r6, r3, r14 @ d = c.lo * (R0 >> 4) - adds r5, r5, r7 @ d.lo += t0 - mla r6, r14, r4, r6 @ d.hi += c.hi * (R0 >> 4) - adc r6, r6, 0 @ d.hi += carry - - bic r2, r5, field_not_M @ r[0] = d & M - str r2, [r0, #0*4] - - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - - movw r14, field_R1 >> 4 @ d += c * (R1 >> 4) + t1 (64x64 multiply+add) - umull r1, r2, r3, r14 @ tmp = c.lo * (R1 >> 4) - adds r5, r5, r8 @ d.lo += t1 - adc r6, r6, #0 @ d.hi += carry - adds r5, r5, r1 @ d.lo += tmp.lo - mla r2, r14, r4, r2 @ tmp.hi += c.hi * (R1 >> 4) - adc r6, r6, r2 @ d.hi += carry + tmp.hi - - bic r2, r5, field_not_M @ r[1] = d & M - str r2, [r0, #1*4] - mov r5, r5, lsr #26 @ d >>= 26 (ignore hi) - orr r5, r5, r6, asl #6 - - add r5, r5, r9 @ d += t2 - str r5, [r0, #2*4] @ r[2] = d - - add sp, sp, #48 - ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc} - .size secp256k1_fe_mul_inner, .-secp256k1_fe_mul_inner - - .align 2 - .global secp256k1_fe_sqr_inner - .type secp256k1_fe_sqr_inner, %function - @ Arguments: - @ r0 r Can overlap with a - @ r1 a - @ Stack (total 4+10*4 = 44) - @ sp + #0 saved 'r' pointer - @ sp + #4 + 4*X t0,t1,t2,t3,t4,t5,t6,t7,u8,t9 -secp256k1_fe_sqr_inner: - stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r14} - sub sp, sp, #48 @ frame=44 + alignment - str r0, [sp, #0] @ save result address, we need it only at the end - 
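@ Added annotation, not part of the original file: the squaring code below
@ keeps the same pass structure as secp256k1_fe_mul_inner, but exploits
@ a[i]*a[j] == a[j]*a[i] by loading each limb once and doubling the cross
@ terms (the 'a[k]*2' operands), roughly halving the multiply count.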
/****************************************** - * Main computation code. - ****************************************** - - Allocation: - r0,r14,r2,r7,r8 scratch - r1 a (pointer) - r3:r4 c - r5:r6 d - r11:r12 c' - r9:r10 d' - - Note: do not write to r[] here, it may overlap with a[] - */ - /* A interleaved with B */ - ldr r0, [r1, #1*4] @ a[1]*2 - ldr r7, [r1, #0*4] @ a[0] - mov r0, r0, asl #1 - ldr r14, [r1, #9*4] @ a[9] - umull r3, r4, r7, r7 @ c = a[0] * a[0] - ldr r8, [r1, #8*4] @ a[8] - mov r7, r7, asl #1 - umull r5, r6, r7, r14 @ d = a[0]*2 * a[9] - ldr r7, [r1, #2*4] @ a[2]*2 - umull r9, r10, r0, r14 @ d' = a[1]*2 * a[9] - ldr r14, [r1, #7*4] @ a[7] - umlal r5, r6, r0, r8 @ d += a[1]*2 * a[8] - mov r7, r7, asl #1 - ldr r0, [r1, #3*4] @ a[3]*2 - umlal r9, r10, r7, r8 @ d' += a[2]*2 * a[8] - ldr r8, [r1, #6*4] @ a[6] - umlal r5, r6, r7, r14 @ d += a[2]*2 * a[7] - mov r0, r0, asl #1 - ldr r7, [r1, #4*4] @ a[4]*2 - umlal r9, r10, r0, r14 @ d' += a[3]*2 * a[7] - ldr r14, [r1, #5*4] @ a[5] - mov r7, r7, asl #1 - umlal r5, r6, r0, r8 @ d += a[3]*2 * a[6] - umlal r9, r10, r7, r8 @ d' += a[4]*2 * a[6] - umlal r5, r6, r7, r14 @ d += a[4]*2 * a[5] - umlal r9, r10, r14, r14 @ d' += a[5] * a[5] - - bic r0, r5, field_not_M @ t9 = d & M - str r0, [sp, #4 + 9*4] - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - - /* B */ - adds r5, r5, r9 @ d += d' - adc r6, r6, r10 - - bic r0, r5, field_not_M @ u0 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u0 * R0 - umlal r3, r4, r0, r14 - bic r14, r3, field_not_M @ t0 = c & M - str r14, [sp, #4 + 0*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u0 * R1 - umlal r3, r4, r0, r14 - - /* C interleaved with D */ - ldr r0, [r1, #0*4] @ a[0]*2 - ldr r14, [r1, #1*4] @ a[1] - mov r0, r0, asl #1 - ldr r8, [r1, #2*4] @ a[2] - umlal r3, r4, r0, r14 @ c += a[0]*2 * a[1] - mov r7, r8, asl #1 @ a[2]*2 - umull r11, r12, r14, r14 @ c' = a[1] * a[1] - ldr r14, [r1, #9*4] @ a[9] - umlal r11, r12, r0, r8 @ c' += a[0]*2 * a[2] - ldr r0, [r1, #3*4] @ a[3]*2 - ldr r8, [r1, #8*4] @ a[8] - umlal r5, r6, r7, r14 @ d += a[2]*2 * a[9] - mov r0, r0, asl #1 - ldr r7, [r1, #4*4] @ a[4]*2 - umull r9, r10, r0, r14 @ d' = a[3]*2 * a[9] - ldr r14, [r1, #7*4] @ a[7] - umlal r5, r6, r0, r8 @ d += a[3]*2 * a[8] - mov r7, r7, asl #1 - ldr r0, [r1, #5*4] @ a[5]*2 - umlal r9, r10, r7, r8 @ d' += a[4]*2 * a[8] - ldr r8, [r1, #6*4] @ a[6] - mov r0, r0, asl #1 - umlal r5, r6, r7, r14 @ d += a[4]*2 * a[7] - umlal r9, r10, r0, r14 @ d' += a[5]*2 * a[7] - umlal r5, r6, r0, r8 @ d += a[5]*2 * a[6] - umlal r9, r10, r8, r8 @ d' += a[6] * a[6] - - bic r0, r5, field_not_M @ u1 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u1 * R0 - umlal r3, r4, r0, r14 - bic r14, r3, field_not_M @ t1 = c & M - str r14, [sp, #4 + 1*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u1 * R1 - umlal r3, r4, r0, r14 - - /* D */ - adds r3, r3, r11 @ c += c' - adc r4, r4, r12 - adds r5, r5, r9 @ d += d' - adc r6, r6, r10 - - bic r0, r5, field_not_M @ u2 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u2 * R0 - umlal r3, r4, r0, r14 - bic r14, r3, field_not_M @ t2 = c & M - str r14, [sp, #4 + 2*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov 
r14, field_R1 @ c += u2 * R1 - umlal r3, r4, r0, r14 - - /* E interleaved with F */ - ldr r7, [r1, #0*4] @ a[0]*2 - ldr r0, [r1, #1*4] @ a[1]*2 - ldr r14, [r1, #2*4] @ a[2] - mov r7, r7, asl #1 - ldr r8, [r1, #3*4] @ a[3] - ldr r2, [r1, #4*4] - umlal r3, r4, r7, r8 @ c += a[0]*2 * a[3] - mov r0, r0, asl #1 - umull r11, r12, r7, r2 @ c' = a[0]*2 * a[4] - mov r2, r2, asl #1 @ a[4]*2 - umlal r11, r12, r0, r8 @ c' += a[1]*2 * a[3] - ldr r8, [r1, #9*4] @ a[9] - umlal r3, r4, r0, r14 @ c += a[1]*2 * a[2] - ldr r0, [r1, #5*4] @ a[5]*2 - umlal r11, r12, r14, r14 @ c' += a[2] * a[2] - ldr r14, [r1, #8*4] @ a[8] - mov r0, r0, asl #1 - umlal r5, r6, r2, r8 @ d += a[4]*2 * a[9] - ldr r7, [r1, #6*4] @ a[6]*2 - umull r9, r10, r0, r8 @ d' = a[5]*2 * a[9] - mov r7, r7, asl #1 - ldr r8, [r1, #7*4] @ a[7] - umlal r5, r6, r0, r14 @ d += a[5]*2 * a[8] - umlal r9, r10, r7, r14 @ d' += a[6]*2 * a[8] - umlal r5, r6, r7, r8 @ d += a[6]*2 * a[7] - umlal r9, r10, r8, r8 @ d' += a[7] * a[7] - - bic r0, r5, field_not_M @ u3 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u3 * R0 - umlal r3, r4, r0, r14 - bic r14, r3, field_not_M @ t3 = c & M - str r14, [sp, #4 + 3*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u3 * R1 - umlal r3, r4, r0, r14 - - /* F */ - adds r3, r3, r11 @ c += c' - adc r4, r4, r12 - adds r5, r5, r9 @ d += d' - adc r6, r6, r10 - - bic r0, r5, field_not_M @ u4 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u4 * R0 - umlal r3, r4, r0, r14 - bic r14, r3, field_not_M @ t4 = c & M - str r14, [sp, #4 + 4*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u4 * R1 - umlal r3, r4, r0, r14 - - /* G interleaved with H */ - ldr r7, [r1, #0*4] @ a[0]*2 - ldr r0, [r1, #1*4] @ a[1]*2 - mov r7, r7, asl #1 - ldr r8, [r1, #5*4] @ a[5] - ldr r2, [r1, #6*4] @ a[6] - umlal r3, r4, r7, r8 @ c += a[0]*2 * a[5] - ldr r14, [r1, #4*4] @ a[4] - mov r0, r0, asl #1 - umull r11, r12, r7, r2 @ c' = a[0]*2 * a[6] - ldr r7, [r1, #2*4] @ a[2]*2 - umlal r11, r12, r0, r8 @ c' += a[1]*2 * a[5] - mov r7, r7, asl #1 - ldr r8, [r1, #3*4] @ a[3] - umlal r3, r4, r0, r14 @ c += a[1]*2 * a[4] - mov r0, r2, asl #1 @ a[6]*2 - umlal r11, r12, r7, r14 @ c' += a[2]*2 * a[4] - ldr r14, [r1, #9*4] @ a[9] - umlal r3, r4, r7, r8 @ c += a[2]*2 * a[3] - ldr r7, [r1, #7*4] @ a[7]*2 - umlal r11, r12, r8, r8 @ c' += a[3] * a[3] - mov r7, r7, asl #1 - ldr r8, [r1, #8*4] @ a[8] - umlal r5, r6, r0, r14 @ d += a[6]*2 * a[9] - umull r9, r10, r7, r14 @ d' = a[7]*2 * a[9] - umlal r5, r6, r7, r8 @ d += a[7]*2 * a[8] - umlal r9, r10, r8, r8 @ d' += a[8] * a[8] - - bic r0, r5, field_not_M @ u5 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u5 * R0 - umlal r3, r4, r0, r14 - bic r14, r3, field_not_M @ t5 = c & M - str r14, [sp, #4 + 5*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u5 * R1 - umlal r3, r4, r0, r14 - - /* H */ - adds r3, r3, r11 @ c += c' - adc r4, r4, r12 - adds r5, r5, r9 @ d += d' - adc r6, r6, r10 - - bic r0, r5, field_not_M @ u6 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u6 * R0 - umlal r3, r4, r0, r14 - bic r14, r3, field_not_M @ t6 = c & M - str r14, [sp, #4 + 6*4] - mov r3, r3, lsr #26 @ c 
>>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u6 * R1 - umlal r3, r4, r0, r14 - - /* I interleaved with J */ - ldr r7, [r1, #0*4] @ a[0]*2 - ldr r0, [r1, #1*4] @ a[1]*2 - mov r7, r7, asl #1 - ldr r8, [r1, #7*4] @ a[7] - ldr r2, [r1, #8*4] @ a[8] - umlal r3, r4, r7, r8 @ c += a[0]*2 * a[7] - ldr r14, [r1, #6*4] @ a[6] - mov r0, r0, asl #1 - umull r11, r12, r7, r2 @ c' = a[0]*2 * a[8] - ldr r7, [r1, #2*4] @ a[2]*2 - umlal r11, r12, r0, r8 @ c' += a[1]*2 * a[7] - ldr r8, [r1, #5*4] @ a[5] - umlal r3, r4, r0, r14 @ c += a[1]*2 * a[6] - ldr r0, [r1, #3*4] @ a[3]*2 - mov r7, r7, asl #1 - umlal r11, r12, r7, r14 @ c' += a[2]*2 * a[6] - ldr r14, [r1, #4*4] @ a[4] - mov r0, r0, asl #1 - umlal r3, r4, r7, r8 @ c += a[2]*2 * a[5] - mov r2, r2, asl #1 @ a[8]*2 - umlal r11, r12, r0, r8 @ c' += a[3]*2 * a[5] - umlal r3, r4, r0, r14 @ c += a[3]*2 * a[4] - umlal r11, r12, r14, r14 @ c' += a[4] * a[4] - ldr r8, [r1, #9*4] @ a[9] - umlal r5, r6, r2, r8 @ d += a[8]*2 * a[9] - @ r8 will be used in J - - bic r0, r5, field_not_M @ u7 = d & M - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u7 * R0 - umlal r3, r4, r0, r14 - bic r14, r3, field_not_M @ t7 = c & M - str r14, [sp, #4 + 7*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u7 * R1 - umlal r3, r4, r0, r14 - - /* J */ - adds r3, r3, r11 @ c += c' - adc r4, r4, r12 - umlal r5, r6, r8, r8 @ d += a[9] * a[9] - - bic r0, r5, field_not_M @ u8 = d & M - str r0, [sp, #4 + 8*4] - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - movw r14, field_R0 @ c += u8 * R0 - umlal r3, r4, r0, r14 - - /****************************************** - * compute and write back result - ****************************************** - Allocation: - r0 r - r3:r4 c - r5:r6 d - r7 t0 - r8 t1 - r9 t2 - r11 u8 - r12 t9 - r1,r2,r10,r14 scratch - - Note: do not read from a[] after here, it may overlap with r[] - */ - ldr r0, [sp, #0] - add r1, sp, #4 + 3*4 @ r[3..7] = t3..7, r11=u8, r12=t9 - ldmia r1, {r2,r7,r8,r9,r10,r11,r12} - add r1, r0, #3*4 - stmia r1, {r2,r7,r8,r9,r10} - - bic r2, r3, field_not_M @ r[8] = c & M - str r2, [r0, #8*4] - mov r3, r3, lsr #26 @ c >>= 26 - orr r3, r3, r4, asl #6 - mov r4, r4, lsr #26 - mov r14, field_R1 @ c += u8 * R1 - umlal r3, r4, r11, r14 - movw r14, field_R0 @ c += d * R0 - umlal r3, r4, r5, r14 - adds r3, r3, r12 @ c += t9 - adc r4, r4, #0 - - add r1, sp, #4 + 0*4 @ r7,r8,r9 = t0,t1,t2 - ldmia r1, {r7,r8,r9} - - ubfx r2, r3, #0, #22 @ r[9] = c & (M >> 4) - str r2, [r0, #9*4] - mov r3, r3, lsr #22 @ c >>= 22 - orr r3, r3, r4, asl #10 - mov r4, r4, lsr #22 - movw r14, field_R1 << 4 @ c += d * (R1 << 4) - umlal r3, r4, r5, r14 - - movw r14, field_R0 >> 4 @ d = c * (R0 >> 4) + t0 (64x64 multiply+add) - umull r5, r6, r3, r14 @ d = c.lo * (R0 >> 4) - adds r5, r5, r7 @ d.lo += t0 - mla r6, r14, r4, r6 @ d.hi += c.hi * (R0 >> 4) - adc r6, r6, 0 @ d.hi += carry - - bic r2, r5, field_not_M @ r[0] = d & M - str r2, [r0, #0*4] - - mov r5, r5, lsr #26 @ d >>= 26 - orr r5, r5, r6, asl #6 - mov r6, r6, lsr #26 - - movw r14, field_R1 >> 4 @ d += c * (R1 >> 4) + t1 (64x64 multiply+add) - umull r1, r2, r3, r14 @ tmp = c.lo * (R1 >> 4) - adds r5, r5, r8 @ d.lo += t1 - adc r6, r6, #0 @ d.hi += carry - adds r5, r5, r1 @ d.lo += tmp.lo - mla r2, r14, r4, r2 @ tmp.hi += c.hi * (R1 >> 4) - adc r6, r6, r2 @ d.hi += carry + tmp.hi - - bic r2, r5, field_not_M @ r[1] = d & M - str r2, [r0, #1*4] - 
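@ Added annotation, not part of the original file: r[0] and r[1] are done;
@ only r[2] remains, so the residual d is shifted one final time, summed
@ with t2 and stored directly below.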
mov r5, r5, lsr #26             @ d >>= 26 (ignore hi)
-        orr r5, r5, r6, asl #6
-
-        add r5, r5, r9          @ d += t2
-        str r5, [r0, #2*4]      @ r[2] = d
-
-        add sp, sp, #48
-        ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}
-        .size secp256k1_fe_sqr_inner, .-secp256k1_fe_sqr_inner
-
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/basic-config.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/basic-config.h
deleted file mode 100644
index c4c16eb7c..000000000
--- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/basic-config.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/**********************************************************************
- * Copyright (c) 2013, 2014 Pieter Wuille                             *
- * Distributed under the MIT software license, see the accompanying   *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
- **********************************************************************/
-
-#ifndef _SECP256K1_BASIC_CONFIG_
-#define _SECP256K1_BASIC_CONFIG_
-
-#ifdef USE_BASIC_CONFIG
-
-#undef USE_ASM_X86_64
-#undef USE_ENDOMORPHISM
-#undef USE_FIELD_10X26
-#undef USE_FIELD_5X52
-#undef USE_FIELD_INV_BUILTIN
-#undef USE_FIELD_INV_NUM
-#undef USE_NUM_GMP
-#undef USE_NUM_NONE
-#undef USE_SCALAR_4X64
-#undef USE_SCALAR_8X32
-#undef USE_SCALAR_INV_BUILTIN
-#undef USE_SCALAR_INV_NUM
-
-#define USE_NUM_NONE 1
-#define USE_FIELD_INV_BUILTIN 1
-#define USE_SCALAR_INV_BUILTIN 1
-#define USE_FIELD_10X26 1
-#define USE_SCALAR_8X32 1
-
-#endif // USE_BASIC_CONFIG
-#endif // _SECP256K1_BASIC_CONFIG_
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench.h
deleted file mode 100644
index 3a71b4aaf..000000000
--- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/**********************************************************************
- * Copyright (c) 2014 Pieter Wuille                                   *
- * Distributed under the MIT software license, see the accompanying   *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
- **********************************************************************/
-
-#ifndef _SECP256K1_BENCH_H_
-#define _SECP256K1_BENCH_H_
-
-#include <stdio.h>
-#include <math.h>
-#include "sys/time.h"
-
-static double gettimedouble(void) {
-    struct timeval tv;
-    gettimeofday(&tv, NULL);
-    return tv.tv_usec * 0.000001 + tv.tv_sec;
-}
-
-void print_number(double x) {
-    double y = x;
-    int c = 0;
-    if (y < 0.0) {
-        y = -y;
-    }
-    while (y < 100.0) {
-        y *= 10.0;
-        c++;
-    }
-    printf("%.*f", c, x);
-}
-
-void run_benchmark(char *name, void (*benchmark)(void*), void (*setup)(void*), void (*teardown)(void*), void* data, int count, int iter) {
-    int i;
-    double min = HUGE_VAL;
-    double sum = 0.0;
-    double max = 0.0;
-    for (i = 0; i < count; i++) {
-        double begin, total;
-        if (setup != NULL) {
-            setup(data);
-        }
-        begin = gettimedouble();
-        benchmark(data);
-        total = gettimedouble() - begin;
-        if (teardown != NULL) {
-            teardown(data);
-        }
-        if (total < min) {
-            min = total;
-        }
-        if (total > max) {
-            max = total;
-        }
-        sum += total;
-    }
-    printf("%s: min ", name);
-    print_number(min * 1000000.0 / iter);
-    printf("us / avg ");
-    print_number((sum / count) * 1000000.0 / iter);
-    printf("us / max ");
-    print_number(max * 1000000.0 / iter);
-    printf("us\n");
-}
-
-#endif
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_ecdh.c b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_ecdh.c
deleted file mode 100644
index cde5e2dbb..000000000
--- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_ecdh.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/**********************************************************************
- * Copyright (c) 2015 Pieter Wuille, Andrew Poelstra                  *
- * Distributed under the MIT software license, see the accompanying   *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
- **********************************************************************/
-
-#include <string.h>
-
-#include "include/secp256k1.h"
-#include "include/secp256k1_ecdh.h"
-#include "util.h"
-#include "bench.h"
-
-typedef struct {
-    secp256k1_context *ctx;
-    secp256k1_pubkey point;
-    unsigned char scalar[32];
-} bench_ecdh_t;
-
-static void bench_ecdh_setup(void* arg) {
-    int i;
-    bench_ecdh_t *data = (bench_ecdh_t*)arg;
-    const unsigned char point[] = {
-        0x03,
-        0x54, 0x94, 0xc1, 0x5d, 0x32, 0x09, 0x97, 0x06,
-        0xc2, 0x39, 0x5f, 0x94, 0x34, 0x87, 0x45, 0xfd,
-        0x75, 0x7c, 0xe3, 0x0e, 0x4e, 0x8c, 0x90, 0xfb,
-        0xa2, 0xba, 0xd1, 0x84, 0xf8, 0x83, 0xc6, 0x9f
-    };
-
-    /* create a context with no capabilities */
-    data->ctx = secp256k1_context_create(SECP256K1_FLAGS_TYPE_CONTEXT);
-    for (i = 0; i < 32; i++) {
-        data->scalar[i] = i + 1;
-    }
-    CHECK(secp256k1_ec_pubkey_parse(data->ctx, &data->point, point, sizeof(point)) == 1);
-}
-
-static void bench_ecdh(void* arg) {
-    int i;
-    unsigned char res[32];
-    bench_ecdh_t *data = (bench_ecdh_t*)arg;
-
-    for (i = 0; i < 20000; i++) {
-        CHECK(secp256k1_ecdh(data->ctx, res, &data->point, data->scalar) == 1);
-    }
-}
-
-int main(void) {
-    bench_ecdh_t data;
-
-    run_benchmark("ecdh", bench_ecdh, bench_ecdh_setup, NULL, &data, 10, 20000);
-    return 0;
-}
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_internal.c b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_internal.c
deleted file mode 100644
index 0809f77bd..000000000
--- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_internal.c
+++ /dev/null
@@ -1,382 +0,0 @@
-/**********************************************************************
- * Copyright (c) 2014-2015 Pieter Wuille                              *
- * Distributed under the MIT software license, see the accompanying   *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
- **********************************************************************/
-#include <stdio.h>
-
-#include "include/secp256k1.h"
-
-#include "util.h"
-#include "hash_impl.h"
-#include "num_impl.h"
-#include "field_impl.h"
-#include "group_impl.h"
-#include "scalar_impl.h"
-#include "ecmult_const_impl.h"
-#include "ecmult_impl.h"
-#include "bench.h"
-#include "secp256k1.c"
-
-typedef struct {
-    secp256k1_scalar scalar_x, scalar_y;
-    secp256k1_fe fe_x, fe_y;
-    secp256k1_ge ge_x, ge_y;
-    secp256k1_gej gej_x, gej_y;
-    unsigned char data[64];
-    int wnaf[256];
-} bench_inv_t;
-
-void bench_setup(void* arg) {
-    bench_inv_t *data = (bench_inv_t*)arg;
-
-    static const unsigned char init_x[32] = {
-        0x02, 0x03, 0x05, 0x07, 0x0b, 0x0d, 0x11, 0x13,
-        0x17, 0x1d, 0x1f, 0x25, 0x29, 0x2b, 0x2f, 0x35,
-        0x3b, 0x3d, 0x43, 0x47, 0x49, 0x4f, 0x53, 0x59,
-        0x61, 0x65, 0x67, 0x6b, 0x6d, 0x71, 0x7f, 0x83
-    };
-
-    static const unsigned char init_y[32] = {
-        0x82, 0x83, 0x85, 0x87, 0x8b, 0x8d, 0x81, 0x83,
-        0x97, 0xad, 0xaf, 0xb5, 0xb9, 0xbb, 0xbf, 0xc5,
-        0xdb, 0xdd, 0xe3, 0xe7, 0xe9, 0xef, 0xf3, 0xf9,
-        0x11, 0x15, 0x17, 0x1b, 0x1d, 0xb1, 0xbf, 0xd3
-    };
-
-    secp256k1_scalar_set_b32(&data->scalar_x, init_x, NULL);
-    secp256k1_scalar_set_b32(&data->scalar_y, init_y, NULL);
-    secp256k1_fe_set_b32(&data->fe_x, init_x);
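    /* Annotation added for this review; not in the original file: every
       representation exercised by the benchmarks (scalars, field elements,
       affine and Jacobian group points, raw bytes) is seeded from the same
       two fixed byte tables, so runs are deterministic. */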
secp256k1_fe_set_b32(&data->fe_y, init_y); - CHECK(secp256k1_ge_set_xo_var(&data->ge_x, &data->fe_x, 0)); - CHECK(secp256k1_ge_set_xo_var(&data->ge_y, &data->fe_y, 1)); - secp256k1_gej_set_ge(&data->gej_x, &data->ge_x); - secp256k1_gej_set_ge(&data->gej_y, &data->ge_y); - memcpy(data->data, init_x, 32); - memcpy(data->data + 32, init_y, 32); -} - -void bench_scalar_add(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 2000000; i++) { - secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); - } -} - -void bench_scalar_negate(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 2000000; i++) { - secp256k1_scalar_negate(&data->scalar_x, &data->scalar_x); - } -} - -void bench_scalar_sqr(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 200000; i++) { - secp256k1_scalar_sqr(&data->scalar_x, &data->scalar_x); - } -} - -void bench_scalar_mul(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 200000; i++) { - secp256k1_scalar_mul(&data->scalar_x, &data->scalar_x, &data->scalar_y); - } -} - -#ifdef USE_ENDOMORPHISM -void bench_scalar_split(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 20000; i++) { - secp256k1_scalar l, r; - secp256k1_scalar_split_lambda(&l, &r, &data->scalar_x); - secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); - } -} -#endif - -void bench_scalar_inverse(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 2000; i++) { - secp256k1_scalar_inverse(&data->scalar_x, &data->scalar_x); - secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); - } -} - -void bench_scalar_inverse_var(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 2000; i++) { - secp256k1_scalar_inverse_var(&data->scalar_x, &data->scalar_x); - secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); - } -} - -void bench_field_normalize(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 2000000; i++) { - secp256k1_fe_normalize(&data->fe_x); - } -} - -void bench_field_normalize_weak(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 2000000; i++) { - secp256k1_fe_normalize_weak(&data->fe_x); - } -} - -void bench_field_mul(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 200000; i++) { - secp256k1_fe_mul(&data->fe_x, &data->fe_x, &data->fe_y); - } -} - -void bench_field_sqr(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 200000; i++) { - secp256k1_fe_sqr(&data->fe_x, &data->fe_x); - } -} - -void bench_field_inverse(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 20000; i++) { - secp256k1_fe_inv(&data->fe_x, &data->fe_x); - secp256k1_fe_add(&data->fe_x, &data->fe_y); - } -} - -void bench_field_inverse_var(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 20000; i++) { - secp256k1_fe_inv_var(&data->fe_x, &data->fe_x); - secp256k1_fe_add(&data->fe_x, &data->fe_y); - } -} - -void bench_field_sqrt(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 20000; i++) { - secp256k1_fe_sqrt(&data->fe_x, &data->fe_x); - secp256k1_fe_add(&data->fe_x, &data->fe_y); - } -} - -void bench_group_double_var(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 200000; i++) { - 
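        /* Annotation added for this review; not in the original file:
           doubles the same point in place; the final NULL asks
           secp256k1_gej_double_var not to report the resulting Z ratio. */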
secp256k1_gej_double_var(&data->gej_x, &data->gej_x, NULL); - } -} - -void bench_group_add_var(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 200000; i++) { - secp256k1_gej_add_var(&data->gej_x, &data->gej_x, &data->gej_y, NULL); - } -} - -void bench_group_add_affine(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 200000; i++) { - secp256k1_gej_add_ge(&data->gej_x, &data->gej_x, &data->ge_y); - } -} - -void bench_group_add_affine_var(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 200000; i++) { - secp256k1_gej_add_ge_var(&data->gej_x, &data->gej_x, &data->ge_y, NULL); - } -} - -void bench_group_jacobi_var(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 20000; i++) { - secp256k1_gej_has_quad_y_var(&data->gej_x); - } -} - -void bench_ecmult_wnaf(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 20000; i++) { - secp256k1_ecmult_wnaf(data->wnaf, 256, &data->scalar_x, WINDOW_A); - secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); - } -} - -void bench_wnaf_const(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - - for (i = 0; i < 20000; i++) { - secp256k1_wnaf_const(data->wnaf, data->scalar_x, WINDOW_A); - secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); - } -} - - -void bench_sha256(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - secp256k1_sha256_t sha; - - for (i = 0; i < 20000; i++) { - secp256k1_sha256_initialize(&sha); - secp256k1_sha256_write(&sha, data->data, 32); - secp256k1_sha256_finalize(&sha, data->data); - } -} - -void bench_hmac_sha256(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - secp256k1_hmac_sha256_t hmac; - - for (i = 0; i < 20000; i++) { - secp256k1_hmac_sha256_initialize(&hmac, data->data, 32); - secp256k1_hmac_sha256_write(&hmac, data->data, 32); - secp256k1_hmac_sha256_finalize(&hmac, data->data); - } -} - -void bench_rfc6979_hmac_sha256(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - secp256k1_rfc6979_hmac_sha256_t rng; - - for (i = 0; i < 20000; i++) { - secp256k1_rfc6979_hmac_sha256_initialize(&rng, data->data, 64); - secp256k1_rfc6979_hmac_sha256_generate(&rng, data->data, 32); - } -} - -void bench_context_verify(void* arg) { - int i; - (void)arg; - for (i = 0; i < 20; i++) { - secp256k1_context_destroy(secp256k1_context_create(SECP256K1_CONTEXT_VERIFY)); - } -} - -void bench_context_sign(void* arg) { - int i; - (void)arg; - for (i = 0; i < 200; i++) { - secp256k1_context_destroy(secp256k1_context_create(SECP256K1_CONTEXT_SIGN)); - } -} - -#ifndef USE_NUM_NONE -void bench_num_jacobi(void* arg) { - int i; - bench_inv_t *data = (bench_inv_t*)arg; - secp256k1_num nx, norder; - - secp256k1_scalar_get_num(&nx, &data->scalar_x); - secp256k1_scalar_order_get_num(&norder); - secp256k1_scalar_get_num(&norder, &data->scalar_y); - - for (i = 0; i < 200000; i++) { - secp256k1_num_jacobi(&nx, &norder); - } -} -#endif - -int have_flag(int argc, char** argv, char *flag) { - char** argm = argv + argc; - argv++; - if (argv == argm) { - return 1; - } - while (argv != NULL && argv != argm) { - if (strcmp(*argv, flag) == 0) { - return 1; - } - argv++; - } - return 0; -} - -int main(int argc, char **argv) { - bench_inv_t data; - if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "add")) run_benchmark("scalar_add", bench_scalar_add, bench_setup, NULL, &data, 10, 2000000); - if (have_flag(argc, 
argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, 2000000); - if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "sqr")) run_benchmark("scalar_sqr", bench_scalar_sqr, bench_setup, NULL, &data, 10, 200000); - if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, 200000); -#ifdef USE_ENDOMORPHISM - if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "split")) run_benchmark("scalar_split", bench_scalar_split, bench_setup, NULL, &data, 10, 20000); -#endif - if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, 2000); - if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, 2000); - - if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize", bench_field_normalize, bench_setup, NULL, &data, 10, 2000000); - if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize_weak", bench_field_normalize_weak, bench_setup, NULL, &data, 10, 2000000); - if (have_flag(argc, argv, "field") || have_flag(argc, argv, "sqr")) run_benchmark("field_sqr", bench_field_sqr, bench_setup, NULL, &data, 10, 200000); - if (have_flag(argc, argv, "field") || have_flag(argc, argv, "mul")) run_benchmark("field_mul", bench_field_mul, bench_setup, NULL, &data, 10, 200000); - if (have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse", bench_field_inverse, bench_setup, NULL, &data, 10, 20000); - if (have_flag(argc, argv, "field") || have_flag(argc, argv, "inverse")) run_benchmark("field_inverse_var", bench_field_inverse_var, bench_setup, NULL, &data, 10, 20000); - if (have_flag(argc, argv, "field") || have_flag(argc, argv, "sqrt")) run_benchmark("field_sqrt", bench_field_sqrt, bench_setup, NULL, &data, 10, 20000); - - if (have_flag(argc, argv, "group") || have_flag(argc, argv, "double")) run_benchmark("group_double_var", bench_group_double_var, bench_setup, NULL, &data, 10, 200000); - if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_var", bench_group_add_var, bench_setup, NULL, &data, 10, 200000); - if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine", bench_group_add_affine, bench_setup, NULL, &data, 10, 200000); - if (have_flag(argc, argv, "group") || have_flag(argc, argv, "add")) run_benchmark("group_add_affine_var", bench_group_add_affine_var, bench_setup, NULL, &data, 10, 200000); - if (have_flag(argc, argv, "group") || have_flag(argc, argv, "jacobi")) run_benchmark("group_jacobi_var", bench_group_jacobi_var, bench_setup, NULL, &data, 10, 20000); - - if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("wnaf_const", bench_wnaf_const, bench_setup, NULL, &data, 10, 20000); - if (have_flag(argc, argv, "ecmult") || have_flag(argc, argv, "wnaf")) run_benchmark("ecmult_wnaf", bench_ecmult_wnaf, bench_setup, NULL, &data, 10, 20000); - - if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "sha256")) run_benchmark("hash_sha256", bench_sha256, bench_setup, NULL, &data, 10, 20000); - if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "hmac")) 
run_benchmark("hash_hmac_sha256", bench_hmac_sha256, bench_setup, NULL, &data, 10, 20000); - if (have_flag(argc, argv, "hash") || have_flag(argc, argv, "rng6979")) run_benchmark("hash_rfc6979_hmac_sha256", bench_rfc6979_hmac_sha256, bench_setup, NULL, &data, 10, 20000); - - if (have_flag(argc, argv, "context") || have_flag(argc, argv, "verify")) run_benchmark("context_verify", bench_context_verify, bench_setup, NULL, &data, 10, 20); - if (have_flag(argc, argv, "context") || have_flag(argc, argv, "sign")) run_benchmark("context_sign", bench_context_sign, bench_setup, NULL, &data, 10, 200); - -#ifndef USE_NUM_NONE - if (have_flag(argc, argv, "num") || have_flag(argc, argv, "jacobi")) run_benchmark("num_jacobi", bench_num_jacobi, bench_setup, NULL, &data, 10, 200000); -#endif - return 0; -} diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_recover.c b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_recover.c deleted file mode 100644 index 6489378cc..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_recover.c +++ /dev/null @@ -1,60 +0,0 @@ -/********************************************************************** - * Copyright (c) 2014-2015 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#include "include/secp256k1.h" -#include "include/secp256k1_recovery.h" -#include "util.h" -#include "bench.h" - -typedef struct { - secp256k1_context *ctx; - unsigned char msg[32]; - unsigned char sig[64]; -} bench_recover_t; - -void bench_recover(void* arg) { - int i; - bench_recover_t *data = (bench_recover_t*)arg; - secp256k1_pubkey pubkey; - unsigned char pubkeyc[33]; - - for (i = 0; i < 20000; i++) { - int j; - size_t pubkeylen = 33; - secp256k1_ecdsa_recoverable_signature sig; - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(data->ctx, &sig, data->sig, i % 2)); - CHECK(secp256k1_ecdsa_recover(data->ctx, &pubkey, &sig, data->msg)); - CHECK(secp256k1_ec_pubkey_serialize(data->ctx, pubkeyc, &pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED)); - for (j = 0; j < 32; j++) { - data->sig[j + 32] = data->msg[j]; /* Move former message to S. */ - data->msg[j] = data->sig[j]; /* Move former R to message. */ - data->sig[j] = pubkeyc[j + 1]; /* Move recovered pubkey X coordinate to R (which must be a valid X coordinate). 
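   (Annotation added for this review; not part of the original comment: the
   shuffle feeds fresh but deterministic inputs to the next iteration, so
   every recover call works on a different signature.)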
*/
-        }
-    }
-}
-
-void bench_recover_setup(void* arg) {
-    int i;
-    bench_recover_t *data = (bench_recover_t*)arg;
-
-    for (i = 0; i < 32; i++) {
-        data->msg[i] = 1 + i;
-    }
-    for (i = 0; i < 64; i++) {
-        data->sig[i] = 65 + i;
-    }
-}
-
-int main(void) {
-    bench_recover_t data;
-
-    data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
-
-    run_benchmark("ecdsa_recover", bench_recover, bench_recover_setup, NULL, &data, 10, 20000);
-
-    secp256k1_context_destroy(data.ctx);
-    return 0;
-}
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_schnorr_verify.c b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_schnorr_verify.c
deleted file mode 100644
index 5f137dda2..000000000
--- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_schnorr_verify.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/**********************************************************************
- * Copyright (c) 2014 Pieter Wuille                                   *
- * Distributed under the MIT software license, see the accompanying   *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
- **********************************************************************/
-
-#include <stdio.h>
-#include <string.h>
-
-#include "include/secp256k1.h"
-#include "include/secp256k1_schnorr.h"
-#include "util.h"
-#include "bench.h"
-
-typedef struct {
-    unsigned char key[32];
-    unsigned char sig[64];
-    unsigned char pubkey[33];
-    size_t pubkeylen;
-} benchmark_schnorr_sig_t;
-
-typedef struct {
-    secp256k1_context *ctx;
-    unsigned char msg[32];
-    benchmark_schnorr_sig_t sigs[64];
-    int numsigs;
-} benchmark_schnorr_verify_t;
-
-static void benchmark_schnorr_init(void* arg) {
-    int i, k;
-    benchmark_schnorr_verify_t* data = (benchmark_schnorr_verify_t*)arg;
-
-    for (i = 0; i < 32; i++) {
-        data->msg[i] = 1 + i;
-    }
-    for (k = 0; k < data->numsigs; k++) {
-        secp256k1_pubkey pubkey;
-        for (i = 0; i < 32; i++) {
-            data->sigs[k].key[i] = 33 + i + k;
-        }
-        secp256k1_schnorr_sign(data->ctx, data->sigs[k].sig, data->msg, data->sigs[k].key, NULL, NULL);
-        data->sigs[k].pubkeylen = 33;
-        CHECK(secp256k1_ec_pubkey_create(data->ctx, &pubkey, data->sigs[k].key));
-        CHECK(secp256k1_ec_pubkey_serialize(data->ctx, data->sigs[k].pubkey, &data->sigs[k].pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED));
-    }
-}
-
-static void benchmark_schnorr_verify(void* arg) {
-    int i;
-    benchmark_schnorr_verify_t* data = (benchmark_schnorr_verify_t*)arg;
-
-    for (i = 0; i < 20000 / data->numsigs; i++) {
-        secp256k1_pubkey pubkey;
-        data->sigs[0].sig[(i >> 8) % 64] ^= (i & 0xFF);
-        CHECK(secp256k1_ec_pubkey_parse(data->ctx, &pubkey, data->sigs[0].pubkey, data->sigs[0].pubkeylen));
-        CHECK(secp256k1_schnorr_verify(data->ctx, data->sigs[0].sig, data->msg, &pubkey) == ((i & 0xFF) == 0));
-        data->sigs[0].sig[(i >> 8) % 64] ^= (i & 0xFF);
-    }
-}
-
-
-
-int main(void) {
-    benchmark_schnorr_verify_t data;
-
-    data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
-
-    data.numsigs = 1;
-    run_benchmark("schnorr_verify", benchmark_schnorr_verify, benchmark_schnorr_init, NULL, &data, 10, 20000);
-
-    secp256k1_context_destroy(data.ctx);
-    return 0;
-}
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_sign.c b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_sign.c
deleted file mode 100644
index ed7224d75..000000000
--- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_sign.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/**********************************************************************
- * Copyright (c) 2014 Pieter Wuille                                   *
Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#include "include/secp256k1.h" -#include "util.h" -#include "bench.h" - -typedef struct { - secp256k1_context* ctx; - unsigned char msg[32]; - unsigned char key[32]; -} bench_sign_t; - -static void bench_sign_setup(void* arg) { - int i; - bench_sign_t *data = (bench_sign_t*)arg; - - for (i = 0; i < 32; i++) { - data->msg[i] = i + 1; - } - for (i = 0; i < 32; i++) { - data->key[i] = i + 65; - } -} - -static void bench_sign(void* arg) { - int i; - bench_sign_t *data = (bench_sign_t*)arg; - - unsigned char sig[74]; - for (i = 0; i < 20000; i++) { - size_t siglen = 74; - int j; - secp256k1_ecdsa_signature signature; - CHECK(secp256k1_ecdsa_sign(data->ctx, &signature, data->msg, data->key, NULL, NULL)); - CHECK(secp256k1_ecdsa_signature_serialize_der(data->ctx, sig, &siglen, &signature)); - for (j = 0; j < 32; j++) { - data->msg[j] = sig[j]; - data->key[j] = sig[j + 32]; - } - } -} - -int main(void) { - bench_sign_t data; - - data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); - - run_benchmark("ecdsa_sign", bench_sign, bench_sign_setup, NULL, &data, 10, 20000); - - secp256k1_context_destroy(data.ctx); - return 0; -} diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_verify.c b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_verify.c deleted file mode 100644 index 418defa0a..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/bench_verify.c +++ /dev/null @@ -1,112 +0,0 @@ -/********************************************************************** - * Copyright (c) 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#include -#include - -#include "include/secp256k1.h" -#include "util.h" -#include "bench.h" - -#ifdef ENABLE_OPENSSL_TESTS -#include -#include -#include -#endif - -typedef struct { - secp256k1_context *ctx; - unsigned char msg[32]; - unsigned char key[32]; - unsigned char sig[72]; - size_t siglen; - unsigned char pubkey[33]; - size_t pubkeylen; -#ifdef ENABLE_OPENSSL_TESTS - EC_GROUP* ec_group; -#endif -} benchmark_verify_t; - -static void benchmark_verify(void* arg) { - int i; - benchmark_verify_t* data = (benchmark_verify_t*)arg; - - for (i = 0; i < 20000; i++) { - secp256k1_pubkey pubkey; - secp256k1_ecdsa_signature sig; - data->sig[data->siglen - 1] ^= (i & 0xFF); - data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); - data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); - CHECK(secp256k1_ec_pubkey_parse(data->ctx, &pubkey, data->pubkey, data->pubkeylen) == 1); - CHECK(secp256k1_ecdsa_signature_parse_der(data->ctx, &sig, data->sig, data->siglen) == 1); - CHECK(secp256k1_ecdsa_verify(data->ctx, &sig, data->msg, &pubkey) == (i == 0)); - data->sig[data->siglen - 1] ^= (i & 0xFF); - data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); - data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); - } -} - -#ifdef ENABLE_OPENSSL_TESTS -static void benchmark_verify_openssl(void* arg) { - int i; - benchmark_verify_t* data = (benchmark_verify_t*)arg; - - for (i = 0; i < 20000; i++) { - data->sig[data->siglen - 1] ^= (i & 0xFF); - data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); - data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); - { - EC_KEY *pkey = 
EC_KEY_new(); - const unsigned char *pubkey = &data->pubkey[0]; - int result; - - CHECK(pkey != NULL); - result = EC_KEY_set_group(pkey, data->ec_group); - CHECK(result); - result = (o2i_ECPublicKey(&pkey, &pubkey, data->pubkeylen)) != NULL; - CHECK(result); - result = ECDSA_verify(0, &data->msg[0], sizeof(data->msg), &data->sig[0], data->siglen, pkey) == (i == 0); - CHECK(result); - EC_KEY_free(pkey); - } - data->sig[data->siglen - 1] ^= (i & 0xFF); - data->sig[data->siglen - 2] ^= ((i >> 8) & 0xFF); - data->sig[data->siglen - 3] ^= ((i >> 16) & 0xFF); - } -} -#endif - -int main(void) { - int i; - secp256k1_pubkey pubkey; - secp256k1_ecdsa_signature sig; - benchmark_verify_t data; - - data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - - for (i = 0; i < 32; i++) { - data.msg[i] = 1 + i; - } - for (i = 0; i < 32; i++) { - data.key[i] = 33 + i; - } - data.siglen = 72; - CHECK(secp256k1_ecdsa_sign(data.ctx, &sig, data.msg, data.key, NULL, NULL)); - CHECK(secp256k1_ecdsa_signature_serialize_der(data.ctx, data.sig, &data.siglen, &sig)); - CHECK(secp256k1_ec_pubkey_create(data.ctx, &pubkey, data.key)); - data.pubkeylen = 33; - CHECK(secp256k1_ec_pubkey_serialize(data.ctx, data.pubkey, &data.pubkeylen, &pubkey, SECP256K1_EC_COMPRESSED) == 1); - - run_benchmark("ecdsa_verify", benchmark_verify, NULL, NULL, &data, 10, 20000); -#ifdef ENABLE_OPENSSL_TESTS - data.ec_group = EC_GROUP_new_by_curve_name(NID_secp256k1); - run_benchmark("ecdsa_verify_openssl", benchmark_verify_openssl, NULL, NULL, &data, 10, 20000); - EC_GROUP_free(data.ec_group); -#endif - - secp256k1_context_destroy(data.ctx); - return 0; -} diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecdsa.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecdsa.h deleted file mode 100644 index 54ae101b9..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecdsa.h +++ /dev/null @@ -1,21 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_ECDSA_ -#define _SECP256K1_ECDSA_ - -#include - -#include "scalar.h" -#include "group.h" -#include "ecmult.h" - -static int secp256k1_ecdsa_sig_parse(secp256k1_scalar *r, secp256k1_scalar *s, const unsigned char *sig, size_t size); -static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const secp256k1_scalar *r, const secp256k1_scalar *s); -static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const secp256k1_scalar* r, const secp256k1_scalar* s, const secp256k1_ge *pubkey, const secp256k1_scalar *message); -static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context *ctx, secp256k1_scalar* r, secp256k1_scalar* s, const secp256k1_scalar *seckey, const secp256k1_scalar *message, const secp256k1_scalar *nonce, int *recid); - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecdsa_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecdsa_impl.h deleted file mode 100644 index 453bb1188..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecdsa_impl.h +++ /dev/null @@ -1,315 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013-2015 Pieter Wuille * - * Distributed under the MIT 
software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - - -#ifndef _SECP256K1_ECDSA_IMPL_H_ -#define _SECP256K1_ECDSA_IMPL_H_ - -#include "scalar.h" -#include "field.h" -#include "group.h" -#include "ecmult.h" -#include "ecmult_gen.h" -#include "ecdsa.h" - -/** Group order for secp256k1 defined as 'n' in "Standards for Efficient Cryptography" (SEC2) 2.7.1 - * sage: for t in xrange(1023, -1, -1): - * .. p = 2**256 - 2**32 - t - * .. if p.is_prime(): - * .. print '%x'%p - * .. break - * 'fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f' - * sage: a = 0 - * sage: b = 7 - * sage: F = FiniteField (p) - * sage: '%x' % (EllipticCurve ([F (a), F (b)]).order()) - * 'fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141' - */ -static const secp256k1_fe secp256k1_ecdsa_const_order_as_fe = SECP256K1_FE_CONST( - 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, - 0xBAAEDCE6UL, 0xAF48A03BUL, 0xBFD25E8CUL, 0xD0364141UL -); - -/** Difference between field and order; the values 'p' and 'n' are defined in - * "Standards for Efficient Cryptography" (SEC2) 2.7.1. - * sage: p = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F - * sage: a = 0 - * sage: b = 7 - * sage: F = FiniteField (p) - * sage: '%x' % (p - EllipticCurve ([F (a), F (b)]).order()) - * '14551231950b75fc4402da1722fc9baee' - */ -static const secp256k1_fe secp256k1_ecdsa_const_p_minus_order = SECP256K1_FE_CONST( - 0, 0, 0, 1, 0x45512319UL, 0x50B75FC4UL, 0x402DA172UL, 0x2FC9BAEEUL -); - -static int secp256k1_der_read_len(const unsigned char **sigp, const unsigned char *sigend) { - int lenleft, b1; - size_t ret = 0; - if (*sigp >= sigend) { - return -1; - } - b1 = *((*sigp)++); - if (b1 == 0xFF) { - /* X.690-0207 8.1.3.5.c the value 0xFF shall not be used. */ - return -1; - } - if ((b1 & 0x80) == 0) { - /* X.690-0207 8.1.3.4 short form length octets */ - return b1; - } - if (b1 == 0x80) { - /* Indefinite length is not allowed in DER. */ - return -1; - } - /* X.690-0207 8.1.3.5 long form length octets */ - lenleft = b1 & 0x7F; - if (lenleft > sigend - *sigp) { - return -1; - } - if (**sigp == 0) { - /* Not the shortest possible length encoding. */ - return -1; - } - if ((size_t)lenleft > sizeof(size_t)) { - /* The resulting length would exceed the range of a size_t, so - * certainly longer than the passed array size. - */ - return -1; - } - while (lenleft > 0) { - if ((ret >> ((sizeof(size_t) - 1) * 8)) != 0) { - /* The shift below would overflow ret, so the encoded length is - * certainly larger than the passed array size. - */ - return -1; - } - ret = (ret << 8) | **sigp; - if (ret + lenleft > (size_t)(sigend - *sigp)) { - /* Result exceeds the length of the passed array. */ - return -1; - } - (*sigp)++; - lenleft--; - } - if (ret < 128) { - /* Not the shortest possible length encoding. */ - return -1; - } - return ret; -} - -static int secp256k1_der_parse_integer(secp256k1_scalar *r, const unsigned char **sig, const unsigned char *sigend) { - int overflow = 0; - unsigned char ra[32] = {0}; - int rlen; - - if (*sig == sigend || **sig != 0x02) { - /* Not a primitive integer (X.690-0207 8.3.1). */ - return 0; - } - (*sig)++; - rlen = secp256k1_der_read_len(sig, sigend); - if (rlen <= 0 || (*sig) + rlen > sigend) { - /* Exceeds bounds or not at least length 1 (X.690-0207 8.3.1). */ - return 0; - } - if (**sig == 0x00 && rlen > 1 && (((*sig)[1]) & 0x80) == 0x00) { - /* Excessive 0x00 padding.
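- * For example, 0x02 0x02 0x00 0x7F encodes the integer 127 with a redundant leading zero octet; DER requires the minimal form 0x02 0x01 0x7F, so the padded encoding is rejected here.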
*/ - return 0; - } - if (**sig == 0xFF && rlen > 1 && (((*sig)[1]) & 0x80) == 0x80) { - /* Excessive 0xFF padding. */ - return 0; - } - if ((**sig & 0x80) == 0x80) { - /* Negative. */ - overflow = 1; - } - while (rlen > 0 && **sig == 0) { - /* Skip leading zero bytes */ - rlen--; - (*sig)++; - } - if (rlen > 32) { - overflow = 1; - } - if (!overflow) { - memcpy(ra + 32 - rlen, *sig, rlen); - secp256k1_scalar_set_b32(r, ra, &overflow); - } - if (overflow) { - secp256k1_scalar_set_int(r, 0); - } - (*sig) += rlen; - return 1; -} - -static int secp256k1_ecdsa_sig_parse(secp256k1_scalar *rr, secp256k1_scalar *rs, const unsigned char *sig, size_t size) { - const unsigned char *sigend = sig + size; - int rlen; - if (sig == sigend || *(sig++) != 0x30) { - /* The encoding doesn't start with a constructed sequence (X.690-0207 8.9.1). */ - return 0; - } - rlen = secp256k1_der_read_len(&sig, sigend); - if (rlen < 0 || sig + rlen > sigend) { - /* Tuple exceeds bounds */ - return 0; - } - if (sig + rlen != sigend) { - /* Garbage after tuple. */ - return 0; - } - - if (!secp256k1_der_parse_integer(rr, &sig, sigend)) { - return 0; - } - if (!secp256k1_der_parse_integer(rs, &sig, sigend)) { - return 0; - } - - if (sig != sigend) { - /* Trailing garbage inside tuple. */ - return 0; - } - - return 1; -} - -static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, size_t *size, const secp256k1_scalar* ar, const secp256k1_scalar* as) { - unsigned char r[33] = {0}, s[33] = {0}; - unsigned char *rp = r, *sp = s; - size_t lenR = 33, lenS = 33; - secp256k1_scalar_get_b32(&r[1], ar); - secp256k1_scalar_get_b32(&s[1], as); - while (lenR > 1 && rp[0] == 0 && rp[1] < 0x80) { lenR--; rp++; } - while (lenS > 1 && sp[0] == 0 && sp[1] < 0x80) { lenS--; sp++; } - if (*size < 6+lenS+lenR) { - *size = 6 + lenS + lenR; - return 0; - } - *size = 6 + lenS + lenR; - sig[0] = 0x30; - sig[1] = 4 + lenS + lenR; - sig[2] = 0x02; - sig[3] = lenR; - memcpy(sig+4, rp, lenR); - sig[4+lenR] = 0x02; - sig[5+lenR] = lenS; - memcpy(sig+lenR+6, sp, lenS); - return 1; -} - -static int secp256k1_ecdsa_sig_verify(const secp256k1_ecmult_context *ctx, const secp256k1_scalar *sigr, const secp256k1_scalar *sigs, const secp256k1_ge *pubkey, const secp256k1_scalar *message) { - unsigned char c[32]; - secp256k1_scalar sn, u1, u2; -#if !defined(EXHAUSTIVE_TEST_ORDER) - secp256k1_fe xr; -#endif - secp256k1_gej pubkeyj; - secp256k1_gej pr; - - if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) { - return 0; - } - - secp256k1_scalar_inverse_var(&sn, sigs); - secp256k1_scalar_mul(&u1, &sn, message); - secp256k1_scalar_mul(&u2, &sn, sigr); - secp256k1_gej_set_ge(&pubkeyj, pubkey); - secp256k1_ecmult(ctx, &pr, &pubkeyj, &u2, &u1); - if (secp256k1_gej_is_infinity(&pr)) { - return 0; - } - -#if defined(EXHAUSTIVE_TEST_ORDER) -{ - secp256k1_scalar computed_r; - secp256k1_ge pr_ge; - secp256k1_ge_set_gej(&pr_ge, &pr); - secp256k1_fe_normalize(&pr_ge.x); - - secp256k1_fe_get_b32(c, &pr_ge.x); - secp256k1_scalar_set_b32(&computed_r, c, NULL); - return secp256k1_scalar_eq(sigr, &computed_r); -} -#else - secp256k1_scalar_get_b32(c, sigr); - secp256k1_fe_set_b32(&xr, c); - - /** We now have the recomputed R point in pr, and its claimed x coordinate (modulo n) - * in xr. Naively, we would extract the x coordinate from pr (requiring a inversion modulo p), - * compute the remainder modulo n, and compare it to xr. However: - * - * xr == X(pr) mod n - * <=> exists h. 
(xr + h * n < p && xr + h * n == X(pr)) - * [Since 2 * n > p, h can only be 0 or 1] - * <=> (xr == X(pr)) || (xr + n < p && xr + n == X(pr)) - * [In Jacobian coordinates, X(pr) is pr.x / pr.z^2 mod p] - * <=> (xr == pr.x / pr.z^2 mod p) || (xr + n < p && xr + n == pr.x / pr.z^2 mod p) - * [Multiplying both sides of the equations by pr.z^2 mod p] - * <=> (xr * pr.z^2 mod p == pr.x) || (xr + n < p && (xr + n) * pr.z^2 mod p == pr.x) - * - * Thus, we can avoid the inversion, but we have to check both cases separately. - * secp256k1_gej_eq_x implements the (xr * pr.z^2 mod p == pr.x) test. - */ - if (secp256k1_gej_eq_x_var(&xr, &pr)) { - /* xr * pr.z^2 mod p == pr.x, so the signature is valid. */ - return 1; - } - if (secp256k1_fe_cmp_var(&xr, &secp256k1_ecdsa_const_p_minus_order) >= 0) { - /* xr + n >= p, so we can skip testing the second case. */ - return 0; - } - secp256k1_fe_add(&xr, &secp256k1_ecdsa_const_order_as_fe); - if (secp256k1_gej_eq_x_var(&xr, &pr)) { - /* (xr + n) * pr.z^2 mod p == pr.x, so the signature is valid. */ - return 1; - } - return 0; -#endif -} - -static int secp256k1_ecdsa_sig_sign(const secp256k1_ecmult_gen_context *ctx, secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *seckey, const secp256k1_scalar *message, const secp256k1_scalar *nonce, int *recid) { - unsigned char b[32]; - secp256k1_gej rp; - secp256k1_ge r; - secp256k1_scalar n; - int overflow = 0; - - secp256k1_ecmult_gen(ctx, &rp, nonce); - secp256k1_ge_set_gej(&r, &rp); - secp256k1_fe_normalize(&r.x); - secp256k1_fe_normalize(&r.y); - secp256k1_fe_get_b32(b, &r.x); - secp256k1_scalar_set_b32(sigr, b, &overflow); - /* These two conditions should be checked before calling */ - VERIFY_CHECK(!secp256k1_scalar_is_zero(sigr)); - VERIFY_CHECK(overflow == 0); - - if (recid) { - /* The overflow condition is cryptographically unreachable as hitting it requires finding the discrete log - * of some P where P.x >= order, and only 1 in about 2^127 points meet this criteria. - */ - *recid = (overflow ? 2 : 0) | (secp256k1_fe_is_odd(&r.y) ? 
1 : 0); - } - secp256k1_scalar_mul(&n, sigr, seckey); - secp256k1_scalar_add(&n, &n, message); - secp256k1_scalar_inverse(sigs, nonce); - secp256k1_scalar_mul(sigs, sigs, &n); - secp256k1_scalar_clear(&n); - secp256k1_gej_clear(&rp); - secp256k1_ge_clear(&r); - if (secp256k1_scalar_is_zero(sigs)) { - return 0; - } - if (secp256k1_scalar_is_high(sigs)) { - secp256k1_scalar_negate(sigs, sigs); - if (recid) { - *recid ^= 1; - } - } - return 1; -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/eckey.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/eckey.h deleted file mode 100644 index 42739a3be..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/eckey.h +++ /dev/null @@ -1,25 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_ECKEY_ -#define _SECP256K1_ECKEY_ - -#include - -#include "group.h" -#include "scalar.h" -#include "ecmult.h" -#include "ecmult_gen.h" - -static int secp256k1_eckey_pubkey_parse(secp256k1_ge *elem, const unsigned char *pub, size_t size); -static int secp256k1_eckey_pubkey_serialize(secp256k1_ge *elem, unsigned char *pub, size_t *size, int compressed); - -static int secp256k1_eckey_privkey_tweak_add(secp256k1_scalar *key, const secp256k1_scalar *tweak); -static int secp256k1_eckey_pubkey_tweak_add(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak); -static int secp256k1_eckey_privkey_tweak_mul(secp256k1_scalar *key, const secp256k1_scalar *tweak); -static int secp256k1_eckey_pubkey_tweak_mul(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak); - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/eckey_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/eckey_impl.h deleted file mode 100644 index ce38071ac..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/eckey_impl.h +++ /dev/null @@ -1,99 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_ECKEY_IMPL_H_ -#define _SECP256K1_ECKEY_IMPL_H_ - -#include "eckey.h" - -#include "scalar.h" -#include "field.h" -#include "group.h" -#include "ecmult_gen.h" - -static int secp256k1_eckey_pubkey_parse(secp256k1_ge *elem, const unsigned char *pub, size_t size) { - if (size == 33 && (pub[0] == 0x02 || pub[0] == 0x03)) { - secp256k1_fe x; - return secp256k1_fe_set_b32(&x, pub+1) && secp256k1_ge_set_xo_var(elem, &x, pub[0] == 0x03); - } else if (size == 65 && (pub[0] == 0x04 || pub[0] == 0x06 || pub[0] == 0x07)) { - secp256k1_fe x, y; - if (!secp256k1_fe_set_b32(&x, pub+1) || !secp256k1_fe_set_b32(&y, pub+33)) { - return 0; - } - secp256k1_ge_set_xy(elem, &x, &y); - if ((pub[0] == 0x06 || pub[0] == 0x07) && secp256k1_fe_is_odd(&y) != (pub[0] == 0x07)) { - return 0; - } - return secp256k1_ge_is_valid_var(elem); - } else { - return 0; - } -} - -static int secp256k1_eckey_pubkey_serialize(secp256k1_ge *elem, unsigned char *pub, size_t *size, int 
compressed) { - if (secp256k1_ge_is_infinity(elem)) { - return 0; - } - secp256k1_fe_normalize_var(&elem->x); - secp256k1_fe_normalize_var(&elem->y); - secp256k1_fe_get_b32(&pub[1], &elem->x); - if (compressed) { - *size = 33; - pub[0] = 0x02 | (secp256k1_fe_is_odd(&elem->y) ? 0x01 : 0x00); - } else { - *size = 65; - pub[0] = 0x04; - secp256k1_fe_get_b32(&pub[33], &elem->y); - } - return 1; -} - -static int secp256k1_eckey_privkey_tweak_add(secp256k1_scalar *key, const secp256k1_scalar *tweak) { - secp256k1_scalar_add(key, key, tweak); - if (secp256k1_scalar_is_zero(key)) { - return 0; - } - return 1; -} - -static int secp256k1_eckey_pubkey_tweak_add(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak) { - secp256k1_gej pt; - secp256k1_scalar one; - secp256k1_gej_set_ge(&pt, key); - secp256k1_scalar_set_int(&one, 1); - secp256k1_ecmult(ctx, &pt, &pt, &one, tweak); - - if (secp256k1_gej_is_infinity(&pt)) { - return 0; - } - secp256k1_ge_set_gej(key, &pt); - return 1; -} - -static int secp256k1_eckey_privkey_tweak_mul(secp256k1_scalar *key, const secp256k1_scalar *tweak) { - if (secp256k1_scalar_is_zero(tweak)) { - return 0; - } - - secp256k1_scalar_mul(key, key, tweak); - return 1; -} - -static int secp256k1_eckey_pubkey_tweak_mul(const secp256k1_ecmult_context *ctx, secp256k1_ge *key, const secp256k1_scalar *tweak) { - secp256k1_scalar zero; - secp256k1_gej pt; - if (secp256k1_scalar_is_zero(tweak)) { - return 0; - } - - secp256k1_scalar_set_int(&zero, 0); - secp256k1_gej_set_ge(&pt, key); - secp256k1_ecmult(ctx, &pt, &pt, tweak, &zero); - secp256k1_ge_set_gej(key, &pt); - return 1; -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult.h deleted file mode 100644 index 20484134f..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult.h +++ /dev/null @@ -1,31 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_ECMULT_ -#define _SECP256K1_ECMULT_ - -#include "num.h" -#include "group.h" - -typedef struct { - /* For accelerating the computation of a*P + b*G: */ - secp256k1_ge_storage (*pre_g)[]; /* odd multiples of the generator */ -#ifdef USE_ENDOMORPHISM - secp256k1_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */ -#endif -} secp256k1_ecmult_context; - -static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx); -static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const secp256k1_callback *cb); -static void secp256k1_ecmult_context_clone(secp256k1_ecmult_context *dst, - const secp256k1_ecmult_context *src, const secp256k1_callback *cb); -static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx); -static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx); - -/** Double multiply: R = na*A + ng*G */ -static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng); - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult_const.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult_const.h deleted file mode 100644 
index 2b0097655..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult_const.h +++ /dev/null @@ -1,15 +0,0 @@ -/********************************************************************** - * Copyright (c) 2015 Andrew Poelstra * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_ECMULT_CONST_ -#define _SECP256K1_ECMULT_CONST_ - -#include "scalar.h" -#include "group.h" - -static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *q); - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult_const_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult_const_impl.h deleted file mode 100644 index 0db314c48..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult_const_impl.h +++ /dev/null @@ -1,239 +0,0 @@ -/********************************************************************** - * Copyright (c) 2015 Pieter Wuille, Andrew Poelstra * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_ECMULT_CONST_IMPL_ -#define _SECP256K1_ECMULT_CONST_IMPL_ - -#include "scalar.h" -#include "group.h" -#include "ecmult_const.h" -#include "ecmult_impl.h" - -#ifdef USE_ENDOMORPHISM - #define WNAF_BITS 128 -#else - #define WNAF_BITS 256 -#endif -#define WNAF_SIZE(w) ((WNAF_BITS + (w) - 1) / (w)) - -/* This is like `ECMULT_TABLE_GET_GE` but is constant time */ -#define ECMULT_CONST_TABLE_GET_GE(r,pre,n,w) do { \ - int m; \ - int abs_n = (n) * (((n) > 0) * 2 - 1); \ - int idx_n = abs_n / 2; \ - secp256k1_fe neg_y; \ - VERIFY_CHECK(((n) & 1) == 1); \ - VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ - VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ - VERIFY_SETUP(secp256k1_fe_clear(&(r)->x)); \ - VERIFY_SETUP(secp256k1_fe_clear(&(r)->y)); \ - for (m = 0; m < ECMULT_TABLE_SIZE(w); m++) { \ - /* This loop is used to avoid secret data in array indices. See - * the comment in ecmult_gen_impl.h for rationale. */ \ - secp256k1_fe_cmov(&(r)->x, &(pre)[m].x, m == idx_n); \ - secp256k1_fe_cmov(&(r)->y, &(pre)[m].y, m == idx_n); \ - } \ - (r)->infinity = 0; \ - secp256k1_fe_negate(&neg_y, &(r)->y, 1); \ - secp256k1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \ -} while(0) - - -/** Convert a number to WNAF notation. The number becomes represented by sum(2^{wi} * wnaf[i], i=0..return_val) - * with the following guarantees: - * - each wnaf[i] an odd integer between -(1 << w) and (1 << w) - * - each wnaf[i] is nonzero - * - the number of words set is returned; this is always (WNAF_BITS + w - 1) / w - * - * Adapted from `The Width-w NAF Method Provides Small Memory and Fast Elliptic Scalar - * Multiplications Secure against Side Channel Attacks`, Okeya and Tagaki. M. Joye (Ed.) - * CT-RSA 2003, LNCS 2612, pp. 328-443, 2003. Springer-Verlagy Berlin Heidelberg 2003 - * - * Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 
335 - */ -static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w) { - int global_sign; - int skew = 0; - int word = 0; - - /* 1 2 3 */ - int u_last; - int u; - - int flip; - int bit; - secp256k1_scalar neg_s; - int not_neg_one; - /* Note that we cannot handle even numbers by negating them to be odd, as is - * done in other implementations, since if our scalars were specified to have - * width < 256 for performance reasons, their negations would have width 256 - * and we'd lose any performance benefit. Instead, we use a technique from - * Section 4.2 of the Okeya/Tagaki paper, which is to add either 1 (for even) - * or 2 (for odd) to the number we are encoding, returning a skew value indicating - * this, and having the caller compensate after doing the multiplication. */ - - /* Negative numbers will be negated to keep their bit representation below the maximum width */ - flip = secp256k1_scalar_is_high(&s); - /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */ - bit = flip ^ !secp256k1_scalar_is_even(&s); - /* We check for negative one, since adding 2 to it will cause an overflow */ - secp256k1_scalar_negate(&neg_s, &s); - not_neg_one = !secp256k1_scalar_is_one(&neg_s); - secp256k1_scalar_cadd_bit(&s, bit, not_neg_one); - /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects - * that we added two to it and flipped it. In fact for -1 these operations are - * identical. We only flipped, but since skewing is required (in the sense that - * the skew must be 1 or 2, never zero) and flipping is not, we need to change - * our flags to claim that we only skewed. */ - global_sign = secp256k1_scalar_cond_negate(&s, flip); - global_sign *= not_neg_one * 2 - 1; - skew = 1 << bit; - - /* 4 */ - u_last = secp256k1_scalar_shr_int(&s, w); - while (word * w < WNAF_BITS) { - int sign; - int even; - - /* 4.1 4.4 */ - u = secp256k1_scalar_shr_int(&s, w); - /* 4.2 */ - even = ((u & 1) == 0); - sign = 2 * (u_last > 0) - 1; - u += sign * even; - u_last -= sign * even * (1 << w); - - /* 4.3, adapted for global sign change */ - wnaf[word++] = u_last * global_sign; - - u_last = u; - } - wnaf[word] = u * global_sign; - - VERIFY_CHECK(secp256k1_scalar_is_zero(&s)); - VERIFY_CHECK(word == WNAF_SIZE(w)); - return skew; -} - - -static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, const secp256k1_scalar *scalar) { - secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; - secp256k1_ge tmpa; - secp256k1_fe Z; - - int skew_1; - int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)]; -#ifdef USE_ENDOMORPHISM - secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; - int wnaf_lam[1 + WNAF_SIZE(WINDOW_A - 1)]; - int skew_lam; - secp256k1_scalar q_1, q_lam; -#endif - - int i; - secp256k1_scalar sc = *scalar; - - /* build wnaf representation for q. */ -#ifdef USE_ENDOMORPHISM - /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */ - secp256k1_scalar_split_lambda(&q_1, &q_lam, &sc); - skew_1 = secp256k1_wnaf_const(wnaf_1, q_1, WINDOW_A - 1); - skew_lam = secp256k1_wnaf_const(wnaf_lam, q_lam, WINDOW_A - 1); -#else - skew_1 = secp256k1_wnaf_const(wnaf_1, sc, WINDOW_A - 1); -#endif - - /* Calculate odd multiples of a. - * All multiples are brought to the same Z 'denominator', which is stored - * in Z. Due to secp256k1' isomorphism we can do all operations pretending - * that the Z coordinate was 1, use affine addition formulae, and correct - * the Z coordinate of the result once at the end. 
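- * Concretely, once all entries share the common denominator Z, a stored pair (x, y) represents the affine point (x/Z^2, y/Z^3), per the usual Jacobian-coordinate convention.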
- */ - secp256k1_gej_set_ge(r, a); - secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, r); - for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { - secp256k1_fe_normalize_weak(&pre_a[i].y); - } -#ifdef USE_ENDOMORPHISM - for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { - secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]); - } -#endif - - /* first loop iteration (separated out so we can directly set r, rather - * than having it start at infinity, get doubled several times, then have - * its new value added to it) */ - i = wnaf_1[WNAF_SIZE(WINDOW_A - 1)]; - VERIFY_CHECK(i != 0); - ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, i, WINDOW_A); - secp256k1_gej_set_ge(r, &tmpa); -#ifdef USE_ENDOMORPHISM - i = wnaf_lam[WNAF_SIZE(WINDOW_A - 1)]; - VERIFY_CHECK(i != 0); - ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, i, WINDOW_A); - secp256k1_gej_add_ge(r, r, &tmpa); -#endif - /* remaining loop iterations */ - for (i = WNAF_SIZE(WINDOW_A - 1) - 1; i >= 0; i--) { - int n; - int j; - for (j = 0; j < WINDOW_A - 1; ++j) { - secp256k1_gej_double_nonzero(r, r, NULL); - } - - n = wnaf_1[i]; - ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A); - VERIFY_CHECK(n != 0); - secp256k1_gej_add_ge(r, r, &tmpa); -#ifdef USE_ENDOMORPHISM - n = wnaf_lam[i]; - ECMULT_CONST_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A); - VERIFY_CHECK(n != 0); - secp256k1_gej_add_ge(r, r, &tmpa); -#endif - } - - secp256k1_fe_mul(&r->z, &r->z, &Z); - - { - /* Correct for wNAF skew */ - secp256k1_ge correction = *a; - secp256k1_ge_storage correction_1_stor; -#ifdef USE_ENDOMORPHISM - secp256k1_ge_storage correction_lam_stor; -#endif - secp256k1_ge_storage a2_stor; - secp256k1_gej tmpj; - secp256k1_gej_set_ge(&tmpj, &correction); - secp256k1_gej_double_var(&tmpj, &tmpj, NULL); - secp256k1_ge_set_gej(&correction, &tmpj); - secp256k1_ge_to_storage(&correction_1_stor, a); -#ifdef USE_ENDOMORPHISM - secp256k1_ge_to_storage(&correction_lam_stor, a); -#endif - secp256k1_ge_to_storage(&a2_stor, &correction); - - /* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */ - secp256k1_ge_storage_cmov(&correction_1_stor, &a2_stor, skew_1 == 2); -#ifdef USE_ENDOMORPHISM - secp256k1_ge_storage_cmov(&correction_lam_stor, &a2_stor, skew_lam == 2); -#endif - - /* Apply the correction */ - secp256k1_ge_from_storage(&correction, &correction_1_stor); - secp256k1_ge_neg(&correction, &correction); - secp256k1_gej_add_ge(r, r, &correction); - -#ifdef USE_ENDOMORPHISM - secp256k1_ge_from_storage(&correction, &correction_lam_stor); - secp256k1_ge_neg(&correction, &correction); - secp256k1_ge_mul_lambda(&correction, &correction); - secp256k1_gej_add_ge(r, r, &correction); -#endif - } -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult_gen.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult_gen.h deleted file mode 100644 index eb2cc9ead..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult_gen.h +++ /dev/null @@ -1,43 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_ECMULT_GEN_ -#define _SECP256K1_ECMULT_GEN_ - -#include "scalar.h" -#include "group.h" - -typedef struct { - /* For accelerating the computation of a*G: - * To harden against timing attacks, use the 
following mechanism: - * * Break up the multiplicand into groups of 4 bits, called n_0, n_1, n_2, ..., n_63. - * * Compute sum(n_i * 16^i * G + U_i, i=0..63), where: - * * U_i = U * 2^i (for i=0..62) - * * U_i = U * (1-2^63) (for i=63) - * where U is a point with no known corresponding scalar. Note that sum(U_i, i=0..63) = 0. - * For each i, and each of the 16 possible values of n_i, (n_i * 16^i * G + U_i) is - * precomputed (call it prec(i, n_i)). The formula now becomes sum(prec(i, n_i), i=0..63). - * None of the resulting prec group elements have a known scalar, and neither do any of - * the intermediate sums while computing a*G. - */ - secp256k1_ge_storage (*prec)[64][16]; /* prec[j][i] = 16^j * i * G + U_i */ - secp256k1_scalar blind; - secp256k1_gej initial; -} secp256k1_ecmult_gen_context; - -static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context* ctx); -static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context* ctx, const secp256k1_callback* cb); -static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context *dst, - const secp256k1_ecmult_gen_context* src, const secp256k1_callback* cb); -static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context* ctx); -static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context* ctx); - -/** Multiply with the generator: R = a*G */ -static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context* ctx, secp256k1_gej *r, const secp256k1_scalar *a); - -static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const unsigned char *seed32); - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult_gen_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult_gen_impl.h deleted file mode 100644 index 35f254607..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult_gen_impl.h +++ /dev/null @@ -1,210 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014, 2015 Pieter Wuille, Gregory Maxwell * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_ECMULT_GEN_IMPL_H_ -#define _SECP256K1_ECMULT_GEN_IMPL_H_ - -#include "scalar.h" -#include "group.h" -#include "ecmult_gen.h" -#include "hash_impl.h" -#ifdef USE_ECMULT_STATIC_PRECOMPUTATION -#include "ecmult_static_context.h" -#endif -static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context *ctx) { - ctx->prec = NULL; -} - -static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, const secp256k1_callback* cb) { -#ifndef USE_ECMULT_STATIC_PRECOMPUTATION - secp256k1_ge prec[1024]; - secp256k1_gej gj; - secp256k1_gej nums_gej; - int i, j; -#endif - - if (ctx->prec != NULL) { - return; - } -#ifndef USE_ECMULT_STATIC_PRECOMPUTATION - ctx->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*ctx->prec)); - - /* get the generator */ - secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g); - - /* Construct a group element with no known corresponding scalar (nothing up my sleeve). 
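- * The x coordinate used below is simply the 32 ASCII bytes of an English sentence, so no party plausibly knows a scalar k with k*G equal to the resulting point.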
*/ - { - static const unsigned char nums_b32[33] = "The scalar for this x is unknown"; - secp256k1_fe nums_x; - secp256k1_ge nums_ge; - int r; - r = secp256k1_fe_set_b32(&nums_x, nums_b32); - (void)r; - VERIFY_CHECK(r); - r = secp256k1_ge_set_xo_var(&nums_ge, &nums_x, 0); - (void)r; - VERIFY_CHECK(r); - secp256k1_gej_set_ge(&nums_gej, &nums_ge); - /* Add G to make the bits in x uniformly distributed. */ - secp256k1_gej_add_ge_var(&nums_gej, &nums_gej, &secp256k1_ge_const_g, NULL); - } - - /* compute prec. */ - { - secp256k1_gej precj[1024]; /* Jacobian versions of prec. */ - secp256k1_gej gbase; - secp256k1_gej numsbase; - gbase = gj; /* 16^j * G */ - numsbase = nums_gej; /* 2^j * nums. */ - for (j = 0; j < 64; j++) { - /* Set precj[j*16 .. j*16+15] to (numsbase, numsbase + gbase, ..., numsbase + 15*gbase). */ - precj[j*16] = numsbase; - for (i = 1; i < 16; i++) { - secp256k1_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase, NULL); - } - /* Multiply gbase by 16. */ - for (i = 0; i < 4; i++) { - secp256k1_gej_double_var(&gbase, &gbase, NULL); - } - /* Multiply numbase by 2. */ - secp256k1_gej_double_var(&numsbase, &numsbase, NULL); - if (j == 62) { - /* In the last iteration, numsbase is (1 - 2^j) * nums instead. */ - secp256k1_gej_neg(&numsbase, &numsbase); - secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL); - } - } - secp256k1_ge_set_all_gej_var(prec, precj, 1024, cb); - } - for (j = 0; j < 64; j++) { - for (i = 0; i < 16; i++) { - secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]); - } - } -#else - (void)cb; - ctx->prec = (secp256k1_ge_storage (*)[64][16])secp256k1_ecmult_static_context; -#endif - secp256k1_ecmult_gen_blind(ctx, NULL); -} - -static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context* ctx) { - return ctx->prec != NULL; -} - -static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context *dst, - const secp256k1_ecmult_gen_context *src, const secp256k1_callback* cb) { - if (src->prec == NULL) { - dst->prec = NULL; - } else { -#ifndef USE_ECMULT_STATIC_PRECOMPUTATION - dst->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*dst->prec)); - memcpy(dst->prec, src->prec, sizeof(*dst->prec)); -#else - (void)cb; - dst->prec = src->prec; -#endif - dst->initial = src->initial; - dst->blind = src->blind; - } -} - -static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context *ctx) { -#ifndef USE_ECMULT_STATIC_PRECOMPUTATION - free(ctx->prec); -#endif - secp256k1_scalar_clear(&ctx->blind); - secp256k1_gej_clear(&ctx->initial); - ctx->prec = NULL; -} - -static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp256k1_gej *r, const secp256k1_scalar *gn) { - secp256k1_ge add; - secp256k1_ge_storage adds; - secp256k1_scalar gnb; - int bits; - int i, j; - memset(&adds, 0, sizeof(adds)); - *r = ctx->initial; - /* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */ - secp256k1_scalar_add(&gnb, gn, &ctx->blind); - add.infinity = 0; - for (j = 0; j < 64; j++) { - bits = secp256k1_scalar_get_bits(&gnb, j * 4, 4); - for (i = 0; i < 16; i++) { - /** This uses a conditional move to avoid any secret data in array indexes. - * _Any_ use of secret indexes has been demonstrated to result in timing - * sidechannels, even when the cache-line access patterns are uniform. - * See also: - * "A word of warning", CHES 2013 Rump Session, by Daniel J. 
Bernstein and Peter Schwabe - * (https://cryptojedi.org/peter/data/chesrump-20130822.pdf) and - * "Cache Attacks and Countermeasures: the Case of AES", RSA 2006, - * by Dag Arne Osvik, Adi Shamir, and Eran Tromer - * (http://www.tau.ac.il/~tromer/papers/cache.pdf) - */ - secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits); - } - secp256k1_ge_from_storage(&add, &adds); - secp256k1_gej_add_ge(r, r, &add); - } - bits = 0; - secp256k1_ge_clear(&add); - secp256k1_scalar_clear(&gnb); -} - -/* Setup blinding values for secp256k1_ecmult_gen. */ -static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const unsigned char *seed32) { - secp256k1_scalar b; - secp256k1_gej gb; - secp256k1_fe s; - unsigned char nonce32[32]; - secp256k1_rfc6979_hmac_sha256_t rng; - int retry; - unsigned char keydata[64] = {0}; - if (seed32 == NULL) { - /* When seed is NULL, reset the initial point and blinding value. */ - secp256k1_gej_set_ge(&ctx->initial, &secp256k1_ge_const_g); - secp256k1_gej_neg(&ctx->initial, &ctx->initial); - secp256k1_scalar_set_int(&ctx->blind, 1); - } - /* The prior blinding value (if not reset) is chained forward by including it in the hash. */ - secp256k1_scalar_get_b32(nonce32, &ctx->blind); - /** Using a CSPRNG allows a failure free interface, avoids needing large amounts of random data, - * and guards against weak or adversarial seeds. This is a simpler and safer interface than - * asking the caller for blinding values directly and expecting them to retry on failure. - */ - memcpy(keydata, nonce32, 32); - if (seed32 != NULL) { - memcpy(keydata + 32, seed32, 32); - } - secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32); - memset(keydata, 0, sizeof(keydata)); - /* Retry for out of range results to achieve uniformity. */ - do { - secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); - retry = !secp256k1_fe_set_b32(&s, nonce32); - retry |= secp256k1_fe_is_zero(&s); - } while (retry); /* This branch true is cryptographically unreachable. Requires sha256_hmac output > Fp. */ - /* Randomize the projection to defend against multiplier sidechannels. */ - secp256k1_gej_rescale(&ctx->initial, &s); - secp256k1_fe_clear(&s); - do { - secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); - secp256k1_scalar_set_b32(&b, nonce32, &retry); - /* A blinding value of 0 works, but would undermine the projection hardening. */ - retry |= secp256k1_scalar_is_zero(&b); - } while (retry); /* This branch true is cryptographically unreachable. Requires sha256_hmac output > order. 
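- * (A uniformly random 256-bit value reaches or exceeds the group order with probability of roughly 2^-127, so in practice the loop body executes exactly once.)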
*/ - secp256k1_rfc6979_hmac_sha256_finalize(&rng); - memset(nonce32, 0, 32); - secp256k1_ecmult_gen(ctx, &gb, &b); - secp256k1_scalar_negate(&b, &b); - ctx->blind = b; - ctx->initial = gb; - secp256k1_scalar_clear(&b); - secp256k1_gej_clear(&gb); -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult_impl.h deleted file mode 100644 index 4e40104ad..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/ecmult_impl.h +++ /dev/null @@ -1,406 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_ECMULT_IMPL_H_ -#define _SECP256K1_ECMULT_IMPL_H_ - -#include <string.h> - -#include "group.h" -#include "scalar.h" -#include "ecmult.h" - -#if defined(EXHAUSTIVE_TEST_ORDER) -/* We need to lower these values for exhaustive tests because - * the tables cannot have infinities in them (this breaks the - * affine-isomorphism stuff which tracks z-ratios) */ -# if EXHAUSTIVE_TEST_ORDER > 128 -# define WINDOW_A 5 -# define WINDOW_G 8 -# elif EXHAUSTIVE_TEST_ORDER > 8 -# define WINDOW_A 4 -# define WINDOW_G 4 -# else -# define WINDOW_A 2 -# define WINDOW_G 2 -# endif -#else -/* optimal for 128-bit and 256-bit exponents. */ -#define WINDOW_A 5 -/** larger numbers may result in slightly better performance, at the cost of - exponentially larger precomputed tables. */ -#ifdef USE_ENDOMORPHISM -/** Two tables for window size 15: 1.375 MiB. */ -#define WINDOW_G 15 -#else -/** One table for window size 16: 1.375 MiB. */ -#define WINDOW_G 16 -#endif -#endif - -/** The number of entries a table with precomputed multiples needs to have. */ -#define ECMULT_TABLE_SIZE(w) (1 << ((w)-2)) - -/** Fill a table 'prej' with precomputed odd multiples of a. Prej will contain - * the values [1*a,3*a,...,(2*n-1)*a], so it needs space for n values. zr[0] will - * contain prej[0].z / a.z. The other zr[i] values = prej[i].z / prej[i-1].z. - * Prej's Z values are undefined, except for the last value. - */ -static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_gej *prej, secp256k1_fe *zr, const secp256k1_gej *a) { - secp256k1_gej d; - secp256k1_ge a_ge, d_ge; - int i; - - VERIFY_CHECK(!a->infinity); - - secp256k1_gej_double_var(&d, a, NULL); - - /* - * Perform the additions on an isomorphism where 'd' is affine: drop the z coordinate - * of 'd', and scale the 1P starting value's x/y coordinates without changing its z. - */ - d_ge.x = d.x; - d_ge.y = d.y; - d_ge.infinity = 0; - - secp256k1_ge_set_gej_zinv(&a_ge, a, &d.z); - prej[0].x = a_ge.x; - prej[0].y = a_ge.y; - prej[0].z = a->z; - prej[0].infinity = 0; - - zr[0] = d.z; - for (i = 1; i < n; i++) { - secp256k1_gej_add_ge_var(&prej[i], &prej[i-1], &d_ge, &zr[i]); - } - - /* - * Each point in 'prej' has a z coordinate too small by a factor of 'd.z'. Only - * the final point's z coordinate is actually used though, so just update that. - */ - secp256k1_fe_mul(&prej[n-1].z, &prej[n-1].z, &d.z); -} - -/** Fill a table 'pre' with precomputed odd multiples of a.
- * - * There are two versions of this function: - * - secp256k1_ecmult_odd_multiples_table_globalz_windowa which brings its - * resulting point set to a single constant Z denominator, stores the X and Y - * coordinates as ge_storage points in pre, and stores the global Z in globalz. - * It only operates on tables sized for WINDOW_A wnaf multiples. - * - secp256k1_ecmult_odd_multiples_table_storage_var, which converts its - * resulting point set to actually affine points, and stores those in pre. - * It operates on tables of any size, but uses heap-allocated temporaries. - * - * To compute a*P + b*G, we compute a table for P using the first function, - * and for G using the second (which requires an inverse, but it only needs to - * happen once). - */ -static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge *pre, secp256k1_fe *globalz, const secp256k1_gej *a) { - secp256k1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)]; - secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)]; - - /* Compute the odd multiples in Jacobian form. */ - secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), prej, zr, a); - /* Bring them to the same Z denominator. */ - secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A), pre, globalz, prej, zr); -} - -static void secp256k1_ecmult_odd_multiples_table_storage_var(int n, secp256k1_ge_storage *pre, const secp256k1_gej *a, const secp256k1_callback *cb) { - secp256k1_gej *prej = (secp256k1_gej*)checked_malloc(cb, sizeof(secp256k1_gej) * n); - secp256k1_ge *prea = (secp256k1_ge*)checked_malloc(cb, sizeof(secp256k1_ge) * n); - secp256k1_fe *zr = (secp256k1_fe*)checked_malloc(cb, sizeof(secp256k1_fe) * n); - int i; - - /* Compute the odd multiples in Jacobian form. */ - secp256k1_ecmult_odd_multiples_table(n, prej, zr, a); - /* Convert them in batch to affine coordinates. */ - secp256k1_ge_set_table_gej_var(prea, prej, zr, n); - /* Convert them to compact storage form. */ - for (i = 0; i < n; i++) { - secp256k1_ge_to_storage(&pre[i], &prea[i]); - } - - free(prea); - free(prej); - free(zr); -} - -/** The following two macros retrieve a particular odd multiple from a table - * of precomputed multiples.
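- * Only the positive odd multiples [1*P, 3*P, ..., (2^(w-1) - 1)*P] are stored; a request for -n is served by loading the entry for n at index (n-1)/2 and negating it, which on a short Weierstrass curve just flips the sign of the y coordinate.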
*/ -#define ECMULT_TABLE_GET_GE(r,pre,n,w) do { \ - VERIFY_CHECK(((n) & 1) == 1); \ - VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ - VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ - if ((n) > 0) { \ - *(r) = (pre)[((n)-1)/2]; \ - } else { \ - secp256k1_ge_neg((r), &(pre)[(-(n)-1)/2]); \ - } \ -} while(0) - -#define ECMULT_TABLE_GET_GE_STORAGE(r,pre,n,w) do { \ - VERIFY_CHECK(((n) & 1) == 1); \ - VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \ - VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \ - if ((n) > 0) { \ - secp256k1_ge_from_storage((r), &(pre)[((n)-1)/2]); \ - } else { \ - secp256k1_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \ - secp256k1_ge_neg((r), (r)); \ - } \ -} while(0) - -static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) { - ctx->pre_g = NULL; -#ifdef USE_ENDOMORPHISM - ctx->pre_g_128 = NULL; -#endif -} - -static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const secp256k1_callback *cb) { - secp256k1_gej gj; - - if (ctx->pre_g != NULL) { - return; - } - - /* get the generator */ - secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g); - - ctx->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)); - - /* precompute the tables with odd multiples */ - secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj, cb); - -#ifdef USE_ENDOMORPHISM - { - secp256k1_gej g_128j; - int i; - - ctx->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)); - - /* calculate 2^128*generator */ - g_128j = gj; - for (i = 0; i < 128; i++) { - secp256k1_gej_double_var(&g_128j, &g_128j, NULL); - } - secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j, cb); - } -#endif -} - -static void secp256k1_ecmult_context_clone(secp256k1_ecmult_context *dst, - const secp256k1_ecmult_context *src, const secp256k1_callback *cb) { - if (src->pre_g == NULL) { - dst->pre_g = NULL; - } else { - size_t size = sizeof((*dst->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G); - dst->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, size); - memcpy(dst->pre_g, src->pre_g, size); - } -#ifdef USE_ENDOMORPHISM - if (src->pre_g_128 == NULL) { - dst->pre_g_128 = NULL; - } else { - size_t size = sizeof((*dst->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G); - dst->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, size); - memcpy(dst->pre_g_128, src->pre_g_128, size); - } -#endif -} - -static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx) { - return ctx->pre_g != NULL; -} - -static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx) { - free(ctx->pre_g); -#ifdef USE_ENDOMORPHISM - free(ctx->pre_g_128); -#endif - secp256k1_ecmult_context_init(ctx); -} - -/** Convert a number to WNAF notation. The number becomes represented by sum(2^i * wnaf[i], i=0..bits), - * with the following guarantees: - * - each wnaf[i] is either 0, or an odd integer between -(1<<(w-1) - 1) and (1<<(w-1) - 1) - * - two non-zero entries in wnaf are separated by at least w-1 zeroes. - * - the number of set values in wnaf is returned. This number is at most 256, and at most one more - * than the number of bits in the (absolute value) of the input. 
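- * For example, with w=3 the scalar 13 is encoded as {-3, 0, 0, 0, 1}, since -3 + 1*2^4 = 13; each non-zero digit is odd with absolute value at most 2^(w-1) - 1 = 3, and the non-zero entries are separated by at least w-1 zeroes.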
- */ -static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, int w) { - secp256k1_scalar s = *a; - int last_set_bit = -1; - int bit = 0; - int sign = 1; - int carry = 0; - - VERIFY_CHECK(wnaf != NULL); - VERIFY_CHECK(0 <= len && len <= 256); - VERIFY_CHECK(a != NULL); - VERIFY_CHECK(2 <= w && w <= 31); - - memset(wnaf, 0, len * sizeof(wnaf[0])); - - if (secp256k1_scalar_get_bits(&s, 255, 1)) { - secp256k1_scalar_negate(&s, &s); - sign = -1; - } - - while (bit < len) { - int now; - int word; - if (secp256k1_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) { - bit++; - continue; - } - - now = w; - if (now > len - bit) { - now = len - bit; - } - - word = secp256k1_scalar_get_bits_var(&s, bit, now) + carry; - - carry = (word >> (w-1)) & 1; - word -= carry << w; - - wnaf[bit] = sign * word; - last_set_bit = bit; - - bit += now; - } -#ifdef VERIFY - CHECK(carry == 0); - while (bit < 256) { - CHECK(secp256k1_scalar_get_bits(&s, bit++, 1) == 0); - } -#endif - return last_set_bit + 1; -} - -static void secp256k1_ecmult(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) { - secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)]; - secp256k1_ge tmpa; - secp256k1_fe Z; -#ifdef USE_ENDOMORPHISM - secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)]; - secp256k1_scalar na_1, na_lam; - /* Splitted G factors. */ - secp256k1_scalar ng_1, ng_128; - int wnaf_na_1[130]; - int wnaf_na_lam[130]; - int bits_na_1; - int bits_na_lam; - int wnaf_ng_1[129]; - int bits_ng_1; - int wnaf_ng_128[129]; - int bits_ng_128; -#else - int wnaf_na[256]; - int bits_na; - int wnaf_ng[256]; - int bits_ng; -#endif - int i; - int bits; - -#ifdef USE_ENDOMORPHISM - /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */ - secp256k1_scalar_split_lambda(&na_1, &na_lam, na); - - /* build wnaf representation for na_1 and na_lam. */ - bits_na_1 = secp256k1_ecmult_wnaf(wnaf_na_1, 130, &na_1, WINDOW_A); - bits_na_lam = secp256k1_ecmult_wnaf(wnaf_na_lam, 130, &na_lam, WINDOW_A); - VERIFY_CHECK(bits_na_1 <= 130); - VERIFY_CHECK(bits_na_lam <= 130); - bits = bits_na_1; - if (bits_na_lam > bits) { - bits = bits_na_lam; - } -#else - /* build wnaf representation for na. */ - bits_na = secp256k1_ecmult_wnaf(wnaf_na, 256, na, WINDOW_A); - bits = bits_na; -#endif - - /* Calculate odd multiples of a. - * All multiples are brought to the same Z 'denominator', which is stored - * in Z. Due to secp256k1' isomorphism we can do all operations pretending - * that the Z coordinate was 1, use affine addition formulae, and correct - * the Z coordinate of the result once at the end. - * The exception is the precomputed G table points, which are actually - * affine. Compared to the base used for other points, they have a Z ratio - * of 1/Z, so we can use secp256k1_gej_add_zinv_var, which uses the same - * isomorphism to efficiently add with a known Z inverse. 
- */ - secp256k1_ecmult_odd_multiples_table_globalz_windowa(pre_a, &Z, a); - -#ifdef USE_ENDOMORPHISM - for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) { - secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]); - } - - /* split ng into ng_1 and ng_128 (where gn = gn_1 + gn_128*2^128, and gn_1 and gn_128 are ~128 bit) */ - secp256k1_scalar_split_128(&ng_1, &ng_128, ng); - - /* Build wnaf representation for ng_1 and ng_128 */ - bits_ng_1 = secp256k1_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G); - bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G); - if (bits_ng_1 > bits) { - bits = bits_ng_1; - } - if (bits_ng_128 > bits) { - bits = bits_ng_128; - } -#else - bits_ng = secp256k1_ecmult_wnaf(wnaf_ng, 256, ng, WINDOW_G); - if (bits_ng > bits) { - bits = bits_ng; - } -#endif - - secp256k1_gej_set_infinity(r); - - for (i = bits - 1; i >= 0; i--) { - int n; - secp256k1_gej_double_var(r, r, NULL); -#ifdef USE_ENDOMORPHISM - if (i < bits_na_1 && (n = wnaf_na_1[i])) { - ECMULT_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A); - secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); - } - if (i < bits_na_lam && (n = wnaf_na_lam[i])) { - ECMULT_TABLE_GET_GE(&tmpa, pre_a_lam, n, WINDOW_A); - secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); - } - if (i < bits_ng_1 && (n = wnaf_ng_1[i])) { - ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G); - secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); - } - if (i < bits_ng_128 && (n = wnaf_ng_128[i])) { - ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g_128, n, WINDOW_G); - secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); - } -#else - if (i < bits_na && (n = wnaf_na[i])) { - ECMULT_TABLE_GET_GE(&tmpa, pre_a, n, WINDOW_A); - secp256k1_gej_add_ge_var(r, r, &tmpa, NULL); - } - if (i < bits_ng && (n = wnaf_ng[i])) { - ECMULT_TABLE_GET_GE_STORAGE(&tmpa, *ctx->pre_g, n, WINDOW_G); - secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z); - } -#endif - } - - if (!r->infinity) { - secp256k1_fe_mul(&r->z, &r->z, &Z); - } -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field.h deleted file mode 100644 index bbb1ee866..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field.h +++ /dev/null @@ -1,132 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_FIELD_ -#define _SECP256K1_FIELD_ - -/** Field element module. - * - * Field elements can be represented in several ways, but code accessing - * it (and implementations) need to take certain properties into account: - * - Each field element can be normalized or not. - * - Each field element has a magnitude, which represents how far away - * its representation is away from normalization. Normalized elements - * always have a magnitude of 1, but a magnitude of 1 doesn't imply - * normality. - */ - -#if defined HAVE_CONFIG_H -#include "libsecp256k1-config.h" -#endif - -#if defined(USE_FIELD_10X26) -#include "field_10x26.h" -#elif defined(USE_FIELD_5X52) -#include "field_5x52.h" -#else -#error "Please select field implementation" -#endif - -#include "util.h" - -/** Normalize a field element. 
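- * That is, reduce its representation fully modulo the field prime and bring its magnitude down to 1.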
- */
-static void secp256k1_fe_normalize(secp256k1_fe *r);
-
-/** Weakly normalize a field element: reduce its magnitude to 1, but don't fully normalize. */
-static void secp256k1_fe_normalize_weak(secp256k1_fe *r);
-
-/** Normalize a field element, without constant-time guarantee. */
-static void secp256k1_fe_normalize_var(secp256k1_fe *r);
-
-/** Verify whether a field element represents zero, i.e., would normalize to a zero value. The field
- *  implementation may optionally normalize the input, but this should not be relied upon. */
-static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r);
-
-/** Verify whether a field element represents zero, i.e., would normalize to a zero value. The field
- *  implementation may optionally normalize the input, but this should not be relied upon. */
-static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r);
-
-/** Set a field element equal to a small integer. Resulting field element is normalized. */
-static void secp256k1_fe_set_int(secp256k1_fe *r, int a);
-
-/** Sets a field element equal to zero, initializing all fields. */
-static void secp256k1_fe_clear(secp256k1_fe *a);
-
-/** Verify whether a field element is zero. Requires the input to be normalized. */
-static int secp256k1_fe_is_zero(const secp256k1_fe *a);
-
-/** Check the "oddness" of a field element. Requires the input to be normalized. */
-static int secp256k1_fe_is_odd(const secp256k1_fe *a);
-
-/** Compare two field elements. Requires magnitude-1 inputs. */
-static int secp256k1_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b);
-
-/** Same as secp256k1_fe_equal, but may be variable time. */
-static int secp256k1_fe_equal_var(const secp256k1_fe *a, const secp256k1_fe *b);
-
-/** Compare two field elements. Requires both inputs to be normalized. */
-static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b);
-
-/** Set a field element equal to a 32-byte big endian value. If successful, the resulting field element is normalized. */
-static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a);
-
-/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized. */
-static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a);
-
-/** Set a field element equal to the additive inverse of another. Takes a maximum magnitude of the input
- *  as an argument. The magnitude of the output is one higher. */
-static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m);
-
-/** Multiplies the passed field element with a small integer constant. Multiplies the magnitude by that
- *  small integer. */
-static void secp256k1_fe_mul_int(secp256k1_fe *r, int a);
-
-/** Adds a field element to another. The result has the sum of the inputs' magnitudes as magnitude. */
-static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a);
-
-/** Sets a field element to be the product of two others. Requires the inputs' magnitudes to be at most 8.
- *  The output magnitude is 1 (but not guaranteed to be normalized). */
-static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b);
-
-/** Sets a field element to be the square of another. Requires the input's magnitude to be at most 8.
- *  The output magnitude is 1 (but not guaranteed to be normalized). */
-static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a);
-
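A minimal usage sketch of the contracts above (illustrative only: these are static, library-internal declarations, and fe_example is a hypothetical name, so this compiles only inside a translation unit that defines them). It computes a*b - a and shows how magnitudes compose and why normalization must precede the zero test:

    static int fe_example(const secp256k1_fe *a, const secp256k1_fe *b) {
        secp256k1_fe neg, res;                  /* assumes a and b have magnitude <= 1 */
        secp256k1_fe_negate(&neg, a, 1);        /* input magnitude <= 1 -> output magnitude 2 */
        secp256k1_fe_mul(&res, a, b);           /* input magnitudes <= 8 -> output magnitude 1 */
        secp256k1_fe_add(&res, &neg);           /* magnitudes add: 1 + 2 = 3 */
        secp256k1_fe_normalize(&res);           /* back to canonical form, magnitude 1 */
        return secp256k1_fe_is_zero(&res);      /* requires normalized input: 1 iff a*b == a */
    }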
-/** If a has a square root, it is computed in r and 1 is returned. If a does not
- *  have a square root, the root of its negation is computed and 0 is returned.
- *  The input's magnitude can be at most 8. The output magnitude is 1 (but not
- *  guaranteed to be normalized). The result in r will always be a square
- *  itself. */
-static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a);
-
-/** Checks whether a field element is a quadratic residue. */
-static int secp256k1_fe_is_quad_var(const secp256k1_fe *a);
-
-/** Sets a field element to be the (modular) inverse of another. Requires the input's magnitude to be
- *  at most 8. The output magnitude is 1 (but not guaranteed to be normalized). */
-static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a);
-
-/** Potentially faster version of secp256k1_fe_inv, without constant-time guarantee. */
-static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a);
-
-/** Calculate the (modular) inverses of a batch of field elements. Requires the inputs' magnitudes to be
- *  at most 8. The output magnitudes are 1 (but not guaranteed to be normalized). The inputs and
- *  outputs must not overlap in memory. */
-static void secp256k1_fe_inv_all_var(secp256k1_fe *r, const secp256k1_fe *a, size_t len);
-
-/** Convert a field element to the storage type. */
-static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a);
-
-/** Convert a field element back from the storage type. */
-static void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a);
-
-/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */
-static void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag);
-
-/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */
-static void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag);
-
-#endif
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_10x26.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_10x26.h
deleted file mode 100644
index 61ee1e096..000000000
--- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_10x26.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/**********************************************************************
- * Copyright (c) 2013, 2014 Pieter Wuille                             *
- * Distributed under the MIT software license, see the accompanying   *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
- **********************************************************************/
-
-#ifndef _SECP256K1_FIELD_REPR_
-#define _SECP256K1_FIELD_REPR_
-
-#include <stdint.h>
-
-typedef struct {
-    /* X = sum(i=0..9, elem[i]*2^26) mod n */
-    uint32_t n[10];
-#ifdef VERIFY
-    int magnitude;
-    int normalized;
-#endif
-} secp256k1_fe;
-
-/* Unpacks a constant into an overlapping multi-limbed FE element.
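   (For example, limb 0 takes the low 26 bits of d0, and limb 1 takes the
   top 6 bits of d0 together with the low 20 bits of d1; the pattern
   continues until ten 26-bit limbs cover the eight 32-bit words.)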
*/ -#define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \ - (d0) & 0x3FFFFFFUL, \ - (((uint32_t)d0) >> 26) | (((uint32_t)(d1) & 0xFFFFFUL) << 6), \ - (((uint32_t)d1) >> 20) | (((uint32_t)(d2) & 0x3FFFUL) << 12), \ - (((uint32_t)d2) >> 14) | (((uint32_t)(d3) & 0xFFUL) << 18), \ - (((uint32_t)d3) >> 8) | (((uint32_t)(d4) & 0x3UL) << 24), \ - (((uint32_t)d4) >> 2) & 0x3FFFFFFUL, \ - (((uint32_t)d4) >> 28) | (((uint32_t)(d5) & 0x3FFFFFUL) << 4), \ - (((uint32_t)d5) >> 22) | (((uint32_t)(d6) & 0xFFFFUL) << 10), \ - (((uint32_t)d6) >> 16) | (((uint32_t)(d7) & 0x3FFUL) << 16), \ - (((uint32_t)d7) >> 10) \ -} - -#ifdef VERIFY -#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0)), 1, 1} -#else -#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0))} -#endif - -typedef struct { - uint32_t n[8]; -} secp256k1_fe_storage; - -#define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }} -#define SECP256K1_FE_STORAGE_CONST_GET(d) d.n[7], d.n[6], d.n[5], d.n[4],d.n[3], d.n[2], d.n[1], d.n[0] -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_10x26_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_10x26_impl.h deleted file mode 100644 index 5fb092f1b..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_10x26_impl.h +++ /dev/null @@ -1,1140 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_FIELD_REPR_IMPL_H_ -#define _SECP256K1_FIELD_REPR_IMPL_H_ - -#include "util.h" -#include "num.h" -#include "field.h" - -#ifdef VERIFY -static void secp256k1_fe_verify(const secp256k1_fe *a) { - const uint32_t *d = a->n; - int m = a->normalized ? 1 : 2 * a->magnitude, r = 1; - r &= (d[0] <= 0x3FFFFFFUL * m); - r &= (d[1] <= 0x3FFFFFFUL * m); - r &= (d[2] <= 0x3FFFFFFUL * m); - r &= (d[3] <= 0x3FFFFFFUL * m); - r &= (d[4] <= 0x3FFFFFFUL * m); - r &= (d[5] <= 0x3FFFFFFUL * m); - r &= (d[6] <= 0x3FFFFFFUL * m); - r &= (d[7] <= 0x3FFFFFFUL * m); - r &= (d[8] <= 0x3FFFFFFUL * m); - r &= (d[9] <= 0x03FFFFFUL * m); - r &= (a->magnitude >= 0); - r &= (a->magnitude <= 32); - if (a->normalized) { - r &= (a->magnitude <= 1); - if (r && (d[9] == 0x03FFFFFUL)) { - uint32_t mid = d[8] & d[7] & d[6] & d[5] & d[4] & d[3] & d[2]; - if (mid == 0x3FFFFFFUL) { - r &= ((d[1] + 0x40UL + ((d[0] + 0x3D1UL) >> 26)) <= 0x3FFFFFFUL); - } - } - } - VERIFY_CHECK(r == 1); -} -#endif - -static void secp256k1_fe_normalize(secp256k1_fe *r) { - uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], - t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; - - /* Reduce t9 at the start so there will be at most a single carry from the first pass */ - uint32_t m; - uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL; - - /* The first pass ensures the magnitude is 1, ... 
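     * (Here x is the excess above bit 256. Since 2^256 is congruent to
     * 0x1000003D1 = 2^32 + 0x3D1 mod p, folding x back in adds x*0x3D1 to
     * t0 and x << 6 to t1, bit 32 being offset 6 within the second 26-bit
     * limb.)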
*/ - t0 += x * 0x3D1UL; t1 += (x << 6); - t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL; - t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; - t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; m = t2; - t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; m &= t3; - t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; m &= t4; - t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; m &= t5; - t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; m &= t6; - t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; m &= t7; - t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; m &= t8; - - /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */ - VERIFY_CHECK(t9 >> 23 == 0); - - /* At most a single final reduction is needed; check if the value is >= the field characteristic */ - x = (t9 >> 22) | ((t9 == 0x03FFFFFUL) & (m == 0x3FFFFFFUL) - & ((t1 + 0x40UL + ((t0 + 0x3D1UL) >> 26)) > 0x3FFFFFFUL)); - - /* Apply the final reduction (for constant-time behaviour, we do it always) */ - t0 += x * 0x3D1UL; t1 += (x << 6); - t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL; - t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; - t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; - t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; - t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; - t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; - t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; - t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; - t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; - - /* If t9 didn't carry to bit 22 already, then it should have after any final reduction */ - VERIFY_CHECK(t9 >> 22 == x); - - /* Mask off the possible multiple of 2^256 from the final reduction */ - t9 &= 0x03FFFFFUL; - - r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4; - r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9; - -#ifdef VERIFY - r->magnitude = 1; - r->normalized = 1; - secp256k1_fe_verify(r); -#endif -} - -static void secp256k1_fe_normalize_weak(secp256k1_fe *r) { - uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], - t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; - - /* Reduce t9 at the start so there will be at most a single carry from the first pass */ - uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL; - - /* The first pass ensures the magnitude is 1, ... */ - t0 += x * 0x3D1UL; t1 += (x << 6); - t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL; - t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; - t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; - t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; - t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; - t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; - t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; - t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; - t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; - - /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */ - VERIFY_CHECK(t9 >> 23 == 0); - - r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4; - r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9; - -#ifdef VERIFY - r->magnitude = 1; - secp256k1_fe_verify(r); -#endif -} - -static void secp256k1_fe_normalize_var(secp256k1_fe *r) { - uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], - t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; - - /* Reduce t9 at the start so there will be at most a single carry from the first pass */ - uint32_t m; - uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL; - - /* The first pass ensures the magnitude is 1, ... 
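     * (Unlike secp256k1_fe_normalize above, the final reduction below runs
     * only inside an if (x) branch, which makes this variant faster on
     * average but not constant time.)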
*/ - t0 += x * 0x3D1UL; t1 += (x << 6); - t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL; - t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; - t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; m = t2; - t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; m &= t3; - t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; m &= t4; - t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; m &= t5; - t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; m &= t6; - t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; m &= t7; - t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; m &= t8; - - /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */ - VERIFY_CHECK(t9 >> 23 == 0); - - /* At most a single final reduction is needed; check if the value is >= the field characteristic */ - x = (t9 >> 22) | ((t9 == 0x03FFFFFUL) & (m == 0x3FFFFFFUL) - & ((t1 + 0x40UL + ((t0 + 0x3D1UL) >> 26)) > 0x3FFFFFFUL)); - - if (x) { - t0 += 0x3D1UL; t1 += (x << 6); - t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL; - t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; - t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; - t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; - t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; - t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; - t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; - t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; - t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; - - /* If t9 didn't carry to bit 22 already, then it should have after any final reduction */ - VERIFY_CHECK(t9 >> 22 == x); - - /* Mask off the possible multiple of 2^256 from the final reduction */ - t9 &= 0x03FFFFFUL; - } - - r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4; - r->n[5] = t5; r->n[6] = t6; r->n[7] = t7; r->n[8] = t8; r->n[9] = t9; - -#ifdef VERIFY - r->magnitude = 1; - r->normalized = 1; - secp256k1_fe_verify(r); -#endif -} - -static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) { - uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], - t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; - - /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */ - uint32_t z0, z1; - - /* Reduce t9 at the start so there will be at most a single carry from the first pass */ - uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL; - - /* The first pass ensures the magnitude is 1, ... */ - t0 += x * 0x3D1UL; t1 += (x << 6); - t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL; z0 = t0; z1 = t0 ^ 0x3D0UL; - t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; z0 |= t1; z1 &= t1 ^ 0x40UL; - t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; z0 |= t2; z1 &= t2; - t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; z0 |= t3; z1 &= t3; - t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; z0 |= t4; z1 &= t4; - t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; z0 |= t5; z1 &= t5; - t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; z0 |= t6; z1 &= t6; - t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; z0 |= t7; z1 &= t7; - t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; z0 |= t8; z1 &= t8; - z0 |= t9; z1 &= t9 ^ 0x3C00000UL; - - /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */ - VERIFY_CHECK(t9 >> 23 == 0); - - return (z0 == 0) | (z1 == 0x3FFFFFFUL); -} - -static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) { - uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; - uint32_t z0, z1; - uint32_t x; - - t0 = r->n[0]; - t9 = r->n[9]; - - /* Reduce t9 at the start so there will be at most a single carry from the first pass */ - x = t9 >> 22; - - /* The first pass ensures the magnitude is 1, ... 
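     * (The test then works limb-by-limb: z0 ORs the limbs together, so
     * z0 == 0 iff the raw value is 0; z1 ANDs each limb after XORing in a
     * mask that maps P's limb pattern to all-ones, so z1 == 0x3FFFFFF iff
     * the raw value is exactly P.)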
*/ - t0 += x * 0x3D1UL; - - /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */ - z0 = t0 & 0x3FFFFFFUL; - z1 = z0 ^ 0x3D0UL; - - /* Fast return path should catch the majority of cases */ - if ((z0 != 0UL) & (z1 != 0x3FFFFFFUL)) { - return 0; - } - - t1 = r->n[1]; - t2 = r->n[2]; - t3 = r->n[3]; - t4 = r->n[4]; - t5 = r->n[5]; - t6 = r->n[6]; - t7 = r->n[7]; - t8 = r->n[8]; - - t9 &= 0x03FFFFFUL; - t1 += (x << 6); - - t1 += (t0 >> 26); - t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; z0 |= t1; z1 &= t1 ^ 0x40UL; - t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; z0 |= t2; z1 &= t2; - t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; z0 |= t3; z1 &= t3; - t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; z0 |= t4; z1 &= t4; - t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; z0 |= t5; z1 &= t5; - t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; z0 |= t6; z1 &= t6; - t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; z0 |= t7; z1 &= t7; - t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; z0 |= t8; z1 &= t8; - z0 |= t9; z1 &= t9 ^ 0x3C00000UL; - - /* ... except for a possible carry at bit 22 of t9 (i.e. bit 256 of the field element) */ - VERIFY_CHECK(t9 >> 23 == 0); - - return (z0 == 0) | (z1 == 0x3FFFFFFUL); -} - -SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) { - r->n[0] = a; - r->n[1] = r->n[2] = r->n[3] = r->n[4] = r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0; -#ifdef VERIFY - r->magnitude = 1; - r->normalized = 1; - secp256k1_fe_verify(r); -#endif -} - -SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) { - const uint32_t *t = a->n; -#ifdef VERIFY - VERIFY_CHECK(a->normalized); - secp256k1_fe_verify(a); -#endif - return (t[0] | t[1] | t[2] | t[3] | t[4] | t[5] | t[6] | t[7] | t[8] | t[9]) == 0; -} - -SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe *a) { -#ifdef VERIFY - VERIFY_CHECK(a->normalized); - secp256k1_fe_verify(a); -#endif - return a->n[0] & 1; -} - -SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) { - int i; -#ifdef VERIFY - a->magnitude = 0; - a->normalized = 1; -#endif - for (i=0; i<10; i++) { - a->n[i] = 0; - } -} - -static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) { - int i; -#ifdef VERIFY - VERIFY_CHECK(a->normalized); - VERIFY_CHECK(b->normalized); - secp256k1_fe_verify(a); - secp256k1_fe_verify(b); -#endif - for (i = 9; i >= 0; i--) { - if (a->n[i] > b->n[i]) { - return 1; - } - if (a->n[i] < b->n[i]) { - return -1; - } - } - return 0; -} - -static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) { - int i; - r->n[0] = r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0; - r->n[5] = r->n[6] = r->n[7] = r->n[8] = r->n[9] = 0; - for (i=0; i<32; i++) { - int j; - for (j=0; j<4; j++) { - int limb = (8*i+2*j)/26; - int shift = (8*i+2*j)%26; - r->n[limb] |= (uint32_t)((a[31-i] >> (2*j)) & 0x3) << shift; - } - } - if (r->n[9] == 0x3FFFFFUL && (r->n[8] & r->n[7] & r->n[6] & r->n[5] & r->n[4] & r->n[3] & r->n[2]) == 0x3FFFFFFUL && (r->n[1] + 0x40UL + ((r->n[0] + 0x3D1UL) >> 26)) > 0x3FFFFFFUL) { - return 0; - } -#ifdef VERIFY - r->magnitude = 1; - r->normalized = 1; - secp256k1_fe_verify(r); -#endif - return 1; -} - -/** Convert a field element to a 32-byte big endian value. 
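 *  The bytes are assembled two bits at a time: limb boundaries fall at
 *  multiples of 26, so 2 = gcd(26, 8) is the largest chunk size that can
 *  never straddle a limb boundary.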
Requires the input to be normalized */ -static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) { - int i; -#ifdef VERIFY - VERIFY_CHECK(a->normalized); - secp256k1_fe_verify(a); -#endif - for (i=0; i<32; i++) { - int j; - int c = 0; - for (j=0; j<4; j++) { - int limb = (8*i+2*j)/26; - int shift = (8*i+2*j)%26; - c |= ((a->n[limb] >> shift) & 0x3) << (2 * j); - } - r[31-i] = c; - } -} - -SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m) { -#ifdef VERIFY - VERIFY_CHECK(a->magnitude <= m); - secp256k1_fe_verify(a); -#endif - r->n[0] = 0x3FFFC2FUL * 2 * (m + 1) - a->n[0]; - r->n[1] = 0x3FFFFBFUL * 2 * (m + 1) - a->n[1]; - r->n[2] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[2]; - r->n[3] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[3]; - r->n[4] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[4]; - r->n[5] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[5]; - r->n[6] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[6]; - r->n[7] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[7]; - r->n[8] = 0x3FFFFFFUL * 2 * (m + 1) - a->n[8]; - r->n[9] = 0x03FFFFFUL * 2 * (m + 1) - a->n[9]; -#ifdef VERIFY - r->magnitude = m + 1; - r->normalized = 0; - secp256k1_fe_verify(r); -#endif -} - -SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe *r, int a) { - r->n[0] *= a; - r->n[1] *= a; - r->n[2] *= a; - r->n[3] *= a; - r->n[4] *= a; - r->n[5] *= a; - r->n[6] *= a; - r->n[7] *= a; - r->n[8] *= a; - r->n[9] *= a; -#ifdef VERIFY - r->magnitude *= a; - r->normalized = 0; - secp256k1_fe_verify(r); -#endif -} - -SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a) { -#ifdef VERIFY - secp256k1_fe_verify(a); -#endif - r->n[0] += a->n[0]; - r->n[1] += a->n[1]; - r->n[2] += a->n[2]; - r->n[3] += a->n[3]; - r->n[4] += a->n[4]; - r->n[5] += a->n[5]; - r->n[6] += a->n[6]; - r->n[7] += a->n[7]; - r->n[8] += a->n[8]; - r->n[9] += a->n[9]; -#ifdef VERIFY - r->magnitude += a->magnitude; - r->normalized = 0; - secp256k1_fe_verify(r); -#endif -} - -#if defined(USE_EXTERNAL_ASM) - -/* External assembler implementation */ -void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b); -void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a); - -#else - -#ifdef VERIFY -#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0) -#else -#define VERIFY_BITS(x, n) do { } while(0) -#endif - -SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) { - uint64_t c, d; - uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8; - uint32_t t9, t1, t0, t2, t3, t4, t5, t6, t7; - const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL; - - VERIFY_BITS(a[0], 30); - VERIFY_BITS(a[1], 30); - VERIFY_BITS(a[2], 30); - VERIFY_BITS(a[3], 30); - VERIFY_BITS(a[4], 30); - VERIFY_BITS(a[5], 30); - VERIFY_BITS(a[6], 30); - VERIFY_BITS(a[7], 30); - VERIFY_BITS(a[8], 30); - VERIFY_BITS(a[9], 26); - VERIFY_BITS(b[0], 30); - VERIFY_BITS(b[1], 30); - VERIFY_BITS(b[2], 30); - VERIFY_BITS(b[3], 30); - VERIFY_BITS(b[4], 30); - VERIFY_BITS(b[5], 30); - VERIFY_BITS(b[6], 30); - VERIFY_BITS(b[7], 30); - VERIFY_BITS(b[8], 30); - VERIFY_BITS(b[9], 26); - - /** [... a b c] is a shorthand for ... + a<<52 + b<<26 + c<<0 mod n. - * px is a shorthand for sum(a[i]*b[x-i], i=0..x). - * Note that [x 0 0 0 0 0 0 0 0 0 0] = [x*R1 x*R0]. 
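 *  (That identity is the reduction step: 2^260 is congruent to
 *  0x1000003D10 mod p, and 0x1000003D10 = R1*2^26 + R0 with R0 = 0x3D10,
 *  R1 = 0x400, so a carry x of weight 2^260 folds back in as x*R1 at
 *  limb 1 and x*R0 at limb 0.)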
- */ - - d = (uint64_t)a[0] * b[9] - + (uint64_t)a[1] * b[8] - + (uint64_t)a[2] * b[7] - + (uint64_t)a[3] * b[6] - + (uint64_t)a[4] * b[5] - + (uint64_t)a[5] * b[4] - + (uint64_t)a[6] * b[3] - + (uint64_t)a[7] * b[2] - + (uint64_t)a[8] * b[1] - + (uint64_t)a[9] * b[0]; - /* VERIFY_BITS(d, 64); */ - /* [d 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */ - t9 = d & M; d >>= 26; - VERIFY_BITS(t9, 26); - VERIFY_BITS(d, 38); - /* [d t9 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */ - - c = (uint64_t)a[0] * b[0]; - VERIFY_BITS(c, 60); - /* [d t9 0 0 0 0 0 0 0 0 c] = [p9 0 0 0 0 0 0 0 0 p0] */ - d += (uint64_t)a[1] * b[9] - + (uint64_t)a[2] * b[8] - + (uint64_t)a[3] * b[7] - + (uint64_t)a[4] * b[6] - + (uint64_t)a[5] * b[5] - + (uint64_t)a[6] * b[4] - + (uint64_t)a[7] * b[3] - + (uint64_t)a[8] * b[2] - + (uint64_t)a[9] * b[1]; - VERIFY_BITS(d, 63); - /* [d t9 0 0 0 0 0 0 0 0 c] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ - u0 = d & M; d >>= 26; c += u0 * R0; - VERIFY_BITS(u0, 26); - VERIFY_BITS(d, 37); - VERIFY_BITS(c, 61); - /* [d u0 t9 0 0 0 0 0 0 0 0 c-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ - t0 = c & M; c >>= 26; c += u0 * R1; - VERIFY_BITS(t0, 26); - VERIFY_BITS(c, 37); - /* [d u0 t9 0 0 0 0 0 0 0 c-u0*R1 t0-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ - /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ - - c += (uint64_t)a[0] * b[1] - + (uint64_t)a[1] * b[0]; - VERIFY_BITS(c, 62); - /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 p1 p0] */ - d += (uint64_t)a[2] * b[9] - + (uint64_t)a[3] * b[8] - + (uint64_t)a[4] * b[7] - + (uint64_t)a[5] * b[6] - + (uint64_t)a[6] * b[5] - + (uint64_t)a[7] * b[4] - + (uint64_t)a[8] * b[3] - + (uint64_t)a[9] * b[2]; - VERIFY_BITS(d, 63); - /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ - u1 = d & M; d >>= 26; c += u1 * R0; - VERIFY_BITS(u1, 26); - VERIFY_BITS(d, 37); - VERIFY_BITS(c, 63); - /* [d u1 0 t9 0 0 0 0 0 0 0 c-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ - t1 = c & M; c >>= 26; c += u1 * R1; - VERIFY_BITS(t1, 26); - VERIFY_BITS(c, 38); - /* [d u1 0 t9 0 0 0 0 0 0 c-u1*R1 t1-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ - /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ - - c += (uint64_t)a[0] * b[2] - + (uint64_t)a[1] * b[1] - + (uint64_t)a[2] * b[0]; - VERIFY_BITS(c, 62); - /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ - d += (uint64_t)a[3] * b[9] - + (uint64_t)a[4] * b[8] - + (uint64_t)a[5] * b[7] - + (uint64_t)a[6] * b[6] - + (uint64_t)a[7] * b[5] - + (uint64_t)a[8] * b[4] - + (uint64_t)a[9] * b[3]; - VERIFY_BITS(d, 63); - /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ - u2 = d & M; d >>= 26; c += u2 * R0; - VERIFY_BITS(u2, 26); - VERIFY_BITS(d, 37); - VERIFY_BITS(c, 63); - /* [d u2 0 0 t9 0 0 0 0 0 0 c-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ - t2 = c & M; c >>= 26; c += u2 * R1; - VERIFY_BITS(t2, 26); - VERIFY_BITS(c, 38); - /* [d u2 0 0 t9 0 0 0 0 0 c-u2*R1 t2-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ - /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ - - c += (uint64_t)a[0] * b[3] - + (uint64_t)a[1] * b[2] - + (uint64_t)a[2] * b[1] - + (uint64_t)a[3] * b[0]; - VERIFY_BITS(c, 63); - /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ - d += (uint64_t)a[4] * b[9] - + (uint64_t)a[5] * b[8] - + (uint64_t)a[6] * b[7] - + (uint64_t)a[7] * b[6] - + (uint64_t)a[8] * b[5] - + (uint64_t)a[9] * b[4]; - VERIFY_BITS(d, 63); - /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 
t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ - u3 = d & M; d >>= 26; c += u3 * R0; - VERIFY_BITS(u3, 26); - VERIFY_BITS(d, 37); - /* VERIFY_BITS(c, 64); */ - /* [d u3 0 0 0 t9 0 0 0 0 0 c-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ - t3 = c & M; c >>= 26; c += u3 * R1; - VERIFY_BITS(t3, 26); - VERIFY_BITS(c, 39); - /* [d u3 0 0 0 t9 0 0 0 0 c-u3*R1 t3-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ - /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ - - c += (uint64_t)a[0] * b[4] - + (uint64_t)a[1] * b[3] - + (uint64_t)a[2] * b[2] - + (uint64_t)a[3] * b[1] - + (uint64_t)a[4] * b[0]; - VERIFY_BITS(c, 63); - /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ - d += (uint64_t)a[5] * b[9] - + (uint64_t)a[6] * b[8] - + (uint64_t)a[7] * b[7] - + (uint64_t)a[8] * b[6] - + (uint64_t)a[9] * b[5]; - VERIFY_BITS(d, 62); - /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ - u4 = d & M; d >>= 26; c += u4 * R0; - VERIFY_BITS(u4, 26); - VERIFY_BITS(d, 36); - /* VERIFY_BITS(c, 64); */ - /* [d u4 0 0 0 0 t9 0 0 0 0 c-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ - t4 = c & M; c >>= 26; c += u4 * R1; - VERIFY_BITS(t4, 26); - VERIFY_BITS(c, 39); - /* [d u4 0 0 0 0 t9 0 0 0 c-u4*R1 t4-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ - /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ - - c += (uint64_t)a[0] * b[5] - + (uint64_t)a[1] * b[4] - + (uint64_t)a[2] * b[3] - + (uint64_t)a[3] * b[2] - + (uint64_t)a[4] * b[1] - + (uint64_t)a[5] * b[0]; - VERIFY_BITS(c, 63); - /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ - d += (uint64_t)a[6] * b[9] - + (uint64_t)a[7] * b[8] - + (uint64_t)a[8] * b[7] - + (uint64_t)a[9] * b[6]; - VERIFY_BITS(d, 62); - /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ - u5 = d & M; d >>= 26; c += u5 * R0; - VERIFY_BITS(u5, 26); - VERIFY_BITS(d, 36); - /* VERIFY_BITS(c, 64); */ - /* [d u5 0 0 0 0 0 t9 0 0 0 c-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ - t5 = c & M; c >>= 26; c += u5 * R1; - VERIFY_BITS(t5, 26); - VERIFY_BITS(c, 39); - /* [d u5 0 0 0 0 0 t9 0 0 c-u5*R1 t5-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ - /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ - - c += (uint64_t)a[0] * b[6] - + (uint64_t)a[1] * b[5] - + (uint64_t)a[2] * b[4] - + (uint64_t)a[3] * b[3] - + (uint64_t)a[4] * b[2] - + (uint64_t)a[5] * b[1] - + (uint64_t)a[6] * b[0]; - VERIFY_BITS(c, 63); - /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ - d += (uint64_t)a[7] * b[9] - + (uint64_t)a[8] * b[8] - + (uint64_t)a[9] * b[7]; - VERIFY_BITS(d, 61); - /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ - u6 = d & M; d >>= 26; c += u6 * R0; - VERIFY_BITS(u6, 26); - VERIFY_BITS(d, 35); - /* VERIFY_BITS(c, 64); */ - /* [d u6 0 0 0 0 0 0 t9 0 0 c-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ - t6 = c & M; c >>= 26; c += u6 * R1; - VERIFY_BITS(t6, 26); - VERIFY_BITS(c, 39); - /* [d u6 0 0 0 0 0 0 t9 0 c-u6*R1 t6-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 
p0] */ - /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ - - c += (uint64_t)a[0] * b[7] - + (uint64_t)a[1] * b[6] - + (uint64_t)a[2] * b[5] - + (uint64_t)a[3] * b[4] - + (uint64_t)a[4] * b[3] - + (uint64_t)a[5] * b[2] - + (uint64_t)a[6] * b[1] - + (uint64_t)a[7] * b[0]; - /* VERIFY_BITS(c, 64); */ - VERIFY_CHECK(c <= 0x8000007C00000007ULL); - /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ - d += (uint64_t)a[8] * b[9] - + (uint64_t)a[9] * b[8]; - VERIFY_BITS(d, 58); - /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ - u7 = d & M; d >>= 26; c += u7 * R0; - VERIFY_BITS(u7, 26); - VERIFY_BITS(d, 32); - /* VERIFY_BITS(c, 64); */ - VERIFY_CHECK(c <= 0x800001703FFFC2F7ULL); - /* [d u7 0 0 0 0 0 0 0 t9 0 c-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ - t7 = c & M; c >>= 26; c += u7 * R1; - VERIFY_BITS(t7, 26); - VERIFY_BITS(c, 38); - /* [d u7 0 0 0 0 0 0 0 t9 c-u7*R1 t7-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ - /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ - - c += (uint64_t)a[0] * b[8] - + (uint64_t)a[1] * b[7] - + (uint64_t)a[2] * b[6] - + (uint64_t)a[3] * b[5] - + (uint64_t)a[4] * b[4] - + (uint64_t)a[5] * b[3] - + (uint64_t)a[6] * b[2] - + (uint64_t)a[7] * b[1] - + (uint64_t)a[8] * b[0]; - /* VERIFY_BITS(c, 64); */ - VERIFY_CHECK(c <= 0x9000007B80000008ULL); - /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - d += (uint64_t)a[9] * b[9]; - VERIFY_BITS(d, 57); - /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - u8 = d & M; d >>= 26; c += u8 * R0; - VERIFY_BITS(u8, 26); - VERIFY_BITS(d, 31); - /* VERIFY_BITS(c, 64); */ - VERIFY_CHECK(c <= 0x9000016FBFFFC2F8ULL); - /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - - r[3] = t3; - VERIFY_BITS(r[3], 26); - /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[4] = t4; - VERIFY_BITS(r[4], 26); - /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[5] = t5; - VERIFY_BITS(r[5], 26); - /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[6] = t6; - VERIFY_BITS(r[6], 26); - /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[7] = t7; - VERIFY_BITS(r[7], 26); - /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - - r[8] = c & M; c >>= 26; c += u8 * R1; - VERIFY_BITS(r[8], 26); - VERIFY_BITS(c, 39); - /* [d u8 0 0 0 0 0 0 0 0 t9+c-u8*R1 r8-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - /* [d 0 0 0 0 0 0 0 0 0 t9+c r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - c += d * R0 + t9; - VERIFY_BITS(c, 45); - /* [d 0 0 0 0 0 0 0 0 0 
c-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4); - VERIFY_BITS(r[9], 22); - VERIFY_BITS(c, 46); - /* [d 0 0 0 0 0 0 0 0 r9+((c-d*R1<<4)<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - /* [d 0 0 0 0 0 0 0 -d*R1 r9+(c<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - - d = c * (R0 >> 4) + t0; - VERIFY_BITS(d, 56); - /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 d-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[0] = d & M; d >>= 26; - VERIFY_BITS(r[0], 26); - VERIFY_BITS(d, 30); - /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1+d r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - d += c * (R1 >> 4) + t1; - VERIFY_BITS(d, 53); - VERIFY_CHECK(d <= 0x10000003FFFFBFULL); - /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 d-c*R1>>4 r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - /* [r9 r8 r7 r6 r5 r4 r3 t2 d r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[1] = d & M; d >>= 26; - VERIFY_BITS(r[1], 26); - VERIFY_BITS(d, 27); - VERIFY_CHECK(d <= 0x4000000ULL); - /* [r9 r8 r7 r6 r5 r4 r3 t2+d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - d += t2; - VERIFY_BITS(d, 27); - /* [r9 r8 r7 r6 r5 r4 r3 d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[2] = d; - VERIFY_BITS(r[2], 27); - /* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ -} - -SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a) { - uint64_t c, d; - uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8; - uint32_t t9, t0, t1, t2, t3, t4, t5, t6, t7; - const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL; - - VERIFY_BITS(a[0], 30); - VERIFY_BITS(a[1], 30); - VERIFY_BITS(a[2], 30); - VERIFY_BITS(a[3], 30); - VERIFY_BITS(a[4], 30); - VERIFY_BITS(a[5], 30); - VERIFY_BITS(a[6], 30); - VERIFY_BITS(a[7], 30); - VERIFY_BITS(a[8], 30); - VERIFY_BITS(a[9], 26); - - /** [... a b c] is a shorthand for ... + a<<52 + b<<26 + c<<0 mod n. - * px is a shorthand for sum(a[i]*a[x-i], i=0..x). - * Note that [x 0 0 0 0 0 0 0 0 0 0] = [x*R1 x*R0]. 
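 *  (Squaring uses the same reduction as mul_inner; the difference is that
 *  symmetric cross terms are folded, computing a[i]*a[j] + a[j]*a[i] once
 *  as (a[i]*2) * a[j], which roughly halves the number of 32x32->64
 *  multiplications.)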
- */ - - d = (uint64_t)(a[0]*2) * a[9] - + (uint64_t)(a[1]*2) * a[8] - + (uint64_t)(a[2]*2) * a[7] - + (uint64_t)(a[3]*2) * a[6] - + (uint64_t)(a[4]*2) * a[5]; - /* VERIFY_BITS(d, 64); */ - /* [d 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */ - t9 = d & M; d >>= 26; - VERIFY_BITS(t9, 26); - VERIFY_BITS(d, 38); - /* [d t9 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */ - - c = (uint64_t)a[0] * a[0]; - VERIFY_BITS(c, 60); - /* [d t9 0 0 0 0 0 0 0 0 c] = [p9 0 0 0 0 0 0 0 0 p0] */ - d += (uint64_t)(a[1]*2) * a[9] - + (uint64_t)(a[2]*2) * a[8] - + (uint64_t)(a[3]*2) * a[7] - + (uint64_t)(a[4]*2) * a[6] - + (uint64_t)a[5] * a[5]; - VERIFY_BITS(d, 63); - /* [d t9 0 0 0 0 0 0 0 0 c] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ - u0 = d & M; d >>= 26; c += u0 * R0; - VERIFY_BITS(u0, 26); - VERIFY_BITS(d, 37); - VERIFY_BITS(c, 61); - /* [d u0 t9 0 0 0 0 0 0 0 0 c-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ - t0 = c & M; c >>= 26; c += u0 * R1; - VERIFY_BITS(t0, 26); - VERIFY_BITS(c, 37); - /* [d u0 t9 0 0 0 0 0 0 0 c-u0*R1 t0-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ - /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ - - c += (uint64_t)(a[0]*2) * a[1]; - VERIFY_BITS(c, 62); - /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 p1 p0] */ - d += (uint64_t)(a[2]*2) * a[9] - + (uint64_t)(a[3]*2) * a[8] - + (uint64_t)(a[4]*2) * a[7] - + (uint64_t)(a[5]*2) * a[6]; - VERIFY_BITS(d, 63); - /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ - u1 = d & M; d >>= 26; c += u1 * R0; - VERIFY_BITS(u1, 26); - VERIFY_BITS(d, 37); - VERIFY_BITS(c, 63); - /* [d u1 0 t9 0 0 0 0 0 0 0 c-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ - t1 = c & M; c >>= 26; c += u1 * R1; - VERIFY_BITS(t1, 26); - VERIFY_BITS(c, 38); - /* [d u1 0 t9 0 0 0 0 0 0 c-u1*R1 t1-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ - /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ - - c += (uint64_t)(a[0]*2) * a[2] - + (uint64_t)a[1] * a[1]; - VERIFY_BITS(c, 62); - /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ - d += (uint64_t)(a[3]*2) * a[9] - + (uint64_t)(a[4]*2) * a[8] - + (uint64_t)(a[5]*2) * a[7] - + (uint64_t)a[6] * a[6]; - VERIFY_BITS(d, 63); - /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ - u2 = d & M; d >>= 26; c += u2 * R0; - VERIFY_BITS(u2, 26); - VERIFY_BITS(d, 37); - VERIFY_BITS(c, 63); - /* [d u2 0 0 t9 0 0 0 0 0 0 c-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ - t2 = c & M; c >>= 26; c += u2 * R1; - VERIFY_BITS(t2, 26); - VERIFY_BITS(c, 38); - /* [d u2 0 0 t9 0 0 0 0 0 c-u2*R1 t2-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ - /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ - - c += (uint64_t)(a[0]*2) * a[3] - + (uint64_t)(a[1]*2) * a[2]; - VERIFY_BITS(c, 63); - /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ - d += (uint64_t)(a[4]*2) * a[9] - + (uint64_t)(a[5]*2) * a[8] - + (uint64_t)(a[6]*2) * a[7]; - VERIFY_BITS(d, 63); - /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ - u3 = d & M; d >>= 26; c += u3 * R0; - VERIFY_BITS(u3, 26); - VERIFY_BITS(d, 37); - /* VERIFY_BITS(c, 64); */ - /* [d u3 0 0 0 t9 0 0 0 0 0 c-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ - t3 = c & M; c >>= 26; c += u3 * R1; - VERIFY_BITS(t3, 26); - VERIFY_BITS(c, 39); - /* [d u3 0 0 0 t9 0 0 0 0 c-u3*R1 t3-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ - /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 
p9 0 0 0 0 0 p3 p2 p1 p0] */ - - c += (uint64_t)(a[0]*2) * a[4] - + (uint64_t)(a[1]*2) * a[3] - + (uint64_t)a[2] * a[2]; - VERIFY_BITS(c, 63); - /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ - d += (uint64_t)(a[5]*2) * a[9] - + (uint64_t)(a[6]*2) * a[8] - + (uint64_t)a[7] * a[7]; - VERIFY_BITS(d, 62); - /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ - u4 = d & M; d >>= 26; c += u4 * R0; - VERIFY_BITS(u4, 26); - VERIFY_BITS(d, 36); - /* VERIFY_BITS(c, 64); */ - /* [d u4 0 0 0 0 t9 0 0 0 0 c-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ - t4 = c & M; c >>= 26; c += u4 * R1; - VERIFY_BITS(t4, 26); - VERIFY_BITS(c, 39); - /* [d u4 0 0 0 0 t9 0 0 0 c-u4*R1 t4-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ - /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ - - c += (uint64_t)(a[0]*2) * a[5] - + (uint64_t)(a[1]*2) * a[4] - + (uint64_t)(a[2]*2) * a[3]; - VERIFY_BITS(c, 63); - /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ - d += (uint64_t)(a[6]*2) * a[9] - + (uint64_t)(a[7]*2) * a[8]; - VERIFY_BITS(d, 62); - /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ - u5 = d & M; d >>= 26; c += u5 * R0; - VERIFY_BITS(u5, 26); - VERIFY_BITS(d, 36); - /* VERIFY_BITS(c, 64); */ - /* [d u5 0 0 0 0 0 t9 0 0 0 c-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ - t5 = c & M; c >>= 26; c += u5 * R1; - VERIFY_BITS(t5, 26); - VERIFY_BITS(c, 39); - /* [d u5 0 0 0 0 0 t9 0 0 c-u5*R1 t5-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ - /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ - - c += (uint64_t)(a[0]*2) * a[6] - + (uint64_t)(a[1]*2) * a[5] - + (uint64_t)(a[2]*2) * a[4] - + (uint64_t)a[3] * a[3]; - VERIFY_BITS(c, 63); - /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ - d += (uint64_t)(a[7]*2) * a[9] - + (uint64_t)a[8] * a[8]; - VERIFY_BITS(d, 61); - /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ - u6 = d & M; d >>= 26; c += u6 * R0; - VERIFY_BITS(u6, 26); - VERIFY_BITS(d, 35); - /* VERIFY_BITS(c, 64); */ - /* [d u6 0 0 0 0 0 0 t9 0 0 c-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ - t6 = c & M; c >>= 26; c += u6 * R1; - VERIFY_BITS(t6, 26); - VERIFY_BITS(c, 39); - /* [d u6 0 0 0 0 0 0 t9 0 c-u6*R1 t6-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ - /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ - - c += (uint64_t)(a[0]*2) * a[7] - + (uint64_t)(a[1]*2) * a[6] - + (uint64_t)(a[2]*2) * a[5] - + (uint64_t)(a[3]*2) * a[4]; - /* VERIFY_BITS(c, 64); */ - VERIFY_CHECK(c <= 0x8000007C00000007ULL); - /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ - d += (uint64_t)(a[8]*2) * a[9]; - VERIFY_BITS(d, 58); - /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ - u7 = d & M; d >>= 26; c += u7 * R0; - VERIFY_BITS(u7, 26); - VERIFY_BITS(d, 32); - /* VERIFY_BITS(c, 64); */ - VERIFY_CHECK(c <= 0x800001703FFFC2F7ULL); - /* [d u7 0 0 0 0 0 0 0 t9 
0 c-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ - t7 = c & M; c >>= 26; c += u7 * R1; - VERIFY_BITS(t7, 26); - VERIFY_BITS(c, 38); - /* [d u7 0 0 0 0 0 0 0 t9 c-u7*R1 t7-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ - /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ - - c += (uint64_t)(a[0]*2) * a[8] - + (uint64_t)(a[1]*2) * a[7] - + (uint64_t)(a[2]*2) * a[6] - + (uint64_t)(a[3]*2) * a[5] - + (uint64_t)a[4] * a[4]; - /* VERIFY_BITS(c, 64); */ - VERIFY_CHECK(c <= 0x9000007B80000008ULL); - /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - d += (uint64_t)a[9] * a[9]; - VERIFY_BITS(d, 57); - /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - u8 = d & M; d >>= 26; c += u8 * R0; - VERIFY_BITS(u8, 26); - VERIFY_BITS(d, 31); - /* VERIFY_BITS(c, 64); */ - VERIFY_CHECK(c <= 0x9000016FBFFFC2F8ULL); - /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - - r[3] = t3; - VERIFY_BITS(r[3], 26); - /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[4] = t4; - VERIFY_BITS(r[4], 26); - /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[5] = t5; - VERIFY_BITS(r[5], 26); - /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[6] = t6; - VERIFY_BITS(r[6], 26); - /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[7] = t7; - VERIFY_BITS(r[7], 26); - /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - - r[8] = c & M; c >>= 26; c += u8 * R1; - VERIFY_BITS(r[8], 26); - VERIFY_BITS(c, 39); - /* [d u8 0 0 0 0 0 0 0 0 t9+c-u8*R1 r8-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - /* [d 0 0 0 0 0 0 0 0 0 t9+c r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - c += d * R0 + t9; - VERIFY_BITS(c, 45); - /* [d 0 0 0 0 0 0 0 0 0 c-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4); - VERIFY_BITS(r[9], 22); - VERIFY_BITS(c, 46); - /* [d 0 0 0 0 0 0 0 0 r9+((c-d*R1<<4)<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - /* [d 0 0 0 0 0 0 0 -d*R1 r9+(c<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - - d = c * (R0 >> 4) + t0; - VERIFY_BITS(d, 56); - /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 d-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[0] = d & M; d >>= 26; - VERIFY_BITS(r[0], 26); - VERIFY_BITS(d, 30); - /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1+d r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 
p0] */ - d += c * (R1 >> 4) + t1; - VERIFY_BITS(d, 53); - VERIFY_CHECK(d <= 0x10000003FFFFBFULL); - /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 d-c*R1>>4 r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - /* [r9 r8 r7 r6 r5 r4 r3 t2 d r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[1] = d & M; d >>= 26; - VERIFY_BITS(r[1], 26); - VERIFY_BITS(d, 27); - VERIFY_CHECK(d <= 0x4000000ULL); - /* [r9 r8 r7 r6 r5 r4 r3 t2+d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - d += t2; - VERIFY_BITS(d, 27); - /* [r9 r8 r7 r6 r5 r4 r3 d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[2] = d; - VERIFY_BITS(r[2], 27); - /* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ -} -#endif - -static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) { -#ifdef VERIFY - VERIFY_CHECK(a->magnitude <= 8); - VERIFY_CHECK(b->magnitude <= 8); - secp256k1_fe_verify(a); - secp256k1_fe_verify(b); - VERIFY_CHECK(r != b); -#endif - secp256k1_fe_mul_inner(r->n, a->n, b->n); -#ifdef VERIFY - r->magnitude = 1; - r->normalized = 0; - secp256k1_fe_verify(r); -#endif -} - -static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) { -#ifdef VERIFY - VERIFY_CHECK(a->magnitude <= 8); - secp256k1_fe_verify(a); -#endif - secp256k1_fe_sqr_inner(r->n, a->n); -#ifdef VERIFY - r->magnitude = 1; - r->normalized = 0; - secp256k1_fe_verify(r); -#endif -} - -static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) { - uint32_t mask0, mask1; - mask0 = flag + ~((uint32_t)0); - mask1 = ~mask0; - r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1); - r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1); - r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1); - r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1); - r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1); - r->n[5] = (r->n[5] & mask0) | (a->n[5] & mask1); - r->n[6] = (r->n[6] & mask0) | (a->n[6] & mask1); - r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1); - r->n[8] = (r->n[8] & mask0) | (a->n[8] & mask1); - r->n[9] = (r->n[9] & mask0) | (a->n[9] & mask1); -#ifdef VERIFY - if (a->magnitude > r->magnitude) { - r->magnitude = a->magnitude; - } - r->normalized &= a->normalized; -#endif -} - -static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) { - uint32_t mask0, mask1; - mask0 = flag + ~((uint32_t)0); - mask1 = ~mask0; - r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1); - r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1); - r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1); - r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1); - r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1); - r->n[5] = (r->n[5] & mask0) | (a->n[5] & mask1); - r->n[6] = (r->n[6] & mask0) | (a->n[6] & mask1); - r->n[7] = (r->n[7] & mask0) | (a->n[7] & mask1); -} - -static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) { -#ifdef VERIFY - VERIFY_CHECK(a->normalized); -#endif - r->n[0] = a->n[0] | a->n[1] << 26; - r->n[1] = a->n[1] >> 6 | a->n[2] << 20; - r->n[2] = a->n[2] >> 12 | a->n[3] << 14; - r->n[3] = a->n[3] >> 18 | a->n[4] << 8; - r->n[4] = a->n[4] >> 24 | a->n[5] << 2 | a->n[6] << 28; - r->n[5] = a->n[6] >> 4 | a->n[7] << 22; - r->n[6] = a->n[7] >> 10 | a->n[8] << 16; - r->n[7] = a->n[8] >> 16 | a->n[9] << 10; -} - -static SECP256K1_INLINE void 
secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a) {
-    r->n[0] = a->n[0] & 0x3FFFFFFUL;
-    r->n[1] = a->n[0] >> 26 | ((a->n[1] << 6) & 0x3FFFFFFUL);
-    r->n[2] = a->n[1] >> 20 | ((a->n[2] << 12) & 0x3FFFFFFUL);
-    r->n[3] = a->n[2] >> 14 | ((a->n[3] << 18) & 0x3FFFFFFUL);
-    r->n[4] = a->n[3] >> 8 | ((a->n[4] << 24) & 0x3FFFFFFUL);
-    r->n[5] = (a->n[4] >> 2) & 0x3FFFFFFUL;
-    r->n[6] = a->n[4] >> 28 | ((a->n[5] << 4) & 0x3FFFFFFUL);
-    r->n[7] = a->n[5] >> 22 | ((a->n[6] << 10) & 0x3FFFFFFUL);
-    r->n[8] = a->n[6] >> 16 | ((a->n[7] << 16) & 0x3FFFFFFUL);
-    r->n[9] = a->n[7] >> 10;
-#ifdef VERIFY
-    r->magnitude = 1;
-    r->normalized = 1;
-#endif
-}
-
-#endif
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_5x52.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_5x52.h
deleted file mode 100644
index 8e69a560d..000000000
--- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_5x52.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/**********************************************************************
- * Copyright (c) 2013, 2014 Pieter Wuille                             *
- * Distributed under the MIT software license, see the accompanying   *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
- **********************************************************************/
-
-#ifndef _SECP256K1_FIELD_REPR_
-#define _SECP256K1_FIELD_REPR_
-
-#include <stdint.h>
-
-typedef struct {
-    /* X = sum(i=0..4, elem[i]*2^52) mod n */
-    uint64_t n[5];
-#ifdef VERIFY
-    int magnitude;
-    int normalized;
-#endif
-} secp256k1_fe;
-
-/* Unpacks a constant into an overlapping multi-limbed FE element. */
-#define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \
-    (d0) | (((uint64_t)(d1) & 0xFFFFFUL) << 32), \
-    ((uint64_t)(d1) >> 20) | (((uint64_t)(d2)) << 12) | (((uint64_t)(d3) & 0xFFUL) << 44), \
-    ((uint64_t)(d3) >> 8) | (((uint64_t)(d4) & 0xFFFFFFFUL) << 24), \
-    ((uint64_t)(d4) >> 28) | (((uint64_t)(d5)) << 4) | (((uint64_t)(d6) & 0xFFFFUL) << 36), \
-    ((uint64_t)(d6) >> 16) | (((uint64_t)(d7)) << 16) \
-}
-
-#ifdef VERIFY
-#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0)), 1, 1}
-#else
-#define SECP256K1_FE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {SECP256K1_FE_CONST_INNER((d7), (d6), (d5), (d4), (d3), (d2), (d1), (d0))}
-#endif
-
-typedef struct {
-    uint64_t n[4];
-} secp256k1_fe_storage;
-
-#define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ \
-    (d0) | (((uint64_t)(d1)) << 32), \
-    (d2) | (((uint64_t)(d3)) << 32), \
-    (d4) | (((uint64_t)(d5)) << 32), \
-    (d6) | (((uint64_t)(d7)) << 32) \
-}}
-
-#endif
diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_5x52_asm_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_5x52_asm_impl.h
deleted file mode 100644
index 98cc004bf..000000000
--- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_5x52_asm_impl.h
+++ /dev/null
@@ -1,502 +0,0 @@
-/**********************************************************************
- * Copyright (c) 2013-2014 Diederik Huys, Pieter Wuille               *
- * Distributed under the MIT software license, see the accompanying   *
- * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
- **********************************************************************/
-
-/**
- * Changelog:
- * - March 2013, Diederik Huys: original version
- * - November 2014, Pieter Wuille: updated to use Peter Dettman's parallel multiplication algorithm
- * - December 2014, Pieter Wuille:
converted from YASM to GCC inline assembly - */ - -#ifndef _SECP256K1_FIELD_INNER5X52_IMPL_H_ -#define _SECP256K1_FIELD_INNER5X52_IMPL_H_ - -SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { -/** - * Registers: rdx:rax = multiplication accumulator - * r9:r8 = c - * r15:rcx = d - * r10-r14 = a0-a4 - * rbx = b - * rdi = r - * rsi = a / t? - */ - uint64_t tmp1, tmp2, tmp3; -__asm__ __volatile__( - "movq 0(%%rsi),%%r10\n" - "movq 8(%%rsi),%%r11\n" - "movq 16(%%rsi),%%r12\n" - "movq 24(%%rsi),%%r13\n" - "movq 32(%%rsi),%%r14\n" - - /* d += a3 * b0 */ - "movq 0(%%rbx),%%rax\n" - "mulq %%r13\n" - "movq %%rax,%%rcx\n" - "movq %%rdx,%%r15\n" - /* d += a2 * b1 */ - "movq 8(%%rbx),%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a1 * b2 */ - "movq 16(%%rbx),%%rax\n" - "mulq %%r11\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d = a0 * b3 */ - "movq 24(%%rbx),%%rax\n" - "mulq %%r10\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* c = a4 * b4 */ - "movq 32(%%rbx),%%rax\n" - "mulq %%r14\n" - "movq %%rax,%%r8\n" - "movq %%rdx,%%r9\n" - /* d += (c & M) * R */ - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* c >>= 52 (%%r8 only) */ - "shrdq $52,%%r9,%%r8\n" - /* t3 (tmp1) = d & M */ - "movq %%rcx,%%rsi\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rsi\n" - "movq %%rsi,%q1\n" - /* d >>= 52 */ - "shrdq $52,%%r15,%%rcx\n" - "xorq %%r15,%%r15\n" - /* d += a4 * b0 */ - "movq 0(%%rbx),%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a3 * b1 */ - "movq 8(%%rbx),%%rax\n" - "mulq %%r13\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a2 * b2 */ - "movq 16(%%rbx),%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a1 * b3 */ - "movq 24(%%rbx),%%rax\n" - "mulq %%r11\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a0 * b4 */ - "movq 32(%%rbx),%%rax\n" - "mulq %%r10\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += c * R */ - "movq %%r8,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* t4 = d & M (%%rsi) */ - "movq %%rcx,%%rsi\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rsi\n" - /* d >>= 52 */ - "shrdq $52,%%r15,%%rcx\n" - "xorq %%r15,%%r15\n" - /* tx = t4 >> 48 (tmp3) */ - "movq %%rsi,%%rax\n" - "shrq $48,%%rax\n" - "movq %%rax,%q3\n" - /* t4 &= (M >> 4) (tmp2) */ - "movq $0xffffffffffff,%%rax\n" - "andq %%rax,%%rsi\n" - "movq %%rsi,%q2\n" - /* c = a0 * b0 */ - "movq 0(%%rbx),%%rax\n" - "mulq %%r10\n" - "movq %%rax,%%r8\n" - "movq %%rdx,%%r9\n" - /* d += a4 * b1 */ - "movq 8(%%rbx),%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a3 * b2 */ - "movq 16(%%rbx),%%rax\n" - "mulq %%r13\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a2 * b3 */ - "movq 24(%%rbx),%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a1 * b4 */ - "movq 32(%%rbx),%%rax\n" - "mulq %%r11\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* u0 = d & M (%%rsi) */ - "movq %%rcx,%%rsi\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rsi\n" - /* d >>= 52 */ - "shrdq $52,%%r15,%%rcx\n" - "xorq %%r15,%%r15\n" - /* u0 = (u0 << 4) | tx (%%rsi) */ - "shlq $4,%%rsi\n" - "movq %q3,%%rax\n" - "orq %%rax,%%rsi\n" - /* c += u0 * (R >> 4) */ - "movq $0x1000003d1,%%rax\n" - "mulq %%rsi\n" - "addq 
%%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* r[0] = c & M */ - "movq %%r8,%%rax\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rax\n" - "movq %%rax,0(%%rdi)\n" - /* c >>= 52 */ - "shrdq $52,%%r9,%%r8\n" - "xorq %%r9,%%r9\n" - /* c += a1 * b0 */ - "movq 0(%%rbx),%%rax\n" - "mulq %%r11\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* c += a0 * b1 */ - "movq 8(%%rbx),%%rax\n" - "mulq %%r10\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* d += a4 * b2 */ - "movq 16(%%rbx),%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a3 * b3 */ - "movq 24(%%rbx),%%rax\n" - "mulq %%r13\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a2 * b4 */ - "movq 32(%%rbx),%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* c += (d & M) * R */ - "movq %%rcx,%%rax\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* d >>= 52 */ - "shrdq $52,%%r15,%%rcx\n" - "xorq %%r15,%%r15\n" - /* r[1] = c & M */ - "movq %%r8,%%rax\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rax\n" - "movq %%rax,8(%%rdi)\n" - /* c >>= 52 */ - "shrdq $52,%%r9,%%r8\n" - "xorq %%r9,%%r9\n" - /* c += a2 * b0 */ - "movq 0(%%rbx),%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* c += a1 * b1 */ - "movq 8(%%rbx),%%rax\n" - "mulq %%r11\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* c += a0 * b2 (last use of %%r10 = a0) */ - "movq 16(%%rbx),%%rax\n" - "mulq %%r10\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* fetch t3 (%%r10, overwrites a0), t4 (%%rsi) */ - "movq %q2,%%rsi\n" - "movq %q1,%%r10\n" - /* d += a4 * b3 */ - "movq 24(%%rbx),%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* d += a3 * b4 */ - "movq 32(%%rbx),%%rax\n" - "mulq %%r13\n" - "addq %%rax,%%rcx\n" - "adcq %%rdx,%%r15\n" - /* c += (d & M) * R */ - "movq %%rcx,%%rax\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* d >>= 52 (%%rcx only) */ - "shrdq $52,%%r15,%%rcx\n" - /* r[2] = c & M */ - "movq %%r8,%%rax\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rax\n" - "movq %%rax,16(%%rdi)\n" - /* c >>= 52 */ - "shrdq $52,%%r9,%%r8\n" - "xorq %%r9,%%r9\n" - /* c += t3 */ - "addq %%r10,%%r8\n" - /* c += d * R */ - "movq %%rcx,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* r[3] = c & M */ - "movq %%r8,%%rax\n" - "movq $0xfffffffffffff,%%rdx\n" - "andq %%rdx,%%rax\n" - "movq %%rax,24(%%rdi)\n" - /* c >>= 52 (%%r8 only) */ - "shrdq $52,%%r9,%%r8\n" - /* c += t4 (%%r8 only) */ - "addq %%rsi,%%r8\n" - /* r[4] = c */ - "movq %%r8,32(%%rdi)\n" -: "+S"(a), "=m"(tmp1), "=m"(tmp2), "=m"(tmp3) -: "b"(b), "D"(r) -: "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "cc", "memory" -); -} - -SECP256K1_INLINE static void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a) { -/** - * Registers: rdx:rax = multiplication accumulator - * r9:r8 = c - * rcx:rbx = d - * r10-r14 = a0-a4 - * r15 = M (0xfffffffffffff) - * rdi = r - * rsi = a / t? 
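- *
- * Note on the reduction constants used below: p = 2^256 - 0x1000003D1, so a
- * carry of weight 2^260 out of the top 52-bit limb reduces to
- * 0x1000003D1 << 4 = 0x1000003d10 (mod p). That is why partial products are
- * folded back in by multiplying with $0x1000003d10, while the 4-bit-shifted
- * u0 term is folded in with the unshifted $0x1000003d1.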
- */ - uint64_t tmp1, tmp2, tmp3; -__asm__ __volatile__( - "movq 0(%%rsi),%%r10\n" - "movq 8(%%rsi),%%r11\n" - "movq 16(%%rsi),%%r12\n" - "movq 24(%%rsi),%%r13\n" - "movq 32(%%rsi),%%r14\n" - "movq $0xfffffffffffff,%%r15\n" - - /* d = (a0*2) * a3 */ - "leaq (%%r10,%%r10,1),%%rax\n" - "mulq %%r13\n" - "movq %%rax,%%rbx\n" - "movq %%rdx,%%rcx\n" - /* d += (a1*2) * a2 */ - "leaq (%%r11,%%r11,1),%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* c = a4 * a4 */ - "movq %%r14,%%rax\n" - "mulq %%r14\n" - "movq %%rax,%%r8\n" - "movq %%rdx,%%r9\n" - /* d += (c & M) * R */ - "andq %%r15,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* c >>= 52 (%%r8 only) */ - "shrdq $52,%%r9,%%r8\n" - /* t3 (tmp1) = d & M */ - "movq %%rbx,%%rsi\n" - "andq %%r15,%%rsi\n" - "movq %%rsi,%q1\n" - /* d >>= 52 */ - "shrdq $52,%%rcx,%%rbx\n" - "xorq %%rcx,%%rcx\n" - /* a4 *= 2 */ - "addq %%r14,%%r14\n" - /* d += a0 * a4 */ - "movq %%r10,%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* d+= (a1*2) * a3 */ - "leaq (%%r11,%%r11,1),%%rax\n" - "mulq %%r13\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* d += a2 * a2 */ - "movq %%r12,%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* d += c * R */ - "movq %%r8,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* t4 = d & M (%%rsi) */ - "movq %%rbx,%%rsi\n" - "andq %%r15,%%rsi\n" - /* d >>= 52 */ - "shrdq $52,%%rcx,%%rbx\n" - "xorq %%rcx,%%rcx\n" - /* tx = t4 >> 48 (tmp3) */ - "movq %%rsi,%%rax\n" - "shrq $48,%%rax\n" - "movq %%rax,%q3\n" - /* t4 &= (M >> 4) (tmp2) */ - "movq $0xffffffffffff,%%rax\n" - "andq %%rax,%%rsi\n" - "movq %%rsi,%q2\n" - /* c = a0 * a0 */ - "movq %%r10,%%rax\n" - "mulq %%r10\n" - "movq %%rax,%%r8\n" - "movq %%rdx,%%r9\n" - /* d += a1 * a4 */ - "movq %%r11,%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* d += (a2*2) * a3 */ - "leaq (%%r12,%%r12,1),%%rax\n" - "mulq %%r13\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* u0 = d & M (%%rsi) */ - "movq %%rbx,%%rsi\n" - "andq %%r15,%%rsi\n" - /* d >>= 52 */ - "shrdq $52,%%rcx,%%rbx\n" - "xorq %%rcx,%%rcx\n" - /* u0 = (u0 << 4) | tx (%%rsi) */ - "shlq $4,%%rsi\n" - "movq %q3,%%rax\n" - "orq %%rax,%%rsi\n" - /* c += u0 * (R >> 4) */ - "movq $0x1000003d1,%%rax\n" - "mulq %%rsi\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* r[0] = c & M */ - "movq %%r8,%%rax\n" - "andq %%r15,%%rax\n" - "movq %%rax,0(%%rdi)\n" - /* c >>= 52 */ - "shrdq $52,%%r9,%%r8\n" - "xorq %%r9,%%r9\n" - /* a0 *= 2 */ - "addq %%r10,%%r10\n" - /* c += a0 * a1 */ - "movq %%r10,%%rax\n" - "mulq %%r11\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* d += a2 * a4 */ - "movq %%r12,%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* d += a3 * a3 */ - "movq %%r13,%%rax\n" - "mulq %%r13\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* c += (d & M) * R */ - "movq %%rbx,%%rax\n" - "andq %%r15,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* d >>= 52 */ - "shrdq $52,%%rcx,%%rbx\n" - "xorq %%rcx,%%rcx\n" - /* r[1] = c & M */ - "movq %%r8,%%rax\n" - "andq %%r15,%%rax\n" - "movq %%rax,8(%%rdi)\n" - /* c >>= 52 */ - "shrdq $52,%%r9,%%r8\n" - "xorq %%r9,%%r9\n" - /* c += a0 * a2 (last use of %%r10) */ - "movq %%r10,%%rax\n" - "mulq %%r12\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* fetch t3 (%%r10, overwrites a0),t4 (%%rsi) */ - "movq 
%q2,%%rsi\n" - "movq %q1,%%r10\n" - /* c += a1 * a1 */ - "movq %%r11,%%rax\n" - "mulq %%r11\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* d += a3 * a4 */ - "movq %%r13,%%rax\n" - "mulq %%r14\n" - "addq %%rax,%%rbx\n" - "adcq %%rdx,%%rcx\n" - /* c += (d & M) * R */ - "movq %%rbx,%%rax\n" - "andq %%r15,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* d >>= 52 (%%rbx only) */ - "shrdq $52,%%rcx,%%rbx\n" - /* r[2] = c & M */ - "movq %%r8,%%rax\n" - "andq %%r15,%%rax\n" - "movq %%rax,16(%%rdi)\n" - /* c >>= 52 */ - "shrdq $52,%%r9,%%r8\n" - "xorq %%r9,%%r9\n" - /* c += t3 */ - "addq %%r10,%%r8\n" - /* c += d * R */ - "movq %%rbx,%%rax\n" - "movq $0x1000003d10,%%rdx\n" - "mulq %%rdx\n" - "addq %%rax,%%r8\n" - "adcq %%rdx,%%r9\n" - /* r[3] = c & M */ - "movq %%r8,%%rax\n" - "andq %%r15,%%rax\n" - "movq %%rax,24(%%rdi)\n" - /* c >>= 52 (%%r8 only) */ - "shrdq $52,%%r9,%%r8\n" - /* c += t4 (%%r8 only) */ - "addq %%rsi,%%r8\n" - /* r[4] = c */ - "movq %%r8,32(%%rdi)\n" -: "+S"(a), "=m"(tmp1), "=m"(tmp2), "=m"(tmp3) -: "D"(r) -: "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "cc", "memory" -); -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_5x52_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_5x52_impl.h deleted file mode 100644 index dd88f38c7..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_5x52_impl.h +++ /dev/null @@ -1,451 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_FIELD_REPR_IMPL_H_ -#define _SECP256K1_FIELD_REPR_IMPL_H_ - -#if defined HAVE_CONFIG_H -#include "libsecp256k1-config.h" -#endif - -#include "util.h" -#include "num.h" -#include "field.h" - -#if defined(USE_ASM_X86_64) -#include "field_5x52_asm_impl.h" -#else -#include "field_5x52_int128_impl.h" -#endif - -/** Implements arithmetic modulo FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE FFFFFC2F, - * represented as 5 uint64_t's in base 2^52. The values are allowed to contain >52 bits each. In particular, - * each FieldElem has a 'magnitude' associated with it. Internally, a magnitude M means each element - * is at most M*(2^53-1), except the most significant one, which is limited to M*(2^49-1). All operations - * accept any input with magnitude at most M, and have different rules for propagating magnitude to their - * output. - */ - -#ifdef VERIFY -static void secp256k1_fe_verify(const secp256k1_fe *a) { - const uint64_t *d = a->n; - int m = a->normalized ? 1 : 2 * a->magnitude, r = 1; - /* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. 
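- * The factor m doubles the magnitude for non-normalized values: a limb of
- * magnitude M may hold up to M*(2^53-1), which the per-limb checks below
- * approximate as (2^52-1)*(2*M).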
*/ - r &= (d[0] <= 0xFFFFFFFFFFFFFULL * m); - r &= (d[1] <= 0xFFFFFFFFFFFFFULL * m); - r &= (d[2] <= 0xFFFFFFFFFFFFFULL * m); - r &= (d[3] <= 0xFFFFFFFFFFFFFULL * m); - r &= (d[4] <= 0x0FFFFFFFFFFFFULL * m); - r &= (a->magnitude >= 0); - r &= (a->magnitude <= 2048); - if (a->normalized) { - r &= (a->magnitude <= 1); - if (r && (d[4] == 0x0FFFFFFFFFFFFULL) && ((d[3] & d[2] & d[1]) == 0xFFFFFFFFFFFFFULL)) { - r &= (d[0] < 0xFFFFEFFFFFC2FULL); - } - } - VERIFY_CHECK(r == 1); -} -#endif - -static void secp256k1_fe_normalize(secp256k1_fe *r) { - uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; - - /* Reduce t4 at the start so there will be at most a single carry from the first pass */ - uint64_t m; - uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL; - - /* The first pass ensures the magnitude is 1, ... */ - t0 += x * 0x1000003D1ULL; - t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; - t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1; - t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2; - t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3; - - /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */ - VERIFY_CHECK(t4 >> 49 == 0); - - /* At most a single final reduction is needed; check if the value is >= the field characteristic */ - x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL) - & (t0 >= 0xFFFFEFFFFFC2FULL)); - - /* Apply the final reduction (for constant-time behaviour, we do it always) */ - t0 += x * 0x1000003D1ULL; - t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; - t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; - t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; - t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; - - /* If t4 didn't carry to bit 48 already, then it should have after any final reduction */ - VERIFY_CHECK(t4 >> 48 == x); - - /* Mask off the possible multiple of 2^256 from the final reduction */ - t4 &= 0x0FFFFFFFFFFFFULL; - - r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4; - -#ifdef VERIFY - r->magnitude = 1; - r->normalized = 1; - secp256k1_fe_verify(r); -#endif -} - -static void secp256k1_fe_normalize_weak(secp256k1_fe *r) { - uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; - - /* Reduce t4 at the start so there will be at most a single carry from the first pass */ - uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL; - - /* The first pass ensures the magnitude is 1, ... */ - t0 += x * 0x1000003D1ULL; - t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; - t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; - t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; - t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; - - /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */ - VERIFY_CHECK(t4 >> 49 == 0); - - r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4; - -#ifdef VERIFY - r->magnitude = 1; - secp256k1_fe_verify(r); -#endif -} - -static void secp256k1_fe_normalize_var(secp256k1_fe *r) { - uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; - - /* Reduce t4 at the start so there will be at most a single carry from the first pass */ - uint64_t m; - uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL; - - /* The first pass ensures the magnitude is 1, ... */ - t0 += x * 0x1000003D1ULL; - t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; - t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1; - t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2; - t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3; - - /* ... 
except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */ - VERIFY_CHECK(t4 >> 49 == 0); - - /* At most a single final reduction is needed; check if the value is >= the field characteristic */ - x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL) - & (t0 >= 0xFFFFEFFFFFC2FULL)); - - if (x) { - t0 += 0x1000003D1ULL; - t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; - t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; - t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; - t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; - - /* If t4 didn't carry to bit 48 already, then it should have after any final reduction */ - VERIFY_CHECK(t4 >> 48 == x); - - /* Mask off the possible multiple of 2^256 from the final reduction */ - t4 &= 0x0FFFFFFFFFFFFULL; - } - - r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4; - -#ifdef VERIFY - r->magnitude = 1; - r->normalized = 1; - secp256k1_fe_verify(r); -#endif -} - -static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) { - uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; - - /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */ - uint64_t z0, z1; - - /* Reduce t4 at the start so there will be at most a single carry from the first pass */ - uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL; - - /* The first pass ensures the magnitude is 1, ... */ - t0 += x * 0x1000003D1ULL; - t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; z0 = t0; z1 = t0 ^ 0x1000003D0ULL; - t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1; - t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2; - t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3; - z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL; - - /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */ - VERIFY_CHECK(t4 >> 49 == 0); - - return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL); -} - -static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) { - uint64_t t0, t1, t2, t3, t4; - uint64_t z0, z1; - uint64_t x; - - t0 = r->n[0]; - t4 = r->n[4]; - - /* Reduce t4 at the start so there will be at most a single carry from the first pass */ - x = t4 >> 48; - - /* The first pass ensures the magnitude is 1, ... */ - t0 += x * 0x1000003D1ULL; - - /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */ - z0 = t0 & 0xFFFFFFFFFFFFFULL; - z1 = z0 ^ 0x1000003D0ULL; - - /* Fast return path should catch the majority of cases */ - if ((z0 != 0ULL) & (z1 != 0xFFFFFFFFFFFFFULL)) { - return 0; - } - - t1 = r->n[1]; - t2 = r->n[2]; - t3 = r->n[3]; - - t4 &= 0x0FFFFFFFFFFFFULL; - - t1 += (t0 >> 52); - t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1; - t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2; - t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3; - z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL; - - /* ... except for a possible carry at bit 48 of t4 (i.e. 
bit 256 of the field element) */ - VERIFY_CHECK(t4 >> 49 == 0); - - return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL); -} - -SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) { - r->n[0] = a; - r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0; -#ifdef VERIFY - r->magnitude = 1; - r->normalized = 1; - secp256k1_fe_verify(r); -#endif -} - -SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) { - const uint64_t *t = a->n; -#ifdef VERIFY - VERIFY_CHECK(a->normalized); - secp256k1_fe_verify(a); -#endif - return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0; -} - -SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe *a) { -#ifdef VERIFY - VERIFY_CHECK(a->normalized); - secp256k1_fe_verify(a); -#endif - return a->n[0] & 1; -} - -SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) { - int i; -#ifdef VERIFY - a->magnitude = 0; - a->normalized = 1; -#endif - for (i=0; i<5; i++) { - a->n[i] = 0; - } -} - -static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) { - int i; -#ifdef VERIFY - VERIFY_CHECK(a->normalized); - VERIFY_CHECK(b->normalized); - secp256k1_fe_verify(a); - secp256k1_fe_verify(b); -#endif - for (i = 4; i >= 0; i--) { - if (a->n[i] > b->n[i]) { - return 1; - } - if (a->n[i] < b->n[i]) { - return -1; - } - } - return 0; -} - -static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) { - int i; - r->n[0] = r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0; - for (i=0; i<32; i++) { - int j; - for (j=0; j<2; j++) { - int limb = (8*i+4*j)/52; - int shift = (8*i+4*j)%52; - r->n[limb] |= (uint64_t)((a[31-i] >> (4*j)) & 0xF) << shift; - } - } - if (r->n[4] == 0x0FFFFFFFFFFFFULL && (r->n[3] & r->n[2] & r->n[1]) == 0xFFFFFFFFFFFFFULL && r->n[0] >= 0xFFFFEFFFFFC2FULL) { - return 0; - } -#ifdef VERIFY - r->magnitude = 1; - r->normalized = 1; - secp256k1_fe_verify(r); -#endif - return 1; -} - -/** Convert a field element to a 32-byte big endian value. 
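- * Each byte is assembled from two 4-bit nibbles of the limb array: for output
- * byte r[31-i], nibble j lives in limb (8*i+4*j)/52 at shift (8*i+4*j)%52,
- * so e.g. the least significant byte r[31] takes bits 0..7 of limb 0.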
Requires the input to be normalized */ -static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) { - int i; -#ifdef VERIFY - VERIFY_CHECK(a->normalized); - secp256k1_fe_verify(a); -#endif - for (i=0; i<32; i++) { - int j; - int c = 0; - for (j=0; j<2; j++) { - int limb = (8*i+4*j)/52; - int shift = (8*i+4*j)%52; - c |= ((a->n[limb] >> shift) & 0xF) << (4 * j); - } - r[31-i] = c; - } -} - -SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m) { -#ifdef VERIFY - VERIFY_CHECK(a->magnitude <= m); - secp256k1_fe_verify(a); -#endif - r->n[0] = 0xFFFFEFFFFFC2FULL * 2 * (m + 1) - a->n[0]; - r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[1]; - r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[2]; - r->n[3] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[3]; - r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * (m + 1) - a->n[4]; -#ifdef VERIFY - r->magnitude = m + 1; - r->normalized = 0; - secp256k1_fe_verify(r); -#endif -} - -SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe *r, int a) { - r->n[0] *= a; - r->n[1] *= a; - r->n[2] *= a; - r->n[3] *= a; - r->n[4] *= a; -#ifdef VERIFY - r->magnitude *= a; - r->normalized = 0; - secp256k1_fe_verify(r); -#endif -} - -SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a) { -#ifdef VERIFY - secp256k1_fe_verify(a); -#endif - r->n[0] += a->n[0]; - r->n[1] += a->n[1]; - r->n[2] += a->n[2]; - r->n[3] += a->n[3]; - r->n[4] += a->n[4]; -#ifdef VERIFY - r->magnitude += a->magnitude; - r->normalized = 0; - secp256k1_fe_verify(r); -#endif -} - -static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) { -#ifdef VERIFY - VERIFY_CHECK(a->magnitude <= 8); - VERIFY_CHECK(b->magnitude <= 8); - secp256k1_fe_verify(a); - secp256k1_fe_verify(b); - VERIFY_CHECK(r != b); -#endif - secp256k1_fe_mul_inner(r->n, a->n, b->n); -#ifdef VERIFY - r->magnitude = 1; - r->normalized = 0; - secp256k1_fe_verify(r); -#endif -} - -static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) { -#ifdef VERIFY - VERIFY_CHECK(a->magnitude <= 8); - secp256k1_fe_verify(a); -#endif - secp256k1_fe_sqr_inner(r->n, a->n); -#ifdef VERIFY - r->magnitude = 1; - r->normalized = 0; - secp256k1_fe_verify(r); -#endif -} - -static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) { - uint64_t mask0, mask1; - mask0 = flag + ~((uint64_t)0); - mask1 = ~mask0; - r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1); - r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1); - r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1); - r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1); - r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1); -#ifdef VERIFY - if (a->magnitude > r->magnitude) { - r->magnitude = a->magnitude; - } - r->normalized &= a->normalized; -#endif -} - -static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) { - uint64_t mask0, mask1; - mask0 = flag + ~((uint64_t)0); - mask1 = ~mask0; - r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1); - r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1); - r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1); - r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1); -} - -static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) { -#ifdef VERIFY - VERIFY_CHECK(a->normalized); -#endif - r->n[0] = a->n[0] | a->n[1] << 52; - r->n[1] = a->n[1] >> 12 | a->n[2] << 40; - r->n[2] = a->n[2] >> 24 | a->n[3] << 28; - r->n[3] = a->n[3] >> 36 
| a->n[4] << 16; -} - -static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a) { - r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL; - r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL); - r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL); - r->n[3] = a->n[2] >> 28 | ((a->n[3] << 36) & 0xFFFFFFFFFFFFFULL); - r->n[4] = a->n[3] >> 16; -#ifdef VERIFY - r->magnitude = 1; - r->normalized = 1; -#endif -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_5x52_int128_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_5x52_int128_impl.h deleted file mode 100644 index 0bf22bdd3..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_5x52_int128_impl.h +++ /dev/null @@ -1,277 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_FIELD_INNER5X52_IMPL_H_ -#define _SECP256K1_FIELD_INNER5X52_IMPL_H_ - -#include <stdint.h> - -#ifdef VERIFY -#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0) -#else -#define VERIFY_BITS(x, n) do { } while(0) -#endif - -SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) { - uint128_t c, d; - uint64_t t3, t4, tx, u0; - uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4]; - const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL; - - VERIFY_BITS(a[0], 56); - VERIFY_BITS(a[1], 56); - VERIFY_BITS(a[2], 56); - VERIFY_BITS(a[3], 56); - VERIFY_BITS(a[4], 52); - VERIFY_BITS(b[0], 56); - VERIFY_BITS(b[1], 56); - VERIFY_BITS(b[2], 56); - VERIFY_BITS(b[3], 56); - VERIFY_BITS(b[4], 52); - VERIFY_CHECK(r != b); - - /* [... a b c] is a shorthand for ... + a<<104 + b<<52 + c<<0 mod n. - * px is a shorthand for sum(a[i]*b[x-i], i=0..x). - * Note that [x 0 0 0 0 0] = [x*R]. 
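- * For example, the first sum below is p3 = a0*b[3] + a1*b[2] + a2*b[1] + a3*b[0],
- * the coefficient of 2^(3*52) in the full product, accumulated in 128 bits.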
- */ - - d = (uint128_t)a0 * b[3] - + (uint128_t)a1 * b[2] - + (uint128_t)a2 * b[1] - + (uint128_t)a3 * b[0]; - VERIFY_BITS(d, 114); - /* [d 0 0 0] = [p3 0 0 0] */ - c = (uint128_t)a4 * b[4]; - VERIFY_BITS(c, 112); - /* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - d += (c & M) * R; c >>= 52; - VERIFY_BITS(d, 115); - VERIFY_BITS(c, 60); - /* [c 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - t3 = d & M; d >>= 52; - VERIFY_BITS(t3, 52); - VERIFY_BITS(d, 63); - /* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - - d += (uint128_t)a0 * b[4] - + (uint128_t)a1 * b[3] - + (uint128_t)a2 * b[2] - + (uint128_t)a3 * b[1] - + (uint128_t)a4 * b[0]; - VERIFY_BITS(d, 115); - /* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - d += c * R; - VERIFY_BITS(d, 116); - /* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - t4 = d & M; d >>= 52; - VERIFY_BITS(t4, 52); - VERIFY_BITS(d, 64); - /* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - tx = (t4 >> 48); t4 &= (M >> 4); - VERIFY_BITS(tx, 4); - VERIFY_BITS(t4, 48); - /* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - - c = (uint128_t)a0 * b[0]; - VERIFY_BITS(c, 112); - /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */ - d += (uint128_t)a1 * b[4] - + (uint128_t)a2 * b[3] - + (uint128_t)a3 * b[2] - + (uint128_t)a4 * b[1]; - VERIFY_BITS(d, 115); - /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - u0 = d & M; d >>= 52; - VERIFY_BITS(u0, 52); - VERIFY_BITS(d, 63); - /* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - /* [d 0 t4+(tx<<48)+(u0<<52) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - u0 = (u0 << 4) | tx; - VERIFY_BITS(u0, 56); - /* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - c += (uint128_t)u0 * (R >> 4); - VERIFY_BITS(c, 115); - /* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - r[0] = c & M; c >>= 52; - VERIFY_BITS(r[0], 52); - VERIFY_BITS(c, 61); - /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */ - - c += (uint128_t)a0 * b[1] - + (uint128_t)a1 * b[0]; - VERIFY_BITS(c, 114); - /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */ - d += (uint128_t)a2 * b[4] - + (uint128_t)a3 * b[3] - + (uint128_t)a4 * b[2]; - VERIFY_BITS(d, 114); - /* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - c += (d & M) * R; d >>= 52; - VERIFY_BITS(c, 115); - VERIFY_BITS(d, 62); - /* [d 0 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - r[1] = c & M; c >>= 52; - VERIFY_BITS(r[1], 52); - VERIFY_BITS(c, 63); - /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - - c += (uint128_t)a0 * b[2] - + (uint128_t)a1 * b[1] - + (uint128_t)a2 * b[0]; - VERIFY_BITS(c, 114); - /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */ - d += (uint128_t)a3 * b[4] - + (uint128_t)a4 * b[3]; - VERIFY_BITS(d, 114); - /* [d 0 0 t4 t3 c t1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - c += (d & M) * R; d >>= 52; - VERIFY_BITS(c, 115); - VERIFY_BITS(d, 62); - /* [d 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - - /* [d 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[2] = c & M; c >>= 52; - VERIFY_BITS(r[2], 52); - VERIFY_BITS(c, 63); - /* [d 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - c += d * R + t3; - VERIFY_BITS(c, 100); - /* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[3] = c & M; c >>= 52; - VERIFY_BITS(r[3], 52); - VERIFY_BITS(c, 48); - /* [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - c += t4; - VERIFY_BITS(c, 49); - /* [c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[4] = c; - VERIFY_BITS(r[4], 49); - /* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ -} - -SECP256K1_INLINE static 
void secp256k1_fe_sqr_inner(uint64_t *r, const uint64_t *a) { - uint128_t c, d; - uint64_t a0 = a[0], a1 = a[1], a2 = a[2], a3 = a[3], a4 = a[4]; - int64_t t3, t4, tx, u0; - const uint64_t M = 0xFFFFFFFFFFFFFULL, R = 0x1000003D10ULL; - - VERIFY_BITS(a[0], 56); - VERIFY_BITS(a[1], 56); - VERIFY_BITS(a[2], 56); - VERIFY_BITS(a[3], 56); - VERIFY_BITS(a[4], 52); - - /** [... a b c] is a shorthand for ... + a<<104 + b<<52 + c<<0 mod n. - * px is a shorthand for sum(a[i]*a[x-i], i=0..x). - * Note that [x 0 0 0 0 0] = [x*R]. - */ - - d = (uint128_t)(a0*2) * a3 - + (uint128_t)(a1*2) * a2; - VERIFY_BITS(d, 114); - /* [d 0 0 0] = [p3 0 0 0] */ - c = (uint128_t)a4 * a4; - VERIFY_BITS(c, 112); - /* [c 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - d += (c & M) * R; c >>= 52; - VERIFY_BITS(d, 115); - VERIFY_BITS(c, 60); - /* [c 0 0 0 0 0 d 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - t3 = d & M; d >>= 52; - VERIFY_BITS(t3, 52); - VERIFY_BITS(d, 63); - /* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 0 p3 0 0 0] */ - - a4 *= 2; - d += (uint128_t)a0 * a4 - + (uint128_t)(a1*2) * a3 - + (uint128_t)a2 * a2; - VERIFY_BITS(d, 115); - /* [c 0 0 0 0 d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - d += c * R; - VERIFY_BITS(d, 116); - /* [d t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - t4 = d & M; d >>= 52; - VERIFY_BITS(t4, 52); - VERIFY_BITS(d, 64); - /* [d t4 t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - tx = (t4 >> 48); t4 &= (M >> 4); - VERIFY_BITS(tx, 4); - VERIFY_BITS(t4, 48); - /* [d t4+(tx<<48) t3 0 0 0] = [p8 0 0 0 p4 p3 0 0 0] */ - - c = (uint128_t)a0 * a0; - VERIFY_BITS(c, 112); - /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 0 p4 p3 0 0 p0] */ - d += (uint128_t)a1 * a4 - + (uint128_t)(a2*2) * a3; - VERIFY_BITS(d, 114); - /* [d t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - u0 = d & M; d >>= 52; - VERIFY_BITS(u0, 52); - VERIFY_BITS(d, 62); - /* [d u0 t4+(tx<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - /* [d 0 t4+(tx<<48)+(u0<<52) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - u0 = (u0 << 4) | tx; - VERIFY_BITS(u0, 56); - /* [d 0 t4+(u0<<48) t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - c += (uint128_t)u0 * (R >> 4); - VERIFY_BITS(c, 113); - /* [d 0 t4 t3 0 0 c] = [p8 0 0 p5 p4 p3 0 0 p0] */ - r[0] = c & M; c >>= 52; - VERIFY_BITS(r[0], 52); - VERIFY_BITS(c, 61); - /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 0 p0] */ - - a0 *= 2; - c += (uint128_t)a0 * a1; - VERIFY_BITS(c, 114); - /* [d 0 t4 t3 0 c r0] = [p8 0 0 p5 p4 p3 0 p1 p0] */ - d += (uint128_t)a2 * a4 - + (uint128_t)a3 * a3; - VERIFY_BITS(d, 114); - /* [d 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - c += (d & M) * R; d >>= 52; - VERIFY_BITS(c, 115); - VERIFY_BITS(d, 62); - /* [d 0 0 t4 t3 0 c r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - r[1] = c & M; c >>= 52; - VERIFY_BITS(r[1], 52); - VERIFY_BITS(c, 63); - /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 0 p1 p0] */ - - c += (uint128_t)a0 * a2 - + (uint128_t)a1 * a1; - VERIFY_BITS(c, 114); - /* [d 0 0 t4 t3 c r1 r0] = [p8 0 p6 p5 p4 p3 p2 p1 p0] */ - d += (uint128_t)a3 * a4; - VERIFY_BITS(d, 114); - /* [d 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - c += (d & M) * R; d >>= 52; - VERIFY_BITS(c, 115); - VERIFY_BITS(d, 62); - /* [d 0 0 0 t4 t3 c r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[2] = c & M; c >>= 52; - VERIFY_BITS(r[2], 52); - VERIFY_BITS(c, 63); - /* [d 0 0 0 t4 t3+c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - - c += d * R + t3; - VERIFY_BITS(c, 100); - /* [t4 c r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[3] = c & M; c >>= 52; - VERIFY_BITS(r[3], 52); - VERIFY_BITS(c, 48); - /* [t4+c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 
p1 p0] */ - c += t4; - VERIFY_BITS(c, 49); - /* [c r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ - r[4] = c; - VERIFY_BITS(r[4], 49); - /* [r4 r3 r2 r1 r0] = [p8 p7 p6 p5 p4 p3 p2 p1 p0] */ -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_impl.h deleted file mode 100644 index 5127b279b..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/field_impl.h +++ /dev/null @@ -1,315 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_FIELD_IMPL_H_ -#define _SECP256K1_FIELD_IMPL_H_ - -#if defined HAVE_CONFIG_H -#include "libsecp256k1-config.h" -#endif - -#include "util.h" - -#if defined(USE_FIELD_10X26) -#include "field_10x26_impl.h" -#elif defined(USE_FIELD_5X52) -#include "field_5x52_impl.h" -#else -#error "Please select field implementation" -#endif - -SECP256K1_INLINE static int secp256k1_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b) { - secp256k1_fe na; - secp256k1_fe_negate(&na, a, 1); - secp256k1_fe_add(&na, b); - return secp256k1_fe_normalizes_to_zero(&na); -} - -SECP256K1_INLINE static int secp256k1_fe_equal_var(const secp256k1_fe *a, const secp256k1_fe *b) { - secp256k1_fe na; - secp256k1_fe_negate(&na, a, 1); - secp256k1_fe_add(&na, b); - return secp256k1_fe_normalizes_to_zero_var(&na); -} - -static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a) { - /** Given that p is congruent to 3 mod 4, we can compute the square root of - * a mod p as the (p+1)/4'th power of a. - * - * As (p+1)/4 is an even number, it will have the same result for a and for - * (-a). Only one of these two numbers actually has a square root however, - * so we test at the end by squaring and comparing to the input. - * Also because (p+1)/4 is an even number, the computed square root is - * itself always a square (a ** ((p+1)/4) is the square of a ** ((p+1)/8)). - */ - secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; - int j; - - /** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in - * { 2, 22, 223 }. 
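- * A block of k one-bits in the exponent corresponds to a factor a^(2^k - 1);
- * e.g. x22 below holds a^(2^22 - 1), built by squaring x11 eleven times and
- * multiplying by x11 once more.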
Use an addition chain to calculate 2^n - 1 for each block: - * 1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] - */ - - secp256k1_fe_sqr(&x2, a); - secp256k1_fe_mul(&x2, &x2, a); - - secp256k1_fe_sqr(&x3, &x2); - secp256k1_fe_mul(&x3, &x3, a); - - x6 = x3; - for (j=0; j<3; j++) { - secp256k1_fe_sqr(&x6, &x6); - } - secp256k1_fe_mul(&x6, &x6, &x3); - - x9 = x6; - for (j=0; j<3; j++) { - secp256k1_fe_sqr(&x9, &x9); - } - secp256k1_fe_mul(&x9, &x9, &x3); - - x11 = x9; - for (j=0; j<2; j++) { - secp256k1_fe_sqr(&x11, &x11); - } - secp256k1_fe_mul(&x11, &x11, &x2); - - x22 = x11; - for (j=0; j<11; j++) { - secp256k1_fe_sqr(&x22, &x22); - } - secp256k1_fe_mul(&x22, &x22, &x11); - - x44 = x22; - for (j=0; j<22; j++) { - secp256k1_fe_sqr(&x44, &x44); - } - secp256k1_fe_mul(&x44, &x44, &x22); - - x88 = x44; - for (j=0; j<44; j++) { - secp256k1_fe_sqr(&x88, &x88); - } - secp256k1_fe_mul(&x88, &x88, &x44); - - x176 = x88; - for (j=0; j<88; j++) { - secp256k1_fe_sqr(&x176, &x176); - } - secp256k1_fe_mul(&x176, &x176, &x88); - - x220 = x176; - for (j=0; j<44; j++) { - secp256k1_fe_sqr(&x220, &x220); - } - secp256k1_fe_mul(&x220, &x220, &x44); - - x223 = x220; - for (j=0; j<3; j++) { - secp256k1_fe_sqr(&x223, &x223); - } - secp256k1_fe_mul(&x223, &x223, &x3); - - /* The final result is then assembled using a sliding window over the blocks. */ - - t1 = x223; - for (j=0; j<23; j++) { - secp256k1_fe_sqr(&t1, &t1); - } - secp256k1_fe_mul(&t1, &t1, &x22); - for (j=0; j<6; j++) { - secp256k1_fe_sqr(&t1, &t1); - } - secp256k1_fe_mul(&t1, &t1, &x2); - secp256k1_fe_sqr(&t1, &t1); - secp256k1_fe_sqr(r, &t1); - - /* Check that a square root was actually calculated */ - - secp256k1_fe_sqr(&t1, r); - return secp256k1_fe_equal(&t1, a); -} - -static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *a) { - secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; - int j; - - /** The binary representation of (p - 2) has 5 blocks of 1s, with lengths in - * { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block: - * [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] - */ - - secp256k1_fe_sqr(&x2, a); - secp256k1_fe_mul(&x2, &x2, a); - - secp256k1_fe_sqr(&x3, &x2); - secp256k1_fe_mul(&x3, &x3, a); - - x6 = x3; - for (j=0; j<3; j++) { - secp256k1_fe_sqr(&x6, &x6); - } - secp256k1_fe_mul(&x6, &x6, &x3); - - x9 = x6; - for (j=0; j<3; j++) { - secp256k1_fe_sqr(&x9, &x9); - } - secp256k1_fe_mul(&x9, &x9, &x3); - - x11 = x9; - for (j=0; j<2; j++) { - secp256k1_fe_sqr(&x11, &x11); - } - secp256k1_fe_mul(&x11, &x11, &x2); - - x22 = x11; - for (j=0; j<11; j++) { - secp256k1_fe_sqr(&x22, &x22); - } - secp256k1_fe_mul(&x22, &x22, &x11); - - x44 = x22; - for (j=0; j<22; j++) { - secp256k1_fe_sqr(&x44, &x44); - } - secp256k1_fe_mul(&x44, &x44, &x22); - - x88 = x44; - for (j=0; j<44; j++) { - secp256k1_fe_sqr(&x88, &x88); - } - secp256k1_fe_mul(&x88, &x88, &x44); - - x176 = x88; - for (j=0; j<88; j++) { - secp256k1_fe_sqr(&x176, &x176); - } - secp256k1_fe_mul(&x176, &x176, &x88); - - x220 = x176; - for (j=0; j<44; j++) { - secp256k1_fe_sqr(&x220, &x220); - } - secp256k1_fe_mul(&x220, &x220, &x44); - - x223 = x220; - for (j=0; j<3; j++) { - secp256k1_fe_sqr(&x223, &x223); - } - secp256k1_fe_mul(&x223, &x223, &x3); - - /* The final result is then assembled using a sliding window over the blocks. 
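- * (By Fermat's little theorem, a^(p-2) == a^(-1) mod p for a != 0, which is
- * exactly the exponent being computed here.)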
*/ - - t1 = x223; - for (j=0; j<23; j++) { - secp256k1_fe_sqr(&t1, &t1); - } - secp256k1_fe_mul(&t1, &t1, &x22); - for (j=0; j<5; j++) { - secp256k1_fe_sqr(&t1, &t1); - } - secp256k1_fe_mul(&t1, &t1, a); - for (j=0; j<3; j++) { - secp256k1_fe_sqr(&t1, &t1); - } - secp256k1_fe_mul(&t1, &t1, &x2); - for (j=0; j<2; j++) { - secp256k1_fe_sqr(&t1, &t1); - } - secp256k1_fe_mul(r, a, &t1); -} - -static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) { -#if defined(USE_FIELD_INV_BUILTIN) - secp256k1_fe_inv(r, a); -#elif defined(USE_FIELD_INV_NUM) - secp256k1_num n, m; - static const secp256k1_fe negone = SECP256K1_FE_CONST( - 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, - 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL - ); - /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */ - static const unsigned char prime[32] = { - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F - }; - unsigned char b[32]; - int res; - secp256k1_fe c = *a; - secp256k1_fe_normalize_var(&c); - secp256k1_fe_get_b32(b, &c); - secp256k1_num_set_bin(&n, b, 32); - secp256k1_num_set_bin(&m, prime, 32); - secp256k1_num_mod_inverse(&n, &n, &m); - secp256k1_num_get_bin(b, 32, &n); - res = secp256k1_fe_set_b32(r, b); - (void)res; - VERIFY_CHECK(res); - /* Verify the result is the (unique) valid inverse using non-GMP code. */ - secp256k1_fe_mul(&c, &c, r); - secp256k1_fe_add(&c, &negone); - CHECK(secp256k1_fe_normalizes_to_zero_var(&c)); -#else -#error "Please select field inverse implementation" -#endif -} - -static void secp256k1_fe_inv_all_var(secp256k1_fe *r, const secp256k1_fe *a, size_t len) { - secp256k1_fe u; - size_t i; - if (len < 1) { - return; - } - - VERIFY_CHECK((r + len <= a) || (a + len <= r)); - - r[0] = a[0]; - - i = 0; - while (++i < len) { - secp256k1_fe_mul(&r[i], &r[i - 1], &a[i]); - } - - secp256k1_fe_inv_var(&u, &r[--i]); - - while (i > 0) { - size_t j = i--; - secp256k1_fe_mul(&r[j], &r[i], &u); - secp256k1_fe_mul(&u, &u, &a[j]); - } - - r[0] = u; -} - -static int secp256k1_fe_is_quad_var(const secp256k1_fe *a) { -#ifndef USE_NUM_NONE - unsigned char b[32]; - secp256k1_num n; - secp256k1_num m; - /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. 
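- * Written out, p = 2^256 - 2^32 - 977; the array below is its big-endian byte
- * encoding.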
*/ - static const unsigned char prime[32] = { - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F - }; - - secp256k1_fe c = *a; - secp256k1_fe_normalize_var(&c); - secp256k1_fe_get_b32(b, &c); - secp256k1_num_set_bin(&n, b, 32); - secp256k1_num_set_bin(&m, prime, 32); - return secp256k1_num_jacobi(&n, &m) >= 0; -#else - secp256k1_fe r; - return secp256k1_fe_sqrt(&r, a); -#endif -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/gen_context.c b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/gen_context.c deleted file mode 100644 index 1835fd491..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/gen_context.c +++ /dev/null @@ -1,74 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014, 2015 Thomas Daede, Cory Fields * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#define USE_BASIC_CONFIG 1 - -#include "basic-config.h" -#include "include/secp256k1.h" -#include "field_impl.h" -#include "scalar_impl.h" -#include "group_impl.h" -#include "ecmult_gen_impl.h" - -static void default_error_callback_fn(const char* str, void* data) { - (void)data; - fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str); - abort(); -} - -static const secp256k1_callback default_error_callback = { - default_error_callback_fn, - NULL -}; - -int main(int argc, char **argv) { - secp256k1_ecmult_gen_context ctx; - int inner; - int outer; - FILE* fp; - - (void)argc; - (void)argv; - - fp = fopen("src/ecmult_static_context.h","w"); - if (fp == NULL) { - fprintf(stderr, "Could not open src/ecmult_static_context.h for writing!\n"); - return -1; - } - - fprintf(fp, "#ifndef _SECP256K1_ECMULT_STATIC_CONTEXT_\n"); - fprintf(fp, "#define _SECP256K1_ECMULT_STATIC_CONTEXT_\n"); - fprintf(fp, "#include \"group.h\"\n"); - fprintf(fp, "#define SC SECP256K1_GE_STORAGE_CONST\n"); - fprintf(fp, "static const secp256k1_ge_storage secp256k1_ecmult_static_context[64][16] = {\n"); - - secp256k1_ecmult_gen_context_init(&ctx); - secp256k1_ecmult_gen_context_build(&ctx, &default_error_callback); - for(outer = 0; outer != 64; outer++) { - fprintf(fp,"{\n"); - for(inner = 0; inner != 16; inner++) { - fprintf(fp," SC(%uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu)", SECP256K1_GE_STORAGE_CONST_GET((*ctx.prec)[outer][inner])); - if (inner != 15) { - fprintf(fp,",\n"); - } else { - fprintf(fp,"\n"); - } - } - if (outer != 63) { - fprintf(fp,"},\n"); - } else { - fprintf(fp,"}\n"); - } - } - fprintf(fp,"};\n"); - secp256k1_ecmult_gen_context_clear(&ctx); - - fprintf(fp, "#undef SC\n"); - fprintf(fp, "#endif\n"); - fclose(fp); - - return 0; -} diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/group.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/group.h deleted file mode 100644 index 4957b248f..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/group.h +++ /dev/null @@ -1,144 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - 
**********************************************************************/ - -#ifndef _SECP256K1_GROUP_ -#define _SECP256K1_GROUP_ - -#include "num.h" -#include "field.h" - -/** A group element of the secp256k1 curve, in affine coordinates. */ -typedef struct { - secp256k1_fe x; - secp256k1_fe y; - int infinity; /* whether this represents the point at infinity */ -} secp256k1_ge; - -#define SECP256K1_GE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), 0} -#define SECP256K1_GE_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1} - -/** A group element of the secp256k1 curve, in jacobian coordinates. */ -typedef struct { - secp256k1_fe x; /* actual X: x/z^2 */ - secp256k1_fe y; /* actual Y: y/z^3 */ - secp256k1_fe z; - int infinity; /* whether this represents the point at infinity */ -} secp256k1_gej; - -#define SECP256K1_GEJ_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_CONST((i),(j),(k),(l),(m),(n),(o),(p)), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1), 0} -#define SECP256K1_GEJ_CONST_INFINITY {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), 1} - -typedef struct { - secp256k1_fe_storage x; - secp256k1_fe_storage y; -} secp256k1_ge_storage; - -#define SECP256K1_GE_STORAGE_CONST(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) {SECP256K1_FE_STORAGE_CONST((a),(b),(c),(d),(e),(f),(g),(h)), SECP256K1_FE_STORAGE_CONST((i),(j),(k),(l),(m),(n),(o),(p))} - -#define SECP256K1_GE_STORAGE_CONST_GET(t) SECP256K1_FE_STORAGE_CONST_GET(t.x), SECP256K1_FE_STORAGE_CONST_GET(t.y) - -/** Set a group element equal to the point with given X and Y coordinates */ -static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y); - -/** Set a group element (affine) equal to the point with the given X coordinate - * and a Y coordinate that is a quadratic residue modulo p. The return value - * is true iff a coordinate with the given X coordinate exists. - */ -static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x); - -/** Set a group element (affine) equal to the point with the given X coordinate, and given oddness - * for Y. Return value indicates whether the result is valid. */ -static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd); - -/** Check whether a group element is the point at infinity. */ -static int secp256k1_ge_is_infinity(const secp256k1_ge *a); - -/** Check whether a group element is valid (i.e., on the curve). */ -static int secp256k1_ge_is_valid_var(const secp256k1_ge *a); - -static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a); - -/** Set a group element equal to another which is given in jacobian coordinates */ -static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a); - -/** Set a batch of group elements equal to the inputs given in jacobian coordinates */ -static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len, const secp256k1_callback *cb); - -/** Set a batch of group elements equal to the inputs given in jacobian - * coordinates (with known z-ratios). zr must contain the known z-ratios such - * that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. 
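- * Given those ratios, only the last point's z needs an explicit inversion:
- * each earlier 1/z is recovered by one multiplication by the corresponding
- * ratio, so converting the whole table costs a single field inversion.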
*/ -static void secp256k1_ge_set_table_gej_var(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr, size_t len); - -/** Bring a batch of inputs given in jacobian coordinates (with known z-ratios) to - * the same global z "denominator". zr must contain the known z-ratios such - * that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. The x and y - * coordinates of the result are stored in r, the common z coordinate is - * stored in globalz. */ -static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp256k1_fe *globalz, const secp256k1_gej *a, const secp256k1_fe *zr); - -/** Set a group element (jacobian) equal to the point at infinity. */ -static void secp256k1_gej_set_infinity(secp256k1_gej *r); - -/** Set a group element (jacobian) equal to another which is given in affine coordinates. */ -static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a); - -/** Compare the X coordinate of a group element (jacobian). */ -static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a); - -/** Set r equal to the inverse of a (i.e., mirrored around the X axis) */ -static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a); - -/** Check whether a group element is the point at infinity. */ -static int secp256k1_gej_is_infinity(const secp256k1_gej *a); - -/** Check whether a group element's y coordinate is a quadratic residue. */ -static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a); - -/** Set r equal to the double of a. If rzr is non-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0). - * a may not be zero. Constant time. */ -static void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr); - -/** Set r equal to the double of a. If rzr is non-NULL, r->z = a->z * *rzr (where infinity means an implicit z = 0). */ -static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr); - -/** Set r equal to the sum of a and b. If rzr is non-NULL, r->z = a->z * *rzr (a cannot be infinity in that case). */ -static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr); - -/** Set r equal to the sum of a and b (with b given in affine coordinates, and not infinity). */ -static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b); - -/** Set r equal to the sum of a and b (with b given in affine coordinates). This is more efficient - than secp256k1_gej_add_var. It is identical to secp256k1_gej_add_ge but without constant-time - guarantee, and b is allowed to be infinity. If rzr is non-NULL, r->z = a->z * *rzr (a cannot be infinity in that case). */ -static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr); - -/** Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv). */ -static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv); - -#ifdef USE_ENDOMORPHISM -/** Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast. */ -static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a); -#endif - -/** Clear a secp256k1_gej to prevent leaking sensitive information. */ -static void secp256k1_gej_clear(secp256k1_gej *r); - -/** Clear a secp256k1_ge to prevent leaking sensitive information. 
*/ -static void secp256k1_ge_clear(secp256k1_ge *r); - -/** Convert a group element to the storage type. */ -static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a); - -/** Convert a group element back from the storage type. */ -static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage *a); - -/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */ -static void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag); - -/** Rescale a jacobian point by b which must be non-zero. Constant-time. */ -static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *b); - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/group_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/group_impl.h deleted file mode 100644 index 7d723532f..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/group_impl.h +++ /dev/null @@ -1,700 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_GROUP_IMPL_H_ -#define _SECP256K1_GROUP_IMPL_H_ - -#include "num.h" -#include "field.h" -#include "group.h" - -/* These points can be generated in sage as follows: - * - * 0. Setup a worksheet with the following parameters. - * b = 4 # whatever CURVE_B will be set to - * F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F) - * C = EllipticCurve ([F (0), F (b)]) - * - * 1. Determine all the small orders available to you. (If there are - * no satisfactory ones, go back and change b.) - * print C.order().factor(limit=1000) - * - * 2. Choose an order as one of the prime factors listed in the above step. - * (You can also multiply some to get a composite order, though the - * tests will crash trying to invert scalars during signing.) We take a - * random point and scale it to drop its order to the desired value. - * There is some probability this won't work; just try again. - * order = 199 - * P = C.random_point() - * P = (int(P.order()) / int(order)) * P - * assert(P.order() == order) - * - * 3. Print the values. You'll need to use a vim macro or something to - * split the hex output into 4-byte chunks. - * print "%x %x" % P.xy() - */ -#if defined(EXHAUSTIVE_TEST_ORDER) -# if EXHAUSTIVE_TEST_ORDER == 199 -const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( - 0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069, - 0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18, - 0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868, - 0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED -); - -const int CURVE_B = 4; -# elif EXHAUSTIVE_TEST_ORDER == 13 -const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( - 0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0, - 0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15, - 0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e, - 0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac -); -const int CURVE_B = 2; -# else -# error No known generator for the specified exhaustive test group order. -# endif -#else -/** Generator for secp256k1, value 'g' defined in - * "Standards for Efficient Cryptography" (SEC2) 2.7.1. 
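- * The 16 words below are the x coordinate followed by the y coordinate, most
- * significant word first; in compressed encoding this point is the well-known
- * 0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798.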
- */ -static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( - 0x79BE667EUL, 0xF9DCBBACUL, 0x55A06295UL, 0xCE870B07UL, - 0x029BFCDBUL, 0x2DCE28D9UL, 0x59F2815BUL, 0x16F81798UL, - 0x483ADA77UL, 0x26A3C465UL, 0x5DA4FBFCUL, 0x0E1108A8UL, - 0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL -); - -const int CURVE_B = 7; -#endif - -static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) { - secp256k1_fe zi2; - secp256k1_fe zi3; - secp256k1_fe_sqr(&zi2, zi); - secp256k1_fe_mul(&zi3, &zi2, zi); - secp256k1_fe_mul(&r->x, &a->x, &zi2); - secp256k1_fe_mul(&r->y, &a->y, &zi3); - r->infinity = a->infinity; -} - -static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y) { - r->infinity = 0; - r->x = *x; - r->y = *y; -} - -static int secp256k1_ge_is_infinity(const secp256k1_ge *a) { - return a->infinity; -} - -static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a) { - *r = *a; - secp256k1_fe_normalize_weak(&r->y); - secp256k1_fe_negate(&r->y, &r->y, 1); -} - -static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a) { - secp256k1_fe z2, z3; - r->infinity = a->infinity; - secp256k1_fe_inv(&a->z, &a->z); - secp256k1_fe_sqr(&z2, &a->z); - secp256k1_fe_mul(&z3, &a->z, &z2); - secp256k1_fe_mul(&a->x, &a->x, &z2); - secp256k1_fe_mul(&a->y, &a->y, &z3); - secp256k1_fe_set_int(&a->z, 1); - r->x = a->x; - r->y = a->y; -} - -static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) { - secp256k1_fe z2, z3; - r->infinity = a->infinity; - if (a->infinity) { - return; - } - secp256k1_fe_inv_var(&a->z, &a->z); - secp256k1_fe_sqr(&z2, &a->z); - secp256k1_fe_mul(&z3, &a->z, &z2); - secp256k1_fe_mul(&a->x, &a->x, &z2); - secp256k1_fe_mul(&a->y, &a->y, &z3); - secp256k1_fe_set_int(&a->z, 1); - r->x = a->x; - r->y = a->y; -} - -static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len, const secp256k1_callback *cb) { - secp256k1_fe *az; - secp256k1_fe *azi; - size_t i; - size_t count = 0; - az = (secp256k1_fe *)checked_malloc(cb, sizeof(secp256k1_fe) * len); - for (i = 0; i < len; i++) { - if (!a[i].infinity) { - az[count++] = a[i].z; - } - } - - azi = (secp256k1_fe *)checked_malloc(cb, sizeof(secp256k1_fe) * count); - secp256k1_fe_inv_all_var(azi, az, count); - free(az); - - count = 0; - for (i = 0; i < len; i++) { - r[i].infinity = a[i].infinity; - if (!a[i].infinity) { - secp256k1_ge_set_gej_zinv(&r[i], &a[i], &azi[count++]); - } - } - free(azi); -} - -static void secp256k1_ge_set_table_gej_var(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr, size_t len) { - size_t i = len - 1; - secp256k1_fe zi; - - if (len > 0) { - /* Compute the inverse of the last z coordinate, and use it to compute the last affine output. */ - secp256k1_fe_inv(&zi, &a[i].z); - secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi); - - /* Work out way backwards, using the z-ratios to scale the x/y values. */ - while (i > 0) { - secp256k1_fe_mul(&zi, &zi, &zr[i]); - i--; - secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi); - } - } -} - -static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp256k1_fe *globalz, const secp256k1_gej *a, const secp256k1_fe *zr) { - size_t i = len - 1; - secp256k1_fe zs; - - if (len > 0) { - /* The z of the final point gives us the "global Z" for the table. 
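- * Every other entry is then rescaled with the accumulated z-ratios so that it
- * is valid relative to that one shared denominator (sometimes called the
- * "effective affine" technique), with no field inversion at all.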
*/ - r[i].x = a[i].x; - r[i].y = a[i].y; - *globalz = a[i].z; - r[i].infinity = 0; - zs = zr[i]; - - /* Work our way backwards, using the z-ratios to scale the x/y values. */ - while (i > 0) { - if (i != len - 1) { - secp256k1_fe_mul(&zs, &zs, &zr[i]); - } - i--; - secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zs); - } - } -} - -static void secp256k1_gej_set_infinity(secp256k1_gej *r) { - r->infinity = 1; - secp256k1_fe_clear(&r->x); - secp256k1_fe_clear(&r->y); - secp256k1_fe_clear(&r->z); -} - -static void secp256k1_gej_clear(secp256k1_gej *r) { - r->infinity = 0; - secp256k1_fe_clear(&r->x); - secp256k1_fe_clear(&r->y); - secp256k1_fe_clear(&r->z); -} - -static void secp256k1_ge_clear(secp256k1_ge *r) { - r->infinity = 0; - secp256k1_fe_clear(&r->x); - secp256k1_fe_clear(&r->y); -} - -static int secp256k1_ge_set_xquad(secp256k1_ge *r, const secp256k1_fe *x) { - secp256k1_fe x2, x3, c; - r->x = *x; - secp256k1_fe_sqr(&x2, x); - secp256k1_fe_mul(&x3, x, &x2); - r->infinity = 0; - secp256k1_fe_set_int(&c, CURVE_B); - secp256k1_fe_add(&c, &x3); - return secp256k1_fe_sqrt(&r->y, &c); -} - -static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) { - if (!secp256k1_ge_set_xquad(r, x)) { - return 0; - } - secp256k1_fe_normalize_var(&r->y); - if (secp256k1_fe_is_odd(&r->y) != odd) { - secp256k1_fe_negate(&r->y, &r->y, 1); - } - return 1; - -} - -static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a) { - r->infinity = a->infinity; - r->x = a->x; - r->y = a->y; - secp256k1_fe_set_int(&r->z, 1); -} - -static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a) { - secp256k1_fe r, r2; - VERIFY_CHECK(!a->infinity); - secp256k1_fe_sqr(&r, &a->z); secp256k1_fe_mul(&r, &r, x); - r2 = a->x; secp256k1_fe_normalize_weak(&r2); - return secp256k1_fe_equal_var(&r, &r2); -} - -static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a) { - r->infinity = a->infinity; - r->x = a->x; - r->y = a->y; - r->z = a->z; - secp256k1_fe_normalize_weak(&r->y); - secp256k1_fe_negate(&r->y, &r->y, 1); -} - -static int secp256k1_gej_is_infinity(const secp256k1_gej *a) { - return a->infinity; -} - -static int secp256k1_gej_is_valid_var(const secp256k1_gej *a) { - secp256k1_fe y2, x3, z2, z6; - if (a->infinity) { - return 0; - } - /** y^2 = x^3 + 7 - * (Y/Z^3)^2 = (X/Z^2)^3 + 7 - * Y^2 / Z^6 = X^3 / Z^6 + 7 - * Y^2 = X^3 + 7*Z^6 - */ - secp256k1_fe_sqr(&y2, &a->y); - secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x); - secp256k1_fe_sqr(&z2, &a->z); - secp256k1_fe_sqr(&z6, &z2); secp256k1_fe_mul(&z6, &z6, &z2); - secp256k1_fe_mul_int(&z6, CURVE_B); - secp256k1_fe_add(&x3, &z6); - secp256k1_fe_normalize_weak(&x3); - return secp256k1_fe_equal_var(&y2, &x3); -} - -static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) { - secp256k1_fe y2, x3, c; - if (a->infinity) { - return 0; - } - /* y^2 = x^3 + 7 */ - secp256k1_fe_sqr(&y2, &a->y); - secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x); - secp256k1_fe_set_int(&c, CURVE_B); - secp256k1_fe_add(&x3, &c); - secp256k1_fe_normalize_weak(&x3); - return secp256k1_fe_equal_var(&y2, &x3); -} - -static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) { - /* Operations: 3 mul, 4 sqr, 0 normalize, 12 mul_int/add/negate. 
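- * The parenthesized numbers in the per-line comments below track the magnitude
- * of each intermediate field element after the operation.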
- * - * Note that there is an implementation described at - * https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l - * which trades a multiply for a square, but in practice this is actually slower, - * mainly because it requires more normalizations. - */ - secp256k1_fe t1,t2,t3,t4; - /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity, - * Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have - * y=0, x^3 must be -7 mod p. However, -7 has no cube root mod p. - * - * Having said this, if this function receives a point on a sextic twist, e.g. by - * a fault attack, it is possible for y to be 0. This happens for y^2 = x^3 + 6, - * since -6 does have a cube root mod p. For this point, this function will not set - * the infinity flag even though the point doubles to infinity, and the result - * point will be gibberish (z = 0 but infinity = 0). - */ - r->infinity = a->infinity; - if (r->infinity) { - if (rzr != NULL) { - secp256k1_fe_set_int(rzr, 1); - } - return; - } - - if (rzr != NULL) { - *rzr = a->y; - secp256k1_fe_normalize_weak(rzr); - secp256k1_fe_mul_int(rzr, 2); - } - - secp256k1_fe_mul(&r->z, &a->z, &a->y); - secp256k1_fe_mul_int(&r->z, 2); /* Z' = 2*Y*Z (2) */ - secp256k1_fe_sqr(&t1, &a->x); - secp256k1_fe_mul_int(&t1, 3); /* T1 = 3*X^2 (3) */ - secp256k1_fe_sqr(&t2, &t1); /* T2 = 9*X^4 (1) */ - secp256k1_fe_sqr(&t3, &a->y); - secp256k1_fe_mul_int(&t3, 2); /* T3 = 2*Y^2 (2) */ - secp256k1_fe_sqr(&t4, &t3); - secp256k1_fe_mul_int(&t4, 2); /* T4 = 8*Y^4 (2) */ - secp256k1_fe_mul(&t3, &t3, &a->x); /* T3 = 2*X*Y^2 (1) */ - r->x = t3; - secp256k1_fe_mul_int(&r->x, 4); /* X' = 8*X*Y^2 (4) */ - secp256k1_fe_negate(&r->x, &r->x, 4); /* X' = -8*X*Y^2 (5) */ - secp256k1_fe_add(&r->x, &t2); /* X' = 9*X^4 - 8*X*Y^2 (6) */ - secp256k1_fe_negate(&t2, &t2, 1); /* T2 = -9*X^4 (2) */ - secp256k1_fe_mul_int(&t3, 6); /* T3 = 12*X*Y^2 (6) */ - secp256k1_fe_add(&t3, &t2); /* T3 = 12*X*Y^2 - 9*X^4 (8) */ - secp256k1_fe_mul(&r->y, &t1, &t3); /* Y' = 36*X^3*Y^2 - 27*X^6 (1) */ - secp256k1_fe_negate(&t2, &t4, 2); /* T2 = -8*Y^4 (3) */ - secp256k1_fe_add(&r->y, &t2); /* Y' = 36*X^3*Y^2 - 27*X^6 - 8*Y^4 (4) */ -} - -static SECP256K1_INLINE void secp256k1_gej_double_nonzero(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) { - VERIFY_CHECK(!secp256k1_gej_is_infinity(a)); - secp256k1_gej_double_var(r, a, rzr); -} - -static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr) { - /* Operations: 12 mul, 4 sqr, 2 normalize, 12 mul_int/add/negate */ - secp256k1_fe z22, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; - - if (a->infinity) { - VERIFY_CHECK(rzr == NULL); - *r = *b; - return; - } - - if (b->infinity) { - if (rzr != NULL) { - secp256k1_fe_set_int(rzr, 1); - } - *r = *a; - return; - } - - r->infinity = 0; - secp256k1_fe_sqr(&z22, &b->z); - secp256k1_fe_sqr(&z12, &a->z); - secp256k1_fe_mul(&u1, &a->x, &z22); - secp256k1_fe_mul(&u2, &b->x, &z12); - secp256k1_fe_mul(&s1, &a->y, &z22); secp256k1_fe_mul(&s1, &s1, &b->z); - secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z); - secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2); - secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2); - if (secp256k1_fe_normalizes_to_zero_var(&h)) { - if (secp256k1_fe_normalizes_to_zero_var(&i)) { - secp256k1_gej_double_var(r, a, rzr); - } else { - if (rzr != NULL) { - secp256k1_fe_set_int(rzr, 0); - } - r->infinity = 1; - } - 
return; - } - secp256k1_fe_sqr(&i2, &i); - secp256k1_fe_sqr(&h2, &h); - secp256k1_fe_mul(&h3, &h, &h2); - secp256k1_fe_mul(&h, &h, &b->z); - if (rzr != NULL) { - *rzr = h; - } - secp256k1_fe_mul(&r->z, &a->z, &h); - secp256k1_fe_mul(&t, &u1, &h2); - r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2); - secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i); - secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1); - secp256k1_fe_add(&r->y, &h3); -} - -static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr) { - /* 8 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */ - secp256k1_fe z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; - if (a->infinity) { - VERIFY_CHECK(rzr == NULL); - secp256k1_gej_set_ge(r, b); - return; - } - if (b->infinity) { - if (rzr != NULL) { - secp256k1_fe_set_int(rzr, 1); - } - *r = *a; - return; - } - r->infinity = 0; - - secp256k1_fe_sqr(&z12, &a->z); - u1 = a->x; secp256k1_fe_normalize_weak(&u1); - secp256k1_fe_mul(&u2, &b->x, &z12); - s1 = a->y; secp256k1_fe_normalize_weak(&s1); - secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z); - secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2); - secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2); - if (secp256k1_fe_normalizes_to_zero_var(&h)) { - if (secp256k1_fe_normalizes_to_zero_var(&i)) { - secp256k1_gej_double_var(r, a, rzr); - } else { - if (rzr != NULL) { - secp256k1_fe_set_int(rzr, 0); - } - r->infinity = 1; - } - return; - } - secp256k1_fe_sqr(&i2, &i); - secp256k1_fe_sqr(&h2, &h); - secp256k1_fe_mul(&h3, &h, &h2); - if (rzr != NULL) { - *rzr = h; - } - secp256k1_fe_mul(&r->z, &a->z, &h); - secp256k1_fe_mul(&t, &u1, &h2); - r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2); - secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i); - secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1); - secp256k1_fe_add(&r->y, &h3); -} - -static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv) { - /* 9 mul, 3 sqr, 4 normalize, 12 mul_int/add/negate */ - secp256k1_fe az, z12, u1, u2, s1, s2, h, i, i2, h2, h3, t; - - if (b->infinity) { - *r = *a; - return; - } - if (a->infinity) { - secp256k1_fe bzinv2, bzinv3; - r->infinity = b->infinity; - secp256k1_fe_sqr(&bzinv2, bzinv); - secp256k1_fe_mul(&bzinv3, &bzinv2, bzinv); - secp256k1_fe_mul(&r->x, &b->x, &bzinv2); - secp256k1_fe_mul(&r->y, &b->y, &bzinv3); - secp256k1_fe_set_int(&r->z, 1); - return; - } - r->infinity = 0; - - /** We need to calculate (rx,ry,rz) = (ax,ay,az) + (bx,by,1/bzinv). Due to - * secp256k1's isomorphism we can multiply the Z coordinates on both sides - * by bzinv, and get: (rx,ry,rz*bzinv) = (ax,ay,az*bzinv) + (bx,by,1). - * This means that (rx,ry,rz) can be calculated as - * (ax,ay,az*bzinv) + (bx,by,1), when not applying the bzinv factor to rz. - * The variable az below holds the modified Z coordinate for a, which is used - * for the computation of rx and ry, but not for rz. 
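
Spelled out, the scaling freedom used here is the following (a brief sketch; a Jacobian triple $(X : Y : Z)$ denotes the affine point $(X/Z^2,\ Y/Z^3)$):

$$ (X : Y : Z) \sim (c^2 X : c^3 Y : c Z) \quad \text{for any } c \neq 0, \qquad \frac{c^2 X}{(cZ)^2} = \frac{X}{Z^2}, \quad \frac{c^3 Y}{(cZ)^3} = \frac{Y}{Z^3}. $$

Choosing $c$ = `bzinv` $= 1/Z_b$ turns $b$ into a triple with $Z = 1$, at the cost of scaling $a$'s $z$-coordinate by the same factor.
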
- */ - secp256k1_fe_mul(&az, &a->z, bzinv); - - secp256k1_fe_sqr(&z12, &az); - u1 = a->x; secp256k1_fe_normalize_weak(&u1); - secp256k1_fe_mul(&u2, &b->x, &z12); - s1 = a->y; secp256k1_fe_normalize_weak(&s1); - secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &az); - secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2); - secp256k1_fe_negate(&i, &s1, 1); secp256k1_fe_add(&i, &s2); - if (secp256k1_fe_normalizes_to_zero_var(&h)) { - if (secp256k1_fe_normalizes_to_zero_var(&i)) { - secp256k1_gej_double_var(r, a, NULL); - } else { - r->infinity = 1; - } - return; - } - secp256k1_fe_sqr(&i2, &i); - secp256k1_fe_sqr(&h2, &h); - secp256k1_fe_mul(&h3, &h, &h2); - r->z = a->z; secp256k1_fe_mul(&r->z, &r->z, &h); - secp256k1_fe_mul(&t, &u1, &h2); - r->x = t; secp256k1_fe_mul_int(&r->x, 2); secp256k1_fe_add(&r->x, &h3); secp256k1_fe_negate(&r->x, &r->x, 3); secp256k1_fe_add(&r->x, &i2); - secp256k1_fe_negate(&r->y, &r->x, 5); secp256k1_fe_add(&r->y, &t); secp256k1_fe_mul(&r->y, &r->y, &i); - secp256k1_fe_mul(&h3, &h3, &s1); secp256k1_fe_negate(&h3, &h3, 1); - secp256k1_fe_add(&r->y, &h3); -} - - -static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b) { - /* Operations: 7 mul, 5 sqr, 4 normalize, 21 mul_int/add/negate/cmov */ - static const secp256k1_fe fe_1 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); - secp256k1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr; - secp256k1_fe m_alt, rr_alt; - int infinity, degenerate; - VERIFY_CHECK(!b->infinity); - VERIFY_CHECK(a->infinity == 0 || a->infinity == 1); - - /** In: - * Eric Brier and Marc Joye, Weierstrass Elliptic Curves and Side-Channel Attacks. - * In D. Naccache and P. Paillier, Eds., Public Key Cryptography, vol. 2274 of Lecture Notes in Computer Science, pages 335-345. Springer-Verlag, 2002. - * we find as solution for a unified addition/doubling formula: - * lambda = ((x1 + x2)^2 - x1 * x2 + a) / (y1 + y2), with a = 0 for secp256k1's curve equation. - * x3 = lambda^2 - (x1 + x2) - * 2*y3 = lambda * (x1 + x2 - 2 * x3) - (y1 + y2). - * - * Substituting x_i = Xi / Zi^2 and yi = Yi / Zi^3, for i=1,2,3, gives: - * U1 = X1*Z2^2, U2 = X2*Z1^2 - * S1 = Y1*Z2^3, S2 = Y2*Z1^3 - * Z = Z1*Z2 - * T = U1+U2 - * M = S1+S2 - * Q = T*M^2 - * R = T^2-U1*U2 - * X3 = 4*(R^2-Q) - * Y3 = 4*(R*(3*Q-2*R^2)-M^4) - * Z3 = 2*M*Z - * (Note that the paper uses xi = Xi / Zi and yi = Yi / Zi instead.) - * - * This formula has the benefit of being the same for both addition - * of distinct points and doubling. However, it breaks down in the - * case that either point is infinity, or that y1 = -y2. We handle - * these cases in the following ways: - * - * - If b is infinity we simply bail by means of a VERIFY_CHECK. - * - * - If a is infinity, we detect this, and at the end of the - * computation replace the result (which will be meaningless, - * but we compute to be constant-time) with b.x : b.y : 1. - * - * - If a = -b, we have y1 = -y2, which is a degenerate case. - * But here the answer is infinity, so we simply set the - * infinity flag of the result, overriding the computed values - * without even needing to cmov. - * - * - If y1 = -y2 but x1 != x2, which does occur thanks to certain - * properties of our curve (specifically, 1 has nontrivial cube - * roots in our field, and the curve equation has no x coefficient) - * then the answer is not infinity but also not given by the above - * equation. In this case, we cmov in place an alternate expression - * for lambda. Specifically (y1 - y2)/(x1 - x2). 
Where both these - * expressions for lambda are defined, they are equal, and can be - * obtained from each other by multiplication by (y1 + y2)/(y1 + y2) - * then substitution of x^3 + 7 for y^2 (using the curve equation). - * For all pairs of nonzero points (a, b) at least one is defined, - * so this covers everything. - */ - - secp256k1_fe_sqr(&zz, &a->z); /* z = Z1^2 */ - u1 = a->x; secp256k1_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */ - secp256k1_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */ - s1 = a->y; secp256k1_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */ - secp256k1_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */ - secp256k1_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */ - t = u1; secp256k1_fe_add(&t, &u2); /* t = T = U1+U2 (2) */ - m = s1; secp256k1_fe_add(&m, &s2); /* m = M = S1+S2 (2) */ - secp256k1_fe_sqr(&rr, &t); /* rr = T^2 (1) */ - secp256k1_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */ - secp256k1_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */ - secp256k1_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */ - /** If lambda = R/M = 0/0 we have a problem (except in the "trivial" - * case that Z = z1z2 = 0, and this is special-cased later on). */ - degenerate = secp256k1_fe_normalizes_to_zero(&m) & - secp256k1_fe_normalizes_to_zero(&rr); - /* This only occurs when y1 == -y2 and x1^3 == x2^3, but x1 != x2. - * This means either x1 == beta*x2 or beta*x1 == x2, where beta is - * a nontrivial cube root of one. In either case, an alternate - * non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2), - * so we set R/M equal to this. */ - rr_alt = s1; - secp256k1_fe_mul_int(&rr_alt, 2); /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */ - secp256k1_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */ - - secp256k1_fe_cmov(&rr_alt, &rr, !degenerate); - secp256k1_fe_cmov(&m_alt, &m, !degenerate); - /* Now Ralt / Malt = lambda and is guaranteed not to be 0/0. - * From here on out Ralt and Malt represent the numerator - * and denominator of lambda; R and M represent the explicit - * expressions x1^2 + x2^2 + x1x2 and y1 + y2. */ - secp256k1_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */ - secp256k1_fe_mul(&q, &n, &t); /* q = Q = T*Malt^2 (1) */ - /* These two lines use the observation that either M == Malt or M == 0, - * so M^3 * Malt is either Malt^4 (which is computed by squaring), or - * zero (which is "computed" by cmov). So the cost is one squaring - * versus two multiplications. */ - secp256k1_fe_sqr(&n, &n); - secp256k1_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */ - secp256k1_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */ - secp256k1_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Malt*Z (1) */ - infinity = secp256k1_fe_normalizes_to_zero(&r->z) * (1 - a->infinity); - secp256k1_fe_mul_int(&r->z, 2); /* r->z = Z3 = 2*Malt*Z (2) */ - secp256k1_fe_negate(&q, &q, 1); /* q = -Q (2) */ - secp256k1_fe_add(&t, &q); /* t = Ralt^2-Q (3) */ - secp256k1_fe_normalize_weak(&t); - r->x = t; /* r->x = Ralt^2-Q (1) */ - secp256k1_fe_mul_int(&t, 2); /* t = 2*x3 (2) */ - secp256k1_fe_add(&t, &q); /* t = 2*x3 - Q: (4) */ - secp256k1_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*x3 - Q) (1) */ - secp256k1_fe_add(&t, &n); /* t = Ralt*(2*x3 - Q) + M^3*Malt (3) */ - secp256k1_fe_negate(&r->y, &t, 3); /* r->y = Ralt*(Q - 2x3) - M^3*Malt (4) */ - secp256k1_fe_normalize_weak(&r->y); - secp256k1_fe_mul_int(&r->x, 4); /* r->x = X3 = 4*(Ralt^2-Q) */ - secp256k1_fe_mul_int(&r->y, 4); /* r->y = Y3 = 4*Ralt*(Q - 2x3) - 4*M^3*Malt (4) */ - - /** In case a->infinity == 1, replace r with (b->x, b->y, 1). 
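
The constant-time selection this function leans on is conventionally built from bit masks rather than branches; a minimal standalone sketch of the pattern (illustrative only — the library's field cmov applies the same masking to each limb of a field element):

    #include <stdint.h>

    /* If flag is 1, *r becomes a; if flag is 0, *r is left unchanged.
     * No branch depends on flag, so neither does the running time. */
    static void cmov32(uint32_t *r, uint32_t a, uint32_t flag) {
        uint32_t mask0 = flag + ~((uint32_t)0); /* 1 -> 0x00000000, 0 -> 0xFFFFFFFF */
        uint32_t mask1 = ~mask0;
        *r = (*r & mask0) | (a & mask1);
    }
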
*/ - secp256k1_fe_cmov(&r->x, &b->x, a->infinity); - secp256k1_fe_cmov(&r->y, &b->y, a->infinity); - secp256k1_fe_cmov(&r->z, &fe_1, a->infinity); - r->infinity = infinity; -} - -static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *s) { - /* Operations: 4 mul, 1 sqr */ - secp256k1_fe zz; - VERIFY_CHECK(!secp256k1_fe_is_zero(s)); - secp256k1_fe_sqr(&zz, s); - secp256k1_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */ - secp256k1_fe_mul(&r->y, &r->y, &zz); - secp256k1_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */ - secp256k1_fe_mul(&r->z, &r->z, s); /* r->z *= s */ -} - -static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a) { - secp256k1_fe x, y; - VERIFY_CHECK(!a->infinity); - x = a->x; - secp256k1_fe_normalize(&x); - y = a->y; - secp256k1_fe_normalize(&y); - secp256k1_fe_to_storage(&r->x, &x); - secp256k1_fe_to_storage(&r->y, &y); -} - -static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage *a) { - secp256k1_fe_from_storage(&r->x, &a->x); - secp256k1_fe_from_storage(&r->y, &a->y); - r->infinity = 0; -} - -static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag) { - secp256k1_fe_storage_cmov(&r->x, &a->x, flag); - secp256k1_fe_storage_cmov(&r->y, &a->y, flag); -} - -#ifdef USE_ENDOMORPHISM -static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) { - static const secp256k1_fe beta = SECP256K1_FE_CONST( - 0x7ae96a2bul, 0x657c0710ul, 0x6e64479eul, 0xac3434e9ul, - 0x9cf04975ul, 0x12f58995ul, 0xc1396c28ul, 0x719501eeul - ); - *r = *a; - secp256k1_fe_mul(&r->x, &r->x, &beta); -} -#endif - -static int secp256k1_gej_has_quad_y_var(const secp256k1_gej *a) { - secp256k1_fe yz; - - if (a->infinity) { - return 0; - } - - /* We rely on the fact that the Jacobi symbol of 1 / a->z^3 is the same as - * that of a->z. 
Thus a->y / a->z^3 is a quadratic residue iff a->y * a->z - is */ - secp256k1_fe_mul(&yz, &a->y, &a->z); - return secp256k1_fe_is_quad_var(&yz); -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/hash.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/hash.h deleted file mode 100644 index fca98cab9..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/hash.h +++ /dev/null @@ -1,41 +0,0 @@ -/********************************************************************** - * Copyright (c) 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_HASH_ -#define _SECP256K1_HASH_ - -#include -#include - -typedef struct { - uint32_t s[8]; - uint32_t buf[16]; /* In big endian */ - size_t bytes; -} secp256k1_sha256_t; - -static void secp256k1_sha256_initialize(secp256k1_sha256_t *hash); -static void secp256k1_sha256_write(secp256k1_sha256_t *hash, const unsigned char *data, size_t size); -static void secp256k1_sha256_finalize(secp256k1_sha256_t *hash, unsigned char *out32); - -typedef struct { - secp256k1_sha256_t inner, outer; -} secp256k1_hmac_sha256_t; - -static void secp256k1_hmac_sha256_initialize(secp256k1_hmac_sha256_t *hash, const unsigned char *key, size_t size); -static void secp256k1_hmac_sha256_write(secp256k1_hmac_sha256_t *hash, const unsigned char *data, size_t size); -static void secp256k1_hmac_sha256_finalize(secp256k1_hmac_sha256_t *hash, unsigned char *out32); - -typedef struct { - unsigned char v[32]; - unsigned char k[32]; - int retry; -} secp256k1_rfc6979_hmac_sha256_t; - -static void secp256k1_rfc6979_hmac_sha256_initialize(secp256k1_rfc6979_hmac_sha256_t *rng, const unsigned char *key, size_t keylen); -static void secp256k1_rfc6979_hmac_sha256_generate(secp256k1_rfc6979_hmac_sha256_t *rng, unsigned char *out, size_t outlen); -static void secp256k1_rfc6979_hmac_sha256_finalize(secp256k1_rfc6979_hmac_sha256_t *rng); - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/hash_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/hash_impl.h deleted file mode 100644 index b47e65f83..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/hash_impl.h +++ /dev/null @@ -1,281 +0,0 @@ -/********************************************************************** - * Copyright (c) 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_HASH_IMPL_H_ -#define _SECP256K1_HASH_IMPL_H_ - -#include "hash.h" - -#include -#include -#include - -#define Ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) -#define Maj(x,y,z) (((x) & (y)) | ((z) & ((x) | (y)))) -#define Sigma0(x) (((x) >> 2 | (x) << 30) ^ ((x) >> 13 | (x) << 19) ^ ((x) >> 22 | (x) << 10)) -#define Sigma1(x) (((x) >> 6 | (x) << 26) ^ ((x) >> 11 | (x) << 21) ^ ((x) >> 25 | (x) << 7)) -#define sigma0(x) (((x) >> 7 | (x) << 25) ^ ((x) >> 18 | (x) << 14) ^ ((x) >> 3)) -#define sigma1(x) (((x) >> 17 | (x) << 15) ^ ((x) >> 19 | (x) << 13) ^ ((x) >> 10)) - -#define Round(a,b,c,d,e,f,g,h,k,w) do { \ - uint32_t t1 = (h) + Sigma1(e) + Ch((e), (f), (g)) + (k) + (w); \ - uint32_t t2 = Sigma0(a) + Maj((a), (b), (c)); \ - (d) += t1; \ - (h) = t1 + t2; \ -} while(0) - -#ifdef 
WORDS_BIGENDIAN -#define BE32(x) (x) -#else -#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24)) -#endif - -static void secp256k1_sha256_initialize(secp256k1_sha256_t *hash) { - hash->s[0] = 0x6a09e667ul; - hash->s[1] = 0xbb67ae85ul; - hash->s[2] = 0x3c6ef372ul; - hash->s[3] = 0xa54ff53aul; - hash->s[4] = 0x510e527ful; - hash->s[5] = 0x9b05688cul; - hash->s[6] = 0x1f83d9abul; - hash->s[7] = 0x5be0cd19ul; - hash->bytes = 0; -} - -/** Perform one SHA-256 transformation, processing 16 big endian 32-bit words. */ -static void secp256k1_sha256_transform(uint32_t* s, const uint32_t* chunk) { - uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7]; - uint32_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15; - - Round(a, b, c, d, e, f, g, h, 0x428a2f98, w0 = BE32(chunk[0])); - Round(h, a, b, c, d, e, f, g, 0x71374491, w1 = BE32(chunk[1])); - Round(g, h, a, b, c, d, e, f, 0xb5c0fbcf, w2 = BE32(chunk[2])); - Round(f, g, h, a, b, c, d, e, 0xe9b5dba5, w3 = BE32(chunk[3])); - Round(e, f, g, h, a, b, c, d, 0x3956c25b, w4 = BE32(chunk[4])); - Round(d, e, f, g, h, a, b, c, 0x59f111f1, w5 = BE32(chunk[5])); - Round(c, d, e, f, g, h, a, b, 0x923f82a4, w6 = BE32(chunk[6])); - Round(b, c, d, e, f, g, h, a, 0xab1c5ed5, w7 = BE32(chunk[7])); - Round(a, b, c, d, e, f, g, h, 0xd807aa98, w8 = BE32(chunk[8])); - Round(h, a, b, c, d, e, f, g, 0x12835b01, w9 = BE32(chunk[9])); - Round(g, h, a, b, c, d, e, f, 0x243185be, w10 = BE32(chunk[10])); - Round(f, g, h, a, b, c, d, e, 0x550c7dc3, w11 = BE32(chunk[11])); - Round(e, f, g, h, a, b, c, d, 0x72be5d74, w12 = BE32(chunk[12])); - Round(d, e, f, g, h, a, b, c, 0x80deb1fe, w13 = BE32(chunk[13])); - Round(c, d, e, f, g, h, a, b, 0x9bdc06a7, w14 = BE32(chunk[14])); - Round(b, c, d, e, f, g, h, a, 0xc19bf174, w15 = BE32(chunk[15])); - - Round(a, b, c, d, e, f, g, h, 0xe49b69c1, w0 += sigma1(w14) + w9 + sigma0(w1)); - Round(h, a, b, c, d, e, f, g, 0xefbe4786, w1 += sigma1(w15) + w10 + sigma0(w2)); - Round(g, h, a, b, c, d, e, f, 0x0fc19dc6, w2 += sigma1(w0) + w11 + sigma0(w3)); - Round(f, g, h, a, b, c, d, e, 0x240ca1cc, w3 += sigma1(w1) + w12 + sigma0(w4)); - Round(e, f, g, h, a, b, c, d, 0x2de92c6f, w4 += sigma1(w2) + w13 + sigma0(w5)); - Round(d, e, f, g, h, a, b, c, 0x4a7484aa, w5 += sigma1(w3) + w14 + sigma0(w6)); - Round(c, d, e, f, g, h, a, b, 0x5cb0a9dc, w6 += sigma1(w4) + w15 + sigma0(w7)); - Round(b, c, d, e, f, g, h, a, 0x76f988da, w7 += sigma1(w5) + w0 + sigma0(w8)); - Round(a, b, c, d, e, f, g, h, 0x983e5152, w8 += sigma1(w6) + w1 + sigma0(w9)); - Round(h, a, b, c, d, e, f, g, 0xa831c66d, w9 += sigma1(w7) + w2 + sigma0(w10)); - Round(g, h, a, b, c, d, e, f, 0xb00327c8, w10 += sigma1(w8) + w3 + sigma0(w11)); - Round(f, g, h, a, b, c, d, e, 0xbf597fc7, w11 += sigma1(w9) + w4 + sigma0(w12)); - Round(e, f, g, h, a, b, c, d, 0xc6e00bf3, w12 += sigma1(w10) + w5 + sigma0(w13)); - Round(d, e, f, g, h, a, b, c, 0xd5a79147, w13 += sigma1(w11) + w6 + sigma0(w14)); - Round(c, d, e, f, g, h, a, b, 0x06ca6351, w14 += sigma1(w12) + w7 + sigma0(w15)); - Round(b, c, d, e, f, g, h, a, 0x14292967, w15 += sigma1(w13) + w8 + sigma0(w0)); - - Round(a, b, c, d, e, f, g, h, 0x27b70a85, w0 += sigma1(w14) + w9 + sigma0(w1)); - Round(h, a, b, c, d, e, f, g, 0x2e1b2138, w1 += sigma1(w15) + w10 + sigma0(w2)); - Round(g, h, a, b, c, d, e, f, 0x4d2c6dfc, w2 += sigma1(w0) + w11 + sigma0(w3)); - Round(f, g, h, a, b, c, d, e, 0x53380d13, w3 += sigma1(w1) + w12 + sigma0(w4)); 
- Round(e, f, g, h, a, b, c, d, 0x650a7354, w4 += sigma1(w2) + w13 + sigma0(w5)); - Round(d, e, f, g, h, a, b, c, 0x766a0abb, w5 += sigma1(w3) + w14 + sigma0(w6)); - Round(c, d, e, f, g, h, a, b, 0x81c2c92e, w6 += sigma1(w4) + w15 + sigma0(w7)); - Round(b, c, d, e, f, g, h, a, 0x92722c85, w7 += sigma1(w5) + w0 + sigma0(w8)); - Round(a, b, c, d, e, f, g, h, 0xa2bfe8a1, w8 += sigma1(w6) + w1 + sigma0(w9)); - Round(h, a, b, c, d, e, f, g, 0xa81a664b, w9 += sigma1(w7) + w2 + sigma0(w10)); - Round(g, h, a, b, c, d, e, f, 0xc24b8b70, w10 += sigma1(w8) + w3 + sigma0(w11)); - Round(f, g, h, a, b, c, d, e, 0xc76c51a3, w11 += sigma1(w9) + w4 + sigma0(w12)); - Round(e, f, g, h, a, b, c, d, 0xd192e819, w12 += sigma1(w10) + w5 + sigma0(w13)); - Round(d, e, f, g, h, a, b, c, 0xd6990624, w13 += sigma1(w11) + w6 + sigma0(w14)); - Round(c, d, e, f, g, h, a, b, 0xf40e3585, w14 += sigma1(w12) + w7 + sigma0(w15)); - Round(b, c, d, e, f, g, h, a, 0x106aa070, w15 += sigma1(w13) + w8 + sigma0(w0)); - - Round(a, b, c, d, e, f, g, h, 0x19a4c116, w0 += sigma1(w14) + w9 + sigma0(w1)); - Round(h, a, b, c, d, e, f, g, 0x1e376c08, w1 += sigma1(w15) + w10 + sigma0(w2)); - Round(g, h, a, b, c, d, e, f, 0x2748774c, w2 += sigma1(w0) + w11 + sigma0(w3)); - Round(f, g, h, a, b, c, d, e, 0x34b0bcb5, w3 += sigma1(w1) + w12 + sigma0(w4)); - Round(e, f, g, h, a, b, c, d, 0x391c0cb3, w4 += sigma1(w2) + w13 + sigma0(w5)); - Round(d, e, f, g, h, a, b, c, 0x4ed8aa4a, w5 += sigma1(w3) + w14 + sigma0(w6)); - Round(c, d, e, f, g, h, a, b, 0x5b9cca4f, w6 += sigma1(w4) + w15 + sigma0(w7)); - Round(b, c, d, e, f, g, h, a, 0x682e6ff3, w7 += sigma1(w5) + w0 + sigma0(w8)); - Round(a, b, c, d, e, f, g, h, 0x748f82ee, w8 += sigma1(w6) + w1 + sigma0(w9)); - Round(h, a, b, c, d, e, f, g, 0x78a5636f, w9 += sigma1(w7) + w2 + sigma0(w10)); - Round(g, h, a, b, c, d, e, f, 0x84c87814, w10 += sigma1(w8) + w3 + sigma0(w11)); - Round(f, g, h, a, b, c, d, e, 0x8cc70208, w11 += sigma1(w9) + w4 + sigma0(w12)); - Round(e, f, g, h, a, b, c, d, 0x90befffa, w12 += sigma1(w10) + w5 + sigma0(w13)); - Round(d, e, f, g, h, a, b, c, 0xa4506ceb, w13 += sigma1(w11) + w6 + sigma0(w14)); - Round(c, d, e, f, g, h, a, b, 0xbef9a3f7, w14 + sigma1(w12) + w7 + sigma0(w15)); - Round(b, c, d, e, f, g, h, a, 0xc67178f2, w15 + sigma1(w13) + w8 + sigma0(w0)); - - s[0] += a; - s[1] += b; - s[2] += c; - s[3] += d; - s[4] += e; - s[5] += f; - s[6] += g; - s[7] += h; -} - -static void secp256k1_sha256_write(secp256k1_sha256_t *hash, const unsigned char *data, size_t len) { - size_t bufsize = hash->bytes & 0x3F; - hash->bytes += len; - while (bufsize + len >= 64) { - /* Fill the buffer, and process it. */ - memcpy(((unsigned char*)hash->buf) + bufsize, data, 64 - bufsize); - data += 64 - bufsize; - len -= 64 - bufsize; - secp256k1_sha256_transform(hash->s, hash->buf); - bufsize = 0; - } - if (len) { - /* Fill the buffer with what remains. 
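
For orientation, a hypothetical caller drives this streaming interface through the three functions declared in hash.h, feeding data in chunks of any size:

    secp256k1_sha256_t h;
    unsigned char out[32];

    /* Incremental hashing: chunk boundaries do not affect the digest. */
    secp256k1_sha256_initialize(&h);
    secp256k1_sha256_write(&h, (const unsigned char *)"ab", 2);
    secp256k1_sha256_write(&h, (const unsigned char *)"c", 1);
    secp256k1_sha256_finalize(&h, out); /* out == SHA-256("abc") */
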
*/ - memcpy(((unsigned char*)hash->buf) + bufsize, data, len); - } -} - -static void secp256k1_sha256_finalize(secp256k1_sha256_t *hash, unsigned char *out32) { - static const unsigned char pad[64] = {0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - uint32_t sizedesc[2]; - uint32_t out[8]; - int i = 0; - sizedesc[0] = BE32(hash->bytes >> 29); - sizedesc[1] = BE32(hash->bytes << 3); - secp256k1_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64)); - secp256k1_sha256_write(hash, (const unsigned char*)sizedesc, 8); - for (i = 0; i < 8; i++) { - out[i] = BE32(hash->s[i]); - hash->s[i] = 0; - } - memcpy(out32, (const unsigned char*)out, 32); -} - -static void secp256k1_hmac_sha256_initialize(secp256k1_hmac_sha256_t *hash, const unsigned char *key, size_t keylen) { - int n; - unsigned char rkey[64]; - if (keylen <= 64) { - memcpy(rkey, key, keylen); - memset(rkey + keylen, 0, 64 - keylen); - } else { - secp256k1_sha256_t sha256; - secp256k1_sha256_initialize(&sha256); - secp256k1_sha256_write(&sha256, key, keylen); - secp256k1_sha256_finalize(&sha256, rkey); - memset(rkey + 32, 0, 32); - } - - secp256k1_sha256_initialize(&hash->outer); - for (n = 0; n < 64; n++) { - rkey[n] ^= 0x5c; - } - secp256k1_sha256_write(&hash->outer, rkey, 64); - - secp256k1_sha256_initialize(&hash->inner); - for (n = 0; n < 64; n++) { - rkey[n] ^= 0x5c ^ 0x36; - } - secp256k1_sha256_write(&hash->inner, rkey, 64); - memset(rkey, 0, 64); -} - -static void secp256k1_hmac_sha256_write(secp256k1_hmac_sha256_t *hash, const unsigned char *data, size_t size) { - secp256k1_sha256_write(&hash->inner, data, size); -} - -static void secp256k1_hmac_sha256_finalize(secp256k1_hmac_sha256_t *hash, unsigned char *out32) { - unsigned char temp[32]; - secp256k1_sha256_finalize(&hash->inner, temp); - secp256k1_sha256_write(&hash->outer, temp, 32); - memset(temp, 0, 32); - secp256k1_sha256_finalize(&hash->outer, out32); -} - - -static void secp256k1_rfc6979_hmac_sha256_initialize(secp256k1_rfc6979_hmac_sha256_t *rng, const unsigned char *key, size_t keylen) { - secp256k1_hmac_sha256_t hmac; - static const unsigned char zero[1] = {0x00}; - static const unsigned char one[1] = {0x01}; - - memset(rng->v, 0x01, 32); /* RFC6979 3.2.b. */ - memset(rng->k, 0x00, 32); /* RFC6979 3.2.c. */ - - /* RFC6979 3.2.d. */ - secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); - secp256k1_hmac_sha256_write(&hmac, rng->v, 32); - secp256k1_hmac_sha256_write(&hmac, zero, 1); - secp256k1_hmac_sha256_write(&hmac, key, keylen); - secp256k1_hmac_sha256_finalize(&hmac, rng->k); - secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); - secp256k1_hmac_sha256_write(&hmac, rng->v, 32); - secp256k1_hmac_sha256_finalize(&hmac, rng->v); - - /* RFC6979 3.2.f. */ - secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); - secp256k1_hmac_sha256_write(&hmac, rng->v, 32); - secp256k1_hmac_sha256_write(&hmac, one, 1); - secp256k1_hmac_sha256_write(&hmac, key, keylen); - secp256k1_hmac_sha256_finalize(&hmac, rng->k); - secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); - secp256k1_hmac_sha256_write(&hmac, rng->v, 32); - secp256k1_hmac_sha256_finalize(&hmac, rng->v); - rng->retry = 0; -} - -static void secp256k1_rfc6979_hmac_sha256_generate(secp256k1_rfc6979_hmac_sha256_t *rng, unsigned char *out, size_t outlen) { - /* RFC6979 3.2.h. 
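
The generator defined here completes a three-call HMAC-DRBG interface; a hypothetical caller looks like the following (the buffer contents are an assumption — the library's actual nonce function feeds the secret key followed by the message hash):

    secp256k1_rfc6979_hmac_sha256_t rng;
    unsigned char keydata[64]; /* assumed already filled: seckey32 || msg32 */
    unsigned char nonce[32];

    /* Seed the DRBG, pull one 32-byte candidate nonce, wipe the state. */
    secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, sizeof(keydata));
    secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce, sizeof(nonce));
    secp256k1_rfc6979_hmac_sha256_finalize(&rng);
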
*/ - static const unsigned char zero[1] = {0x00}; - if (rng->retry) { - secp256k1_hmac_sha256_t hmac; - secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); - secp256k1_hmac_sha256_write(&hmac, rng->v, 32); - secp256k1_hmac_sha256_write(&hmac, zero, 1); - secp256k1_hmac_sha256_finalize(&hmac, rng->k); - secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); - secp256k1_hmac_sha256_write(&hmac, rng->v, 32); - secp256k1_hmac_sha256_finalize(&hmac, rng->v); - } - - while (outlen > 0) { - secp256k1_hmac_sha256_t hmac; - int now = outlen; - secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32); - secp256k1_hmac_sha256_write(&hmac, rng->v, 32); - secp256k1_hmac_sha256_finalize(&hmac, rng->v); - if (now > 32) { - now = 32; - } - memcpy(out, rng->v, now); - out += now; - outlen -= now; - } - - rng->retry = 1; -} - -static void secp256k1_rfc6979_hmac_sha256_finalize(secp256k1_rfc6979_hmac_sha256_t *rng) { - memset(rng->k, 0, 32); - memset(rng->v, 0, 32); - rng->retry = 0; -} - -#undef BE32 -#undef Round -#undef sigma1 -#undef sigma0 -#undef Sigma1 -#undef Sigma0 -#undef Maj -#undef Ch - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1.java b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1.java deleted file mode 100644 index 1c67802fb..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1.java +++ /dev/null @@ -1,446 +0,0 @@ -/* - * Copyright 2013 Google Inc. - * Copyright 2014-2016 the libsecp256k1 contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bitcoin; - -import java.nio.ByteBuffer; -import java.nio.ByteOrder; - -import java.math.BigInteger; -import com.google.common.base.Preconditions; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import static org.bitcoin.NativeSecp256k1Util.*; - -/** - *
This class holds native methods to handle ECDSA verification.
- *
- * You can find an example library that can be used for this at https://github.com/bitcoin/secp256k1
- *
- * To build secp256k1 for use with bitcoinj, run
- * `./configure --enable-jni --enable-experimental --enable-module-ecdh`
- * and `make`, then copy `.libs/libsecp256k1.so` to your system library path
- * or point the JVM to the folder containing it with -Djava.library.path
- */ -public class NativeSecp256k1 { - - private static final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock(); - private static final Lock r = rwl.readLock(); - private static final Lock w = rwl.writeLock(); - private static ThreadLocal nativeECDSABuffer = new ThreadLocal(); - /** - * Verifies the given secp256k1 signature in native code. - * Calling when enabled == false is undefined (probably library not loaded) - * - * @param data The data which was signed, must be exactly 32 bytes - * @param signature The signature - * @param pub The public key which did the signing - */ - public static boolean verify(byte[] data, byte[] signature, byte[] pub) throws AssertFailException{ - Preconditions.checkArgument(data.length == 32 && signature.length <= 520 && pub.length <= 520); - - ByteBuffer byteBuff = nativeECDSABuffer.get(); - if (byteBuff == null || byteBuff.capacity() < 520) { - byteBuff = ByteBuffer.allocateDirect(520); - byteBuff.order(ByteOrder.nativeOrder()); - nativeECDSABuffer.set(byteBuff); - } - byteBuff.rewind(); - byteBuff.put(data); - byteBuff.put(signature); - byteBuff.put(pub); - - byte[][] retByteArray; - - r.lock(); - try { - return secp256k1_ecdsa_verify(byteBuff, Secp256k1Context.getContext(), signature.length, pub.length) == 1; - } finally { - r.unlock(); - } - } - - /** - * libsecp256k1 Create an ECDSA signature. - * - * @param data Message hash, 32 bytes - * @param key Secret key, 32 bytes - * - * Return values - * @param sig byte array of signature - */ - public static byte[] sign(byte[] data, byte[] sec) throws AssertFailException{ - Preconditions.checkArgument(data.length == 32 && sec.length <= 32); - - ByteBuffer byteBuff = nativeECDSABuffer.get(); - if (byteBuff == null || byteBuff.capacity() < 32 + 32) { - byteBuff = ByteBuffer.allocateDirect(32 + 32); - byteBuff.order(ByteOrder.nativeOrder()); - nativeECDSABuffer.set(byteBuff); - } - byteBuff.rewind(); - byteBuff.put(data); - byteBuff.put(sec); - - byte[][] retByteArray; - - r.lock(); - try { - retByteArray = secp256k1_ecdsa_sign(byteBuff, Secp256k1Context.getContext()); - } finally { - r.unlock(); - } - - byte[] sigArr = retByteArray[0]; - int sigLen = new BigInteger(new byte[] { retByteArray[1][0] }).intValue(); - int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue(); - - assertEquals(sigArr.length, sigLen, "Got bad signature length."); - - return retVal == 0 ? 
new byte[0] : sigArr; - } - - /** - * libsecp256k1 Seckey Verify - returns 1 if valid, 0 if invalid - * - * @param seckey ECDSA Secret key, 32 bytes - */ - public static boolean secKeyVerify(byte[] seckey) { - Preconditions.checkArgument(seckey.length == 32); - - ByteBuffer byteBuff = nativeECDSABuffer.get(); - if (byteBuff == null || byteBuff.capacity() < seckey.length) { - byteBuff = ByteBuffer.allocateDirect(seckey.length); - byteBuff.order(ByteOrder.nativeOrder()); - nativeECDSABuffer.set(byteBuff); - } - byteBuff.rewind(); - byteBuff.put(seckey); - - r.lock(); - try { - return secp256k1_ec_seckey_verify(byteBuff,Secp256k1Context.getContext()) == 1; - } finally { - r.unlock(); - } - } - - - /** - * libsecp256k1 Compute Pubkey - computes public key from secret key - * - * @param seckey ECDSA Secret key, 32 bytes - * - * Return values - * @param pubkey ECDSA Public key, 33 or 65 bytes - */ - //TODO add a 'compressed' arg - public static byte[] computePubkey(byte[] seckey) throws AssertFailException{ - Preconditions.checkArgument(seckey.length == 32); - - ByteBuffer byteBuff = nativeECDSABuffer.get(); - if (byteBuff == null || byteBuff.capacity() < seckey.length) { - byteBuff = ByteBuffer.allocateDirect(seckey.length); - byteBuff.order(ByteOrder.nativeOrder()); - nativeECDSABuffer.set(byteBuff); - } - byteBuff.rewind(); - byteBuff.put(seckey); - - byte[][] retByteArray; - - r.lock(); - try { - retByteArray = secp256k1_ec_pubkey_create(byteBuff, Secp256k1Context.getContext()); - } finally { - r.unlock(); - } - - byte[] pubArr = retByteArray[0]; - int pubLen = new BigInteger(new byte[] { retByteArray[1][0] }).intValue(); - int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue(); - - assertEquals(pubArr.length, pubLen, "Got bad pubkey length."); - - return retVal == 0 ? new byte[0]: pubArr; - } - - /** - * libsecp256k1 Cleanup - This destroys the secp256k1 context object - * This should be called at the end of the program for proper cleanup of the context. 
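
On the native side this maps onto the standard libsecp256k1 context lifecycle; a sketch (the flag combination is an assumption, not taken from this diff):

    #include <secp256k1.h>

    secp256k1_context *ctx =
        secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
    /* ... sign/verify calls share this one context ... */
    secp256k1_context_destroy(ctx); /* what cleanup() triggers through JNI */
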
- */ - public static synchronized void cleanup() { - w.lock(); - try { - secp256k1_destroy_context(Secp256k1Context.getContext()); - } finally { - w.unlock(); - } - } - - public static long cloneContext() { - r.lock(); - try { - return secp256k1_ctx_clone(Secp256k1Context.getContext()); - } finally { r.unlock(); } - } - - /** - * libsecp256k1 PrivKey Tweak-Mul - Tweak privkey by multiplying to it - * - * @param tweak some bytes to tweak with - * @param seckey 32-byte seckey - */ - public static byte[] privKeyTweakMul(byte[] privkey, byte[] tweak) throws AssertFailException{ - Preconditions.checkArgument(privkey.length == 32); - - ByteBuffer byteBuff = nativeECDSABuffer.get(); - if (byteBuff == null || byteBuff.capacity() < privkey.length + tweak.length) { - byteBuff = ByteBuffer.allocateDirect(privkey.length + tweak.length); - byteBuff.order(ByteOrder.nativeOrder()); - nativeECDSABuffer.set(byteBuff); - } - byteBuff.rewind(); - byteBuff.put(privkey); - byteBuff.put(tweak); - - byte[][] retByteArray; - r.lock(); - try { - retByteArray = secp256k1_privkey_tweak_mul(byteBuff,Secp256k1Context.getContext()); - } finally { - r.unlock(); - } - - byte[] privArr = retByteArray[0]; - - int privLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF; - int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue(); - - assertEquals(privArr.length, privLen, "Got bad pubkey length."); - - assertEquals(retVal, 1, "Failed return value check."); - - return privArr; - } - - /** - * libsecp256k1 PrivKey Tweak-Add - Tweak privkey by adding to it - * - * @param tweak some bytes to tweak with - * @param seckey 32-byte seckey - */ - public static byte[] privKeyTweakAdd(byte[] privkey, byte[] tweak) throws AssertFailException{ - Preconditions.checkArgument(privkey.length == 32); - - ByteBuffer byteBuff = nativeECDSABuffer.get(); - if (byteBuff == null || byteBuff.capacity() < privkey.length + tweak.length) { - byteBuff = ByteBuffer.allocateDirect(privkey.length + tweak.length); - byteBuff.order(ByteOrder.nativeOrder()); - nativeECDSABuffer.set(byteBuff); - } - byteBuff.rewind(); - byteBuff.put(privkey); - byteBuff.put(tweak); - - byte[][] retByteArray; - r.lock(); - try { - retByteArray = secp256k1_privkey_tweak_add(byteBuff,Secp256k1Context.getContext()); - } finally { - r.unlock(); - } - - byte[] privArr = retByteArray[0]; - - int privLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF; - int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue(); - - assertEquals(privArr.length, privLen, "Got bad pubkey length."); - - assertEquals(retVal, 1, "Failed return value check."); - - return privArr; - } - - /** - * libsecp256k1 PubKey Tweak-Add - Tweak pubkey by adding to it - * - * @param tweak some bytes to tweak with - * @param pubkey 32-byte seckey - */ - public static byte[] pubKeyTweakAdd(byte[] pubkey, byte[] tweak) throws AssertFailException{ - Preconditions.checkArgument(pubkey.length == 33 || pubkey.length == 65); - - ByteBuffer byteBuff = nativeECDSABuffer.get(); - if (byteBuff == null || byteBuff.capacity() < pubkey.length + tweak.length) { - byteBuff = ByteBuffer.allocateDirect(pubkey.length + tweak.length); - byteBuff.order(ByteOrder.nativeOrder()); - nativeECDSABuffer.set(byteBuff); - } - byteBuff.rewind(); - byteBuff.put(pubkey); - byteBuff.put(tweak); - - byte[][] retByteArray; - r.lock(); - try { - retByteArray = secp256k1_pubkey_tweak_add(byteBuff,Secp256k1Context.getContext(), pubkey.length); - } finally { - 
r.unlock(); - } - - byte[] pubArr = retByteArray[0]; - - int pubLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF; - int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue(); - - assertEquals(pubArr.length, pubLen, "Got bad pubkey length."); - - assertEquals(retVal, 1, "Failed return value check."); - - return pubArr; - } - - /** - * libsecp256k1 PubKey Tweak-Mul - Tweak pubkey by multiplying to it - * - * @param tweak some bytes to tweak with - * @param pubkey 32-byte seckey - */ - public static byte[] pubKeyTweakMul(byte[] pubkey, byte[] tweak) throws AssertFailException{ - Preconditions.checkArgument(pubkey.length == 33 || pubkey.length == 65); - - ByteBuffer byteBuff = nativeECDSABuffer.get(); - if (byteBuff == null || byteBuff.capacity() < pubkey.length + tweak.length) { - byteBuff = ByteBuffer.allocateDirect(pubkey.length + tweak.length); - byteBuff.order(ByteOrder.nativeOrder()); - nativeECDSABuffer.set(byteBuff); - } - byteBuff.rewind(); - byteBuff.put(pubkey); - byteBuff.put(tweak); - - byte[][] retByteArray; - r.lock(); - try { - retByteArray = secp256k1_pubkey_tweak_mul(byteBuff,Secp256k1Context.getContext(), pubkey.length); - } finally { - r.unlock(); - } - - byte[] pubArr = retByteArray[0]; - - int pubLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF; - int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue(); - - assertEquals(pubArr.length, pubLen, "Got bad pubkey length."); - - assertEquals(retVal, 1, "Failed return value check."); - - return pubArr; - } - - /** - * libsecp256k1 create ECDH secret - constant time ECDH calculation - * - * @param seckey byte array of secret key used in exponentiaion - * @param pubkey byte array of public key used in exponentiaion - */ - public static byte[] createECDHSecret(byte[] seckey, byte[] pubkey) throws AssertFailException{ - Preconditions.checkArgument(seckey.length <= 32 && pubkey.length <= 65); - - ByteBuffer byteBuff = nativeECDSABuffer.get(); - if (byteBuff == null || byteBuff.capacity() < 32 + pubkey.length) { - byteBuff = ByteBuffer.allocateDirect(32 + pubkey.length); - byteBuff.order(ByteOrder.nativeOrder()); - nativeECDSABuffer.set(byteBuff); - } - byteBuff.rewind(); - byteBuff.put(seckey); - byteBuff.put(pubkey); - - byte[][] retByteArray; - r.lock(); - try { - retByteArray = secp256k1_ecdh(byteBuff, Secp256k1Context.getContext(), pubkey.length); - } finally { - r.unlock(); - } - - byte[] resArr = retByteArray[0]; - int retVal = new BigInteger(new byte[] { retByteArray[1][0] }).intValue(); - - assertEquals(resArr.length, 32, "Got bad result length."); - assertEquals(retVal, 1, "Failed return value check."); - - return resArr; - } - - /** - * libsecp256k1 randomize - updates the context randomization - * - * @param seed 32-byte random seed - */ - public static synchronized boolean randomize(byte[] seed) throws AssertFailException{ - Preconditions.checkArgument(seed.length == 32 || seed == null); - - ByteBuffer byteBuff = nativeECDSABuffer.get(); - if (byteBuff == null || byteBuff.capacity() < seed.length) { - byteBuff = ByteBuffer.allocateDirect(seed.length); - byteBuff.order(ByteOrder.nativeOrder()); - nativeECDSABuffer.set(byteBuff); - } - byteBuff.rewind(); - byteBuff.put(seed); - - w.lock(); - try { - return secp256k1_context_randomize(byteBuff, Secp256k1Context.getContext()) == 1; - } finally { - w.unlock(); - } - } - - private static native long secp256k1_ctx_clone(long context); - - private static native int 
secp256k1_context_randomize(ByteBuffer byteBuff, long context); - - private static native byte[][] secp256k1_privkey_tweak_add(ByteBuffer byteBuff, long context); - - private static native byte[][] secp256k1_privkey_tweak_mul(ByteBuffer byteBuff, long context); - - private static native byte[][] secp256k1_pubkey_tweak_add(ByteBuffer byteBuff, long context, int pubLen); - - private static native byte[][] secp256k1_pubkey_tweak_mul(ByteBuffer byteBuff, long context, int pubLen); - - private static native void secp256k1_destroy_context(long context); - - private static native int secp256k1_ecdsa_verify(ByteBuffer byteBuff, long context, int sigLen, int pubLen); - - private static native byte[][] secp256k1_ecdsa_sign(ByteBuffer byteBuff, long context); - - private static native int secp256k1_ec_seckey_verify(ByteBuffer byteBuff, long context); - - private static native byte[][] secp256k1_ec_pubkey_create(ByteBuffer byteBuff, long context); - - private static native byte[][] secp256k1_ec_pubkey_parse(ByteBuffer byteBuff, long context, int inputLen); - - private static native byte[][] secp256k1_ecdh(ByteBuffer byteBuff, long context, int inputLen); - -} diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java deleted file mode 100644 index c00d08899..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java +++ /dev/null @@ -1,226 +0,0 @@ -package org.bitcoin; - -import com.google.common.io.BaseEncoding; -import java.util.Arrays; -import java.math.BigInteger; -import javax.xml.bind.DatatypeConverter; -import static org.bitcoin.NativeSecp256k1Util.*; - -/** - * This class holds test cases defined for testing this library. 
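
The verification tests that follow ultimately drive this C-side sequence (a sketch with `ctx`, `der`, `derlen`, `pubdata`, `publen`, and `msg32` as stand-in variables):

    secp256k1_ecdsa_signature sig;
    secp256k1_pubkey pub;

    /* Parse the DER signature and the serialized pubkey, then verify
     * the signature against the 32-byte message hash. */
    int ok = secp256k1_ecdsa_signature_parse_der(ctx, &sig, der, derlen)
          && secp256k1_ec_pubkey_parse(ctx, &pub, pubdata, publen)
          && secp256k1_ecdsa_verify(ctx, &sig, msg32, &pub);
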
- */ -public class NativeSecp256k1Test { - - //TODO improve comments/add more tests - /** - * This tests verify() for a valid signature - */ - public static void testVerifyPos() throws AssertFailException{ - boolean result = false; - byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A90".toLowerCase()); //sha256hash of "testing" - byte[] sig = BaseEncoding.base16().lowerCase().decode("3044022079BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980220294F14E883B3F525B5367756C2A11EF6CF84B730B36C17CB0C56F0AAB2C98589".toLowerCase()); - byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase()); - - result = NativeSecp256k1.verify( data, sig, pub); - assertEquals( result, true , "testVerifyPos"); - } - - /** - * This tests verify() for a non-valid signature - */ - public static void testVerifyNeg() throws AssertFailException{ - boolean result = false; - byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A91".toLowerCase()); //sha256hash of "testing" - byte[] sig = BaseEncoding.base16().lowerCase().decode("3044022079BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980220294F14E883B3F525B5367756C2A11EF6CF84B730B36C17CB0C56F0AAB2C98589".toLowerCase()); - byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase()); - - result = NativeSecp256k1.verify( data, sig, pub); - //System.out.println(" TEST " + new BigInteger(1, resultbytes).toString(16)); - assertEquals( result, false , "testVerifyNeg"); - } - - /** - * This tests secret key verify() for a valid secretkey - */ - public static void testSecKeyVerifyPos() throws AssertFailException{ - boolean result = false; - byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase()); - - result = NativeSecp256k1.secKeyVerify( sec ); - //System.out.println(" TEST " + new BigInteger(1, resultbytes).toString(16)); - assertEquals( result, true , "testSecKeyVerifyPos"); - } - - /** - * This tests secret key verify() for a invalid secretkey - */ - public static void testSecKeyVerifyNeg() throws AssertFailException{ - boolean result = false; - byte[] sec = BaseEncoding.base16().lowerCase().decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".toLowerCase()); - - result = NativeSecp256k1.secKeyVerify( sec ); - //System.out.println(" TEST " + new BigInteger(1, resultbytes).toString(16)); - assertEquals( result, false , "testSecKeyVerifyNeg"); - } - - /** - * This tests public key create() for a valid secretkey - */ - public static void testPubKeyCreatePos() throws AssertFailException{ - byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase()); - - byte[] resultArr = NativeSecp256k1.computePubkey( sec); - String pubkeyString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); - assertEquals( pubkeyString , "04C591A8FF19AC9C4E4E5793673B83123437E975285E7B442F4EE2654DFFCA5E2D2103ED494718C697AC9AEBCFD19612E224DB46661011863ED2FC54E71861E2A6" , "testPubKeyCreatePos"); - } - - /** - * This tests public key create() for a invalid secretkey - */ - public static 
void testPubKeyCreateNeg() throws AssertFailException{ - byte[] sec = BaseEncoding.base16().lowerCase().decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".toLowerCase()); - - byte[] resultArr = NativeSecp256k1.computePubkey( sec); - String pubkeyString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); - assertEquals( pubkeyString, "" , "testPubKeyCreateNeg"); - } - - /** - * This tests sign() for a valid secretkey - */ - public static void testSignPos() throws AssertFailException{ - - byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A90".toLowerCase()); //sha256hash of "testing" - byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase()); - - byte[] resultArr = NativeSecp256k1.sign(data, sec); - String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); - assertEquals( sigString, "30440220182A108E1448DC8F1FB467D06A0F3BB8EA0533584CB954EF8DA112F1D60E39A202201C66F36DA211C087F3AF88B50EDF4F9BDAA6CF5FD6817E74DCA34DB12390C6E9" , "testSignPos"); - } - - /** - * This tests sign() for a invalid secretkey - */ - public static void testSignNeg() throws AssertFailException{ - byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A90".toLowerCase()); //sha256hash of "testing" - byte[] sec = BaseEncoding.base16().lowerCase().decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".toLowerCase()); - - byte[] resultArr = NativeSecp256k1.sign(data, sec); - String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); - assertEquals( sigString, "" , "testSignNeg"); - } - - /** - * This tests private key tweak-add - */ - public static void testPrivKeyTweakAdd_1() throws AssertFailException { - byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase()); - byte[] data = BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak" - - byte[] resultArr = NativeSecp256k1.privKeyTweakAdd( sec , data ); - String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); - assertEquals( sigString , "A168571E189E6F9A7E2D657A4B53AE99B909F7E712D1C23CED28093CD57C88F3" , "testPrivKeyAdd_1"); - } - - /** - * This tests private key tweak-mul - */ - public static void testPrivKeyTweakMul_1() throws AssertFailException { - byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase()); - byte[] data = BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak" - - byte[] resultArr = NativeSecp256k1.privKeyTweakMul( sec , data ); - String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); - assertEquals( sigString , "97F8184235F101550F3C71C927507651BD3F1CDB4A5A33B8986ACF0DEE20FFFC" , "testPrivKeyMul_1"); - } - - /** - * This tests private key tweak-add uncompressed - */ - public static void testPrivKeyTweakAdd_2() throws AssertFailException { - byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase()); - byte[] data = 
BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak" - - byte[] resultArr = NativeSecp256k1.pubKeyTweakAdd( pub , data ); - String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); - assertEquals( sigString , "0411C6790F4B663CCE607BAAE08C43557EDC1A4D11D88DFCB3D841D0C6A941AF525A268E2A863C148555C48FB5FBA368E88718A46E205FABC3DBA2CCFFAB0796EF" , "testPrivKeyAdd_2"); - } - - /** - * This tests private key tweak-mul uncompressed - */ - public static void testPrivKeyTweakMul_2() throws AssertFailException { - byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase()); - byte[] data = BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak" - - byte[] resultArr = NativeSecp256k1.pubKeyTweakMul( pub , data ); - String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); - assertEquals( sigString , "04E0FE6FE55EBCA626B98A807F6CAF654139E14E5E3698F01A9A658E21DC1D2791EC060D4F412A794D5370F672BC94B722640B5F76914151CFCA6E712CA48CC589" , "testPrivKeyMul_2"); - } - - /** - * This tests seed randomization - */ - public static void testRandomize() throws AssertFailException { - byte[] seed = BaseEncoding.base16().lowerCase().decode("A441B15FE9A3CF56661190A0B93B9DEC7D04127288CC87250967CF3B52894D11".toLowerCase()); //sha256hash of "random" - boolean result = NativeSecp256k1.randomize(seed); - assertEquals( result, true, "testRandomize"); - } - - public static void testCreateECDHSecret() throws AssertFailException{ - - byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase()); - byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase()); - - byte[] resultArr = NativeSecp256k1.createECDHSecret(sec, pub); - String ecdhString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr); - assertEquals( ecdhString, "2A2A67007A926E6594AF3EB564FC74005B37A9C8AEF2033C4552051B5C87F043" , "testCreateECDHSecret"); - } - - public static void main(String[] args) throws AssertFailException{ - - - System.out.println("\n libsecp256k1 enabled: " + Secp256k1Context.isEnabled() + "\n"); - - assertEquals( Secp256k1Context.isEnabled(), true, "isEnabled" ); - - //Test verify() success/fail - testVerifyPos(); - testVerifyNeg(); - - //Test secKeyVerify() success/fail - testSecKeyVerifyPos(); - testSecKeyVerifyNeg(); - - //Test computePubkey() success/fail - testPubKeyCreatePos(); - testPubKeyCreateNeg(); - - //Test sign() success/fail - testSignPos(); - testSignNeg(); - - //Test privKeyTweakAdd() 1 - testPrivKeyTweakAdd_1(); - - //Test privKeyTweakMul() 2 - testPrivKeyTweakMul_1(); - - //Test privKeyTweakAdd() 3 - testPrivKeyTweakAdd_2(); - - //Test privKeyTweakMul() 4 - testPrivKeyTweakMul_2(); - - //Test randomize() - testRandomize(); - - //Test ECDH - testCreateECDHSecret(); - - NativeSecp256k1.cleanup(); - - System.out.println(" All tests passed." 
); - - } -} diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Util.java b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Util.java deleted file mode 100644 index 04732ba04..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Util.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2014-2016 the libsecp256k1 contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bitcoin; - -public class NativeSecp256k1Util{ - - public static void assertEquals( int val, int val2, String message ) throws AssertFailException{ - if( val != val2 ) - throw new AssertFailException("FAIL: " + message); - } - - public static void assertEquals( boolean val, boolean val2, String message ) throws AssertFailException{ - if( val != val2 ) - throw new AssertFailException("FAIL: " + message); - else - System.out.println("PASS: " + message); - } - - public static void assertEquals( String val, String val2, String message ) throws AssertFailException{ - if( !val.equals(val2) ) - throw new AssertFailException("FAIL: " + message); - else - System.out.println("PASS: " + message); - } - - public static class AssertFailException extends Exception { - public AssertFailException(String message) { - super( message ); - } - } -} diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org/bitcoin/Secp256k1Context.java b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org/bitcoin/Secp256k1Context.java deleted file mode 100644 index 216c986a8..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org/bitcoin/Secp256k1Context.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2014-2016 the libsecp256k1 contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bitcoin; - -/** - * This class holds the context reference used in native methods - * to handle ECDSA operations. 
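
A plausible implementation of the `secp256k1_init_context` native hook this class loads (a sketch — the corresponding C file is not part of this hunk, and the flag choice is an assumption):

    SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_secp256k1_1init_1context
      (JNIEnv* env, jclass classObject)
    {
        /* Create one long-lived context and hand its address back to Java. */
        secp256k1_context *ctx =
            secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
        (void)classObject; (void)env;
        return (jlong)(uintptr_t)ctx;
    }
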
- */ -public class Secp256k1Context { - private static final boolean enabled; //true if the library is loaded - private static final long context; //ref to pointer to context obj - - static { //static initializer - boolean isEnabled = true; - long contextRef = -1; - try { - System.loadLibrary("secp256k1"); - contextRef = secp256k1_init_context(); - } catch (UnsatisfiedLinkError e) { - System.out.println("UnsatisfiedLinkError: " + e.toString()); - isEnabled = false; - } - enabled = isEnabled; - context = contextRef; - } - - public static boolean isEnabled() { - return enabled; - } - - public static long getContext() { - if(!enabled) return -1; //sanity check - return context; - } - - private static native long secp256k1_init_context(); -} diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.c b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.c deleted file mode 100644 index bcef7b32c..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.c +++ /dev/null @@ -1,377 +0,0 @@ -#include <stdlib.h> -#include <stdint.h> -#include <string.h> -#include "org_bitcoin_NativeSecp256k1.h" -#include "include/secp256k1.h" -#include "include/secp256k1_ecdh.h" -#include "include/secp256k1_recovery.h" - - -SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone - (JNIEnv* env, jclass classObject, jlong ctx_l) -{ - const secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; - - jlong ctx_clone_l = (uintptr_t) secp256k1_context_clone(ctx); - - (void)classObject;(void)env; - - return ctx_clone_l; - -} - -SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1context_1randomize - (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) -{ - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; - - const unsigned char* seed = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); - - (void)classObject; - - return secp256k1_context_randomize(ctx, seed); - -} - -SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1destroy_1context - (JNIEnv* env, jclass classObject, jlong ctx_l) -{ - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; - - secp256k1_context_destroy(ctx); - - (void)classObject;(void)env; -} - -SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify - (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint siglen, jint publen) -{ - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; - - unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); - const unsigned char* sigdata = { (unsigned char*) (data + 32) }; - const unsigned char* pubdata = { (unsigned char*) (data + siglen + 32) }; - - secp256k1_ecdsa_signature sig; - secp256k1_pubkey pubkey; - - int ret = secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigdata, siglen); - - if( ret ) { - ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pubdata, publen); - - if( ret ) { - ret = secp256k1_ecdsa_verify(ctx, &sig, data, &pubkey); - } - } - - (void)classObject; - - return ret; -} - -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1sign - (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) -{ - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; - unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); - unsigned char* secKey = (unsigned
char*) (data + 32); - - jobjectArray retArray; - jbyteArray sigArray, intsByteArray; - unsigned char intsarray[2]; - - secp256k1_ecdsa_signature sig[72]; - - int ret = secp256k1_ecdsa_sign(ctx, sig, data, secKey, NULL, NULL ); - - unsigned char outputSer[72]; - size_t outputLen = 72; - - if( ret ) { - int ret2 = secp256k1_ecdsa_signature_serialize_der(ctx,outputSer, &outputLen, sig ); (void)ret2; - } - - intsarray[0] = outputLen; - intsarray[1] = ret; - - retArray = (*env)->NewObjectArray(env, 2, - (*env)->FindClass(env, "[B"), - (*env)->NewByteArray(env, 1)); - - sigArray = (*env)->NewByteArray(env, outputLen); - (*env)->SetByteArrayRegion(env, sigArray, 0, outputLen, (jbyte*)outputSer); - (*env)->SetObjectArrayElement(env, retArray, 0, sigArray); - - intsByteArray = (*env)->NewByteArray(env, 2); - (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray); - (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray); - - (void)classObject; - - return retArray; -} - -SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1seckey_1verify - (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) -{ - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; - unsigned char* secKey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); - - (void)classObject; - - return secp256k1_ec_seckey_verify(ctx, secKey); -} - -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1create - (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) -{ - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; - const unsigned char* secKey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); - - secp256k1_pubkey pubkey; - - jobjectArray retArray; - jbyteArray pubkeyArray, intsByteArray; - unsigned char intsarray[2]; - - int ret = secp256k1_ec_pubkey_create(ctx, &pubkey, secKey); - - unsigned char outputSer[65]; - size_t outputLen = 65; - - if( ret ) { - int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2; - } - - intsarray[0] = outputLen; - intsarray[1] = ret; - - retArray = (*env)->NewObjectArray(env, 2, - (*env)->FindClass(env, "[B"), - (*env)->NewByteArray(env, 1)); - - pubkeyArray = (*env)->NewByteArray(env, outputLen); - (*env)->SetByteArrayRegion(env, pubkeyArray, 0, outputLen, (jbyte*)outputSer); - (*env)->SetObjectArrayElement(env, retArray, 0, pubkeyArray); - - intsByteArray = (*env)->NewByteArray(env, 2); - (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray); - (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray); - - (void)classObject; - - return retArray; - -} - -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1add - (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) -{ - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; - unsigned char* privkey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); - const unsigned char* tweak = (unsigned char*) (privkey + 32); - - jobjectArray retArray; - jbyteArray privArray, intsByteArray; - unsigned char intsarray[2]; - - int privkeylen = 32; - - int ret = secp256k1_ec_privkey_tweak_add(ctx, privkey, tweak); - - intsarray[0] = privkeylen; - intsarray[1] = ret; - - retArray = (*env)->NewObjectArray(env, 2, - (*env)->FindClass(env, "[B"), - (*env)->NewByteArray(env, 1)); - - privArray = (*env)->NewByteArray(env, 
privkeylen); - (*env)->SetByteArrayRegion(env, privArray, 0, privkeylen, (jbyte*)privkey); - (*env)->SetObjectArrayElement(env, retArray, 0, privArray); - - intsByteArray = (*env)->NewByteArray(env, 2); - (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray); - (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray); - - (void)classObject; - - return retArray; -} - -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1mul - (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l) -{ - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; - unsigned char* privkey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject); - const unsigned char* tweak = (unsigned char*) (privkey + 32); - - jobjectArray retArray; - jbyteArray privArray, intsByteArray; - unsigned char intsarray[2]; - - int privkeylen = 32; - - int ret = secp256k1_ec_privkey_tweak_mul(ctx, privkey, tweak); - - intsarray[0] = privkeylen; - intsarray[1] = ret; - - retArray = (*env)->NewObjectArray(env, 2, - (*env)->FindClass(env, "[B"), - (*env)->NewByteArray(env, 1)); - - privArray = (*env)->NewByteArray(env, privkeylen); - (*env)->SetByteArrayRegion(env, privArray, 0, privkeylen, (jbyte*)privkey); - (*env)->SetObjectArrayElement(env, retArray, 0, privArray); - - intsByteArray = (*env)->NewByteArray(env, 2); - (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray); - (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray); - - (void)classObject; - - return retArray; -} - -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1add - (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen) -{ - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; -/* secp256k1_pubkey* pubkey = (secp256k1_pubkey*) (*env)->GetDirectBufferAddress(env, byteBufferObject);*/ - unsigned char* pkey = (*env)->GetDirectBufferAddress(env, byteBufferObject); - const unsigned char* tweak = (unsigned char*) (pkey + publen); - - jobjectArray retArray; - jbyteArray pubArray, intsByteArray; - unsigned char intsarray[2]; - unsigned char outputSer[65]; - size_t outputLen = 65; - - secp256k1_pubkey pubkey; - int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pkey, publen); - - if( ret ) { - ret = secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, tweak); - } - - if( ret ) { - int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2; - } - - intsarray[0] = outputLen; - intsarray[1] = ret; - - retArray = (*env)->NewObjectArray(env, 2, - (*env)->FindClass(env, "[B"), - (*env)->NewByteArray(env, 1)); - - pubArray = (*env)->NewByteArray(env, outputLen); - (*env)->SetByteArrayRegion(env, pubArray, 0, outputLen, (jbyte*)outputSer); - (*env)->SetObjectArrayElement(env, retArray, 0, pubArray); - - intsByteArray = (*env)->NewByteArray(env, 2); - (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray); - (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray); - - (void)classObject; - - return retArray; -} - -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1mul - (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen) -{ - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; - unsigned char* pkey = (*env)->GetDirectBufferAddress(env, byteBufferObject); - const unsigned char* tweak = (unsigned char*) (pkey + publen); - 
- jobjectArray retArray; - jbyteArray pubArray, intsByteArray; - unsigned char intsarray[2]; - unsigned char outputSer[65]; - size_t outputLen = 65; - - secp256k1_pubkey pubkey; - int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pkey, publen); - - if ( ret ) { - ret = secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, tweak); - } - - if( ret ) { - int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2; - } - - intsarray[0] = outputLen; - intsarray[1] = ret; - - retArray = (*env)->NewObjectArray(env, 2, - (*env)->FindClass(env, "[B"), - (*env)->NewByteArray(env, 1)); - - pubArray = (*env)->NewByteArray(env, outputLen); - (*env)->SetByteArrayRegion(env, pubArray, 0, outputLen, (jbyte*)outputSer); - (*env)->SetObjectArrayElement(env, retArray, 0, pubArray); - - intsByteArray = (*env)->NewByteArray(env, 2); - (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray); - (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray); - - (void)classObject; - - return retArray; -} - -SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1pubkey_1combine - (JNIEnv * env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint numkeys) -{ - (void)classObject;(void)env;(void)byteBufferObject;(void)ctx_l;(void)numkeys; - - return 0; -} - -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdh - (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen) -{ - secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l; - const unsigned char* secdata = (*env)->GetDirectBufferAddress(env, byteBufferObject); - const unsigned char* pubdata = (const unsigned char*) (secdata + 32); - - jobjectArray retArray; - jbyteArray outArray, intsByteArray; - unsigned char intsarray[1]; - secp256k1_pubkey pubkey; - unsigned char nonce_res[32]; - size_t outputLen = 32; - - int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pubdata, publen); - - if (ret) { - ret = secp256k1_ecdh( - ctx, - nonce_res, - &pubkey, - secdata - ); - } - - intsarray[0] = ret; - - retArray = (*env)->NewObjectArray(env, 2, - (*env)->FindClass(env, "[B"), - (*env)->NewByteArray(env, 1)); - - outArray = (*env)->NewByteArray(env, outputLen); - (*env)->SetByteArrayRegion(env, outArray, 0, 32, (jbyte*)nonce_res); - (*env)->SetObjectArrayElement(env, retArray, 0, outArray); - - intsByteArray = (*env)->NewByteArray(env, 1); - (*env)->SetByteArrayRegion(env, intsByteArray, 0, 1, (jbyte*)intsarray); - (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray); - - (void)classObject; - - return retArray; -} diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.h deleted file mode 100644 index fe613c9e9..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.h +++ /dev/null @@ -1,119 +0,0 @@ -/* DO NOT EDIT THIS FILE - it is machine generated */ -#include <jni.h> -#include "include/secp256k1.h" -/* Header for class org_bitcoin_NativeSecp256k1 */ - -#ifndef _Included_org_bitcoin_NativeSecp256k1 -#define _Included_org_bitcoin_NativeSecp256k1 -#ifdef __cplusplus -extern "C" { -#endif -/* - * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_ctx_clone - * Signature: (J)J - */ -SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone - (JNIEnv *, jclass, jlong); - -/* - * Class:
org_bitcoin_NativeSecp256k1 - * Method: secp256k1_context_randomize - * Signature: (Ljava/nio/ByteBuffer;J)I - */ -SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1context_1randomize - (JNIEnv *, jclass, jobject, jlong); - -/* - * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_privkey_tweak_add - * Signature: (Ljava/nio/ByteBuffer;J)[[B - */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1add - (JNIEnv *, jclass, jobject, jlong); - -/* - * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_privkey_tweak_mul - * Signature: (Ljava/nio/ByteBuffer;J)[[B - */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1mul - (JNIEnv *, jclass, jobject, jlong); - -/* - * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_pubkey_tweak_add - * Signature: (Ljava/nio/ByteBuffer;JI)[[B - */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1add - (JNIEnv *, jclass, jobject, jlong, jint); - -/* - * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_pubkey_tweak_mul - * Signature: (Ljava/nio/ByteBuffer;JI)[[B - */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1mul - (JNIEnv *, jclass, jobject, jlong, jint); - -/* - * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_destroy_context - * Signature: (J)V - */ -SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1destroy_1context - (JNIEnv *, jclass, jlong); - -/* - * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_ecdsa_verify - * Signature: (Ljava/nio/ByteBuffer;JII)I - */ -SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify - (JNIEnv *, jclass, jobject, jlong, jint, jint); - -/* - * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_ecdsa_sign - * Signature: (Ljava/nio/ByteBuffer;J)[[B - */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1sign - (JNIEnv *, jclass, jobject, jlong); - -/* - * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_ec_seckey_verify - * Signature: (Ljava/nio/ByteBuffer;J)I - */ -SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1seckey_1verify - (JNIEnv *, jclass, jobject, jlong); - -/* - * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_ec_pubkey_create - * Signature: (Ljava/nio/ByteBuffer;J)[[B - */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1create - (JNIEnv *, jclass, jobject, jlong); - -/* - * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_ec_pubkey_parse - * Signature: (Ljava/nio/ByteBuffer;JI)[[B - */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1parse - (JNIEnv *, jclass, jobject, jlong, jint); - -/* - * Class: org_bitcoin_NativeSecp256k1 - * Method: secp256k1_ecdh - * Signature: (Ljava/nio/ByteBuffer;JI)[[B - */ -SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdh - (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen); - - -#ifdef __cplusplus -} -#endif -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.c b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.c deleted file mode 100644 index a52939e7e..000000000 --- 
a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.c +++ /dev/null @@ -1,15 +0,0 @@ -#include <stdlib.h> -#include <stdint.h> -#include "org_bitcoin_Secp256k1Context.h" -#include "include/secp256k1.h" - -SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_secp256k1_1init_1context - (JNIEnv* env, jclass classObject) -{ - secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - - (void)classObject;(void)env; - - return (uintptr_t)ctx; -} - diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.h deleted file mode 100644 index 0d2bc84b7..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.h +++ /dev/null @@ -1,22 +0,0 @@ -/* DO NOT EDIT THIS FILE - it is machine generated */ -#include <jni.h> -#include "include/secp256k1.h" -/* Header for class org_bitcoin_Secp256k1Context */ - -#ifndef _Included_org_bitcoin_Secp256k1Context -#define _Included_org_bitcoin_Secp256k1Context -#ifdef __cplusplus -extern "C" { -#endif -/* - * Class: org_bitcoin_Secp256k1Context - * Method: secp256k1_init_context - * Signature: ()J - */ -SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_secp256k1_1init_1context - (JNIEnv *, jclass); - -#ifdef __cplusplus -} -#endif -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/ecdh/Makefile.am.include b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/ecdh/Makefile.am.include deleted file mode 100644 index e3088b469..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/ecdh/Makefile.am.include +++ /dev/null @@ -1,8 +0,0 @@ -include_HEADERS += include/secp256k1_ecdh.h -noinst_HEADERS += src/modules/ecdh/main_impl.h -noinst_HEADERS += src/modules/ecdh/tests_impl.h -if USE_BENCHMARK -noinst_PROGRAMS += bench_ecdh -bench_ecdh_SOURCES = src/bench_ecdh.c -bench_ecdh_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB) -endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/ecdh/main_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/ecdh/main_impl.h deleted file mode 100644 index 9e30fb73d..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/ecdh/main_impl.h +++ /dev/null @@ -1,54 +0,0 @@ -/********************************************************************** - * Copyright (c) 2015 Andrew Poelstra * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_MODULE_ECDH_MAIN_ -#define _SECP256K1_MODULE_ECDH_MAIN_ - -#include "include/secp256k1_ecdh.h" -#include "ecmult_const_impl.h" - -int secp256k1_ecdh(const secp256k1_context* ctx, unsigned char *result, const secp256k1_pubkey *point, const unsigned char *scalar) { - int ret = 0; - int overflow = 0; - secp256k1_gej res; - secp256k1_ge pt; - secp256k1_scalar s; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(result != NULL); - ARG_CHECK(point != NULL); - ARG_CHECK(scalar != NULL); - - secp256k1_pubkey_load(ctx, &pt, point); - secp256k1_scalar_set_b32(&s, scalar, &overflow); - if (overflow || secp256k1_scalar_is_zero(&s)) { - ret = 0; - } else { - unsigned char x[32]; - unsigned char y[1]; - secp256k1_sha256_t sha; - - secp256k1_ecmult_const(&res, &pt, &s); -
secp256k1_ge_set_gej(&pt, &res); - /* Compute a hash of the point in compressed form - * Note we cannot use secp256k1_eckey_pubkey_serialize here since it does not - * expect its output to be secret and has a timing sidechannel. */ - secp256k1_fe_normalize(&pt.x); - secp256k1_fe_normalize(&pt.y); - secp256k1_fe_get_b32(x, &pt.x); - y[0] = 0x02 | secp256k1_fe_is_odd(&pt.y); - - secp256k1_sha256_initialize(&sha); - secp256k1_sha256_write(&sha, y, sizeof(y)); - secp256k1_sha256_write(&sha, x, sizeof(x)); - secp256k1_sha256_finalize(&sha, result); - ret = 1; - } - - secp256k1_scalar_clear(&s); - return ret; -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/ecdh/tests_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/ecdh/tests_impl.h deleted file mode 100644 index 85a5d0a9a..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/ecdh/tests_impl.h +++ /dev/null @@ -1,105 +0,0 @@ -/********************************************************************** - * Copyright (c) 2015 Andrew Poelstra * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_MODULE_ECDH_TESTS_ -#define _SECP256K1_MODULE_ECDH_TESTS_ - -void test_ecdh_api(void) { - /* Setup context that just counts errors */ - secp256k1_context *tctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); - secp256k1_pubkey point; - unsigned char res[32]; - unsigned char s_one[32] = { 0 }; - int32_t ecount = 0; - s_one[31] = 1; - - secp256k1_context_set_error_callback(tctx, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount); - CHECK(secp256k1_ec_pubkey_create(tctx, &point, s_one) == 1); - - /* Check all NULLs are detected */ - CHECK(secp256k1_ecdh(tctx, res, &point, s_one) == 1); - CHECK(ecount == 0); - CHECK(secp256k1_ecdh(tctx, NULL, &point, s_one) == 0); - CHECK(ecount == 1); - CHECK(secp256k1_ecdh(tctx, res, NULL, s_one) == 0); - CHECK(ecount == 2); - CHECK(secp256k1_ecdh(tctx, res, &point, NULL) == 0); - CHECK(ecount == 3); - CHECK(secp256k1_ecdh(tctx, res, &point, s_one) == 1); - CHECK(ecount == 3); - - /* Cleanup */ - secp256k1_context_destroy(tctx); -} - -void test_ecdh_generator_basepoint(void) { - unsigned char s_one[32] = { 0 }; - secp256k1_pubkey point[2]; - int i; - - s_one[31] = 1; - /* Check against pubkey creation when the basepoint is the generator */ - for (i = 0; i < 100; ++i) { - secp256k1_sha256_t sha; - unsigned char s_b32[32]; - unsigned char output_ecdh[32]; - unsigned char output_ser[32]; - unsigned char point_ser[33]; - size_t point_ser_len = sizeof(point_ser); - secp256k1_scalar s; - - random_scalar_order(&s); - secp256k1_scalar_get_b32(s_b32, &s); - - /* compute using ECDH function */ - CHECK(secp256k1_ec_pubkey_create(ctx, &point[0], s_one) == 1); - CHECK(secp256k1_ecdh(ctx, output_ecdh, &point[0], s_b32) == 1); - /* compute "explicitly" */ - CHECK(secp256k1_ec_pubkey_create(ctx, &point[1], s_b32) == 1); - CHECK(secp256k1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1); - CHECK(point_ser_len == sizeof(point_ser)); - secp256k1_sha256_initialize(&sha); - secp256k1_sha256_write(&sha, point_ser, point_ser_len); - secp256k1_sha256_finalize(&sha, output_ser); - /* compare */ - CHECK(memcmp(output_ecdh, output_ser, sizeof(output_ser)) == 0); - } -} 
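
For reference, the ECDH convention implemented by the deleted module above is that the shared secret is SHA256(b || x), where x is the x-coordinate of the multiplied point and b is 0x02 or 0x03 depending on the parity of its y-coordinate, i.e. the SHA-256 of the point in compressed form, hashed directly rather than via the variable-time serializer. Below is a minimal standalone sketch of calling this four-argument secp256k1_ecdh from C. It is an illustration only: the two secret keys are arbitrary test values, and it assumes a libsecp256k1 build of this vintage with the ecdh module enabled and its headers on the include path.

/* ecdh_sketch.c: check that both parties derive the same secret.
 * Assumes libsecp256k1 built with --enable-module-ecdh. */
#include <stdio.h>
#include <string.h>
#include <secp256k1.h>
#include <secp256k1_ecdh.h>

int main(void) {
    secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
    unsigned char seca[32], secb[32], shared_ab[32], shared_ba[32];
    secp256k1_pubkey puba, pubb;

    memset(seca, 0x11, sizeof(seca)); /* illustrative secret key for party A */
    memset(secb, 0x22, sizeof(secb)); /* illustrative secret key for party B */

    if (!secp256k1_ec_pubkey_create(ctx, &puba, seca)) return 1;
    if (!secp256k1_ec_pubkey_create(ctx, &pubb, secb)) return 1;

    /* A combines its secret with B's public point and vice versa; both
     * sides obtain the SHA-256 of the same compressed point. */
    if (!secp256k1_ecdh(ctx, shared_ab, &pubb, seca)) return 1;
    if (!secp256k1_ecdh(ctx, shared_ba, &puba, secb)) return 1;

    printf("secrets match: %s\n", memcmp(shared_ab, shared_ba, 32) == 0 ? "yes" : "no");
    secp256k1_context_destroy(ctx);
    return 0;
}

This is the same property that test_ecdh_generator_basepoint exercises above in the degenerate case where one scalar is 1: the ECDH output then equals the SHA-256 of the other party's compressed public key.
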
- -void test_bad_scalar(void) { - unsigned char s_zero[32] = { 0 }; - unsigned char s_overflow[32] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, - 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41 - }; - unsigned char s_rand[32] = { 0 }; - unsigned char output[32]; - secp256k1_scalar rand; - secp256k1_pubkey point; - - /* Create random point */ - random_scalar_order(&rand); - secp256k1_scalar_get_b32(s_rand, &rand); - CHECK(secp256k1_ec_pubkey_create(ctx, &point, s_rand) == 1); - - /* Try to multiply it by bad values */ - CHECK(secp256k1_ecdh(ctx, output, &point, s_zero) == 0); - CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow) == 0); - /* ...and a good one */ - s_overflow[31] -= 1; - CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow) == 1); -} - -void run_ecdh_tests(void) { - test_ecdh_api(); - test_ecdh_generator_basepoint(); - test_bad_scalar(); -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/recovery/Makefile.am.include b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/recovery/Makefile.am.include deleted file mode 100644 index bf23c26e7..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/recovery/Makefile.am.include +++ /dev/null @@ -1,8 +0,0 @@ -include_HEADERS += include/secp256k1_recovery.h -noinst_HEADERS += src/modules/recovery/main_impl.h -noinst_HEADERS += src/modules/recovery/tests_impl.h -if USE_BENCHMARK -noinst_PROGRAMS += bench_recover -bench_recover_SOURCES = src/bench_recover.c -bench_recover_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB) -endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/recovery/main_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/recovery/main_impl.h deleted file mode 100755 index c6fbe2398..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/recovery/main_impl.h +++ /dev/null @@ -1,193 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013-2015 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_MODULE_RECOVERY_MAIN_ -#define _SECP256K1_MODULE_RECOVERY_MAIN_ - -#include "include/secp256k1_recovery.h" - -static void secp256k1_ecdsa_recoverable_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, int* recid, const secp256k1_ecdsa_recoverable_signature* sig) { - (void)ctx; - if (sizeof(secp256k1_scalar) == 32) { - /* When the secp256k1_scalar type is exactly 32 byte, use its - * representation inside secp256k1_ecdsa_signature, as conversion is very fast. - * Note that secp256k1_ecdsa_signature_save must use the same representation. 
*/ - memcpy(r, &sig->data[0], 32); - memcpy(s, &sig->data[32], 32); - } else { - secp256k1_scalar_set_b32(r, &sig->data[0], NULL); - secp256k1_scalar_set_b32(s, &sig->data[32], NULL); - } - *recid = sig->data[64]; -} - -static void secp256k1_ecdsa_recoverable_signature_save(secp256k1_ecdsa_recoverable_signature* sig, const secp256k1_scalar* r, const secp256k1_scalar* s, int recid) { - if (sizeof(secp256k1_scalar) == 32) { - memcpy(&sig->data[0], r, 32); - memcpy(&sig->data[32], s, 32); - } else { - secp256k1_scalar_get_b32(&sig->data[0], r); - secp256k1_scalar_get_b32(&sig->data[32], s); - } - sig->data[64] = recid; -} - -int secp256k1_ecdsa_recoverable_signature_parse_compact(const secp256k1_context* ctx, secp256k1_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) { - secp256k1_scalar r, s; - int ret = 1; - int overflow = 0; - - (void)ctx; - ARG_CHECK(sig != NULL); - ARG_CHECK(input64 != NULL); - ARG_CHECK(recid >= 0 && recid <= 3); - - secp256k1_scalar_set_b32(&r, &input64[0], &overflow); - ret &= !overflow; - secp256k1_scalar_set_b32(&s, &input64[32], &overflow); - ret &= !overflow; - if (ret) { - secp256k1_ecdsa_recoverable_signature_save(sig, &r, &s, recid); - } else { - memset(sig, 0, sizeof(*sig)); - } - return ret; -} - -int secp256k1_ecdsa_recoverable_signature_serialize_compact(const secp256k1_context* ctx, unsigned char *output64, int *recid, const secp256k1_ecdsa_recoverable_signature* sig) { - secp256k1_scalar r, s; - - (void)ctx; - ARG_CHECK(output64 != NULL); - ARG_CHECK(sig != NULL); - ARG_CHECK(recid != NULL); - - secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig); - secp256k1_scalar_get_b32(&output64[0], &r); - secp256k1_scalar_get_b32(&output64[32], &s); - return 1; -} - -int secp256k1_ecdsa_recoverable_signature_convert(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const secp256k1_ecdsa_recoverable_signature* sigin) { - secp256k1_scalar r, s; - int recid; - - (void)ctx; - ARG_CHECK(sig != NULL); - ARG_CHECK(sigin != NULL); - - secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin); - secp256k1_ecdsa_signature_save(sig, &r, &s); - return 1; -} - -static int secp256k1_ecdsa_sig_recover(const secp256k1_ecmult_context *ctx, const secp256k1_scalar *sigr, const secp256k1_scalar* sigs, secp256k1_ge *pubkey, const secp256k1_scalar *message, int recid) { - unsigned char brx[32]; - secp256k1_fe fx; - secp256k1_ge x; - secp256k1_gej xj; - secp256k1_scalar rn, u1, u2; - secp256k1_gej qj; - int r; - - if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) { - return 0; - } - - secp256k1_scalar_get_b32(brx, sigr); - r = secp256k1_fe_set_b32(&fx, brx); - (void)r; - VERIFY_CHECK(r); /* brx comes from a scalar, so is less than the order; certainly less than p */ - if (recid & 2) { - if (secp256k1_fe_cmp_var(&fx, &secp256k1_ecdsa_const_p_minus_order) >= 0) { - return 0; - } - secp256k1_fe_add(&fx, &secp256k1_ecdsa_const_order_as_fe); - } - if (!secp256k1_ge_set_xo_var(&x, &fx, recid & 1)) { - return 0; - } - secp256k1_gej_set_ge(&xj, &x); - secp256k1_scalar_inverse_var(&rn, sigr); - secp256k1_scalar_mul(&u1, &rn, message); - secp256k1_scalar_negate(&u1, &u1); - secp256k1_scalar_mul(&u2, &rn, sigs); - secp256k1_ecmult(ctx, &qj, &xj, &u2, &u1); - secp256k1_ge_set_gej_var(pubkey, &qj); - return !secp256k1_gej_is_infinity(&qj); -} - -int secp256k1_ecdsa_sign_recoverable(const secp256k1_context* ctx, secp256k1_ecdsa_recoverable_signature *signature, const unsigned char *msg32, const unsigned char *seckey, 
secp256k1_nonce_function noncefp, const void* noncedata) { - secp256k1_scalar r, s; - secp256k1_scalar sec, non, msg; - int recid; - int ret = 0; - int overflow = 0; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); - ARG_CHECK(msg32 != NULL); - ARG_CHECK(signature != NULL); - ARG_CHECK(seckey != NULL); - if (noncefp == NULL) { - noncefp = secp256k1_nonce_function_default; - } - - secp256k1_scalar_set_b32(&sec, seckey, &overflow); - /* Fail if the secret key is invalid. */ - if (!overflow && !secp256k1_scalar_is_zero(&sec)) { - unsigned char nonce32[32]; - unsigned int count = 0; - secp256k1_scalar_set_b32(&msg, msg32, NULL); - while (1) { - ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count); - if (!ret) { - break; - } - secp256k1_scalar_set_b32(&non, nonce32, &overflow); - if (!secp256k1_scalar_is_zero(&non) && !overflow) { - if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, &recid)) { - break; - } - } - count++; - } - memset(nonce32, 0, 32); - secp256k1_scalar_clear(&msg); - secp256k1_scalar_clear(&non); - secp256k1_scalar_clear(&sec); - } - if (ret) { - secp256k1_ecdsa_recoverable_signature_save(signature, &r, &s, recid); - } else { - memset(signature, 0, sizeof(*signature)); - } - return ret; -} - -int secp256k1_ecdsa_recover(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const secp256k1_ecdsa_recoverable_signature *signature, const unsigned char *msg32) { - secp256k1_ge q; - secp256k1_scalar r, s; - secp256k1_scalar m; - int recid; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); - ARG_CHECK(msg32 != NULL); - ARG_CHECK(signature != NULL); - ARG_CHECK(pubkey != NULL); - - secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature); - VERIFY_CHECK(recid >= 0 && recid < 4); /* should have been caught in parse_compact */ - secp256k1_scalar_set_b32(&m, msg32, NULL); - if (secp256k1_ecdsa_sig_recover(&ctx->ecmult_ctx, &r, &s, &q, &m, recid)) { - secp256k1_pubkey_save(pubkey, &q); - return 1; - } else { - memset(pubkey, 0, sizeof(*pubkey)); - return 0; - } -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/recovery/tests_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/recovery/tests_impl.h deleted file mode 100644 index 765c7dd81..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/modules/recovery/tests_impl.h +++ /dev/null @@ -1,393 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013-2015 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_MODULE_RECOVERY_TESTS_ -#define _SECP256K1_MODULE_RECOVERY_TESTS_ - -static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) { - (void) msg32; - (void) key32; - (void) algo16; - (void) data; - - /* On the first run, return 0 to force a second run */ - if (counter == 0) { - memset(nonce32, 0, 32); - return 1; - } - /* On the second run, return an overflow to force a third run */ - if (counter == 1) { - memset(nonce32, 0xff, 32); - return 1; - } - /* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. 
*/ - memset(nonce32, 1, 32); - return secp256k1_rand_bits(1); -} - -void test_ecdsa_recovery_api(void) { - /* Setup contexts that just count errors */ - secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE); - secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); - secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY); - secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - secp256k1_pubkey pubkey; - secp256k1_pubkey recpubkey; - secp256k1_ecdsa_signature normal_sig; - secp256k1_ecdsa_recoverable_signature recsig; - unsigned char privkey[32] = { 1 }; - unsigned char message[32] = { 2 }; - int32_t ecount = 0; - int recid = 0; - unsigned char sig[74]; - unsigned char zero_privkey[32] = { 0 }; - unsigned char over_privkey[32] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - - secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount); - - /* Construct and verify corresponding public key. */ - CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1); - - /* Check bad contexts and NULLs for signing */ - ecount = 0; - CHECK(secp256k1_ecdsa_sign_recoverable(none, &recsig, message, privkey, NULL, NULL) == 0); - CHECK(ecount == 1); - CHECK(secp256k1_ecdsa_sign_recoverable(sign, &recsig, message, privkey, NULL, NULL) == 1); - CHECK(ecount == 1); - CHECK(secp256k1_ecdsa_sign_recoverable(vrfy, &recsig, message, privkey, NULL, NULL) == 0); - CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); - CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_sign_recoverable(both, NULL, message, privkey, NULL, NULL) == 0); - CHECK(ecount == 3); - CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, NULL, privkey, NULL, NULL) == 0); - CHECK(ecount == 4); - CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, NULL, NULL, NULL) == 0); - CHECK(ecount == 5); - /* This will fail or succeed randomly, and in either case will not ARG_CHECK failure */ - secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, recovery_test_nonce_function, NULL); - CHECK(ecount == 5); - /* These will all fail, but not in ARG_CHECK way */ - CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, zero_privkey, NULL, NULL) == 0); - CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, over_privkey, NULL, NULL) == 0); - /* This one will succeed. 
*/ - CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); - CHECK(ecount == 5); - - /* Check signing with a goofy nonce function */ - - /* Check bad contexts and NULLs for recovery */ - ecount = 0; - CHECK(secp256k1_ecdsa_recover(none, &recpubkey, &recsig, message) == 0); - CHECK(ecount == 1); - CHECK(secp256k1_ecdsa_recover(sign, &recpubkey, &recsig, message) == 0); - CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_recover(vrfy, &recpubkey, &recsig, message) == 1); - CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_recover(both, &recpubkey, &recsig, message) == 1); - CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_recover(both, NULL, &recsig, message) == 0); - CHECK(ecount == 3); - CHECK(secp256k1_ecdsa_recover(both, &recpubkey, NULL, message) == 0); - CHECK(ecount == 4); - CHECK(secp256k1_ecdsa_recover(both, &recpubkey, &recsig, NULL) == 0); - CHECK(ecount == 5); - - /* Check NULLs for conversion */ - CHECK(secp256k1_ecdsa_sign(both, &normal_sig, message, privkey, NULL, NULL) == 1); - ecount = 0; - CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, NULL, &recsig) == 0); - CHECK(ecount == 1); - CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, &normal_sig, NULL) == 0); - CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, &normal_sig, &recsig) == 1); - - /* Check NULLs for de/serialization */ - CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1); - ecount = 0; - CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, NULL, &recid, &recsig) == 0); - CHECK(ecount == 1); - CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, NULL, &recsig) == 0); - CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, NULL) == 0); - CHECK(ecount == 3); - CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, &recsig) == 1); - - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, NULL, sig, recid) == 0); - CHECK(ecount == 4); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, NULL, recid) == 0); - CHECK(ecount == 5); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, -1) == 0); - CHECK(ecount == 6); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, 5) == 0); - CHECK(ecount == 7); - /* overflow in signature will fail but not affect ecount */ - memcpy(sig, over_privkey, 32); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, recid) == 0); - CHECK(ecount == 7); - - /* cleanup */ - secp256k1_context_destroy(none); - secp256k1_context_destroy(sign); - secp256k1_context_destroy(vrfy); - secp256k1_context_destroy(both); -} - -void test_ecdsa_recovery_end_to_end(void) { - unsigned char extra[32] = {0x00}; - unsigned char privkey[32]; - unsigned char message[32]; - secp256k1_ecdsa_signature signature[5]; - secp256k1_ecdsa_recoverable_signature rsignature[5]; - unsigned char sig[74]; - secp256k1_pubkey pubkey; - secp256k1_pubkey recpubkey; - int recid = 0; - - /* Generate a random key and message. */ - { - secp256k1_scalar msg, key; - random_scalar_order_test(&msg); - random_scalar_order_test(&key); - secp256k1_scalar_get_b32(privkey, &key); - secp256k1_scalar_get_b32(message, &msg); - } - - /* Construct and verify corresponding public key. 
*/ - CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1); - - /* Serialize/parse compact and verify/recover. */ - extra[0] = 0; - CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[0], message, privkey, NULL, NULL) == 1); - CHECK(secp256k1_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1); - CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[4], message, privkey, NULL, NULL) == 1); - CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[1], message, privkey, NULL, extra) == 1); - extra[31] = 1; - CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[2], message, privkey, NULL, extra) == 1); - extra[31] = 0; - extra[0] = 1; - CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1); - CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1); - CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); - CHECK(memcmp(&signature[4], &signature[0], 64) == 0); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1); - memset(&rsignature[4], 0, sizeof(rsignature[4])); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); - CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1); - /* Parse compact (with recovery id) and recover. */ - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); - CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1); - CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0); - /* Serialize/destroy/parse signature and verify again. */ - CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1); - sig[secp256k1_rand_bits(6)] += 1 + secp256k1_rand_int(255); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1); - CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0); - /* Recover again */ - CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 || - memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0); -} - -/* Tests several edge cases. */ -void test_ecdsa_recovery_edge_cases(void) { - const unsigned char msg32[32] = { - 'T', 'h', 'i', 's', ' ', 'i', 's', ' ', - 'a', ' ', 'v', 'e', 'r', 'y', ' ', 's', - 'e', 'c', 'r', 'e', 't', ' ', 'm', 'e', - 's', 's', 'a', 'g', 'e', '.', '.', '.' - }; - const unsigned char sig64[64] = { - /* Generated by signing the above message with nonce 'This is the nonce we will use...' - * and secret key 0 (which is not valid), resulting in recid 0. */ - 0x67, 0xCB, 0x28, 0x5F, 0x9C, 0xD1, 0x94, 0xE8, - 0x40, 0xD6, 0x29, 0x39, 0x7A, 0xF5, 0x56, 0x96, - 0x62, 0xFD, 0xE4, 0x46, 0x49, 0x99, 0x59, 0x63, - 0x17, 0x9A, 0x7D, 0xD1, 0x7B, 0xD2, 0x35, 0x32, - 0x4B, 0x1B, 0x7D, 0xF3, 0x4C, 0xE1, 0xF6, 0x8E, - 0x69, 0x4F, 0xF6, 0xF1, 0x1A, 0xC7, 0x51, 0xDD, - 0x7D, 0xD7, 0x3E, 0x38, 0x7E, 0xE4, 0xFC, 0x86, - 0x6E, 0x1B, 0xE8, 0xEC, 0xC7, 0xDD, 0x95, 0x57 - }; - secp256k1_pubkey pubkey; - /* signature (r,s) = (4,4), which can be recovered with all 4 recids. 
*/ - const unsigned char sigb64[64] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - }; - secp256k1_pubkey pubkeyb; - secp256k1_ecdsa_recoverable_signature rsig; - secp256k1_ecdsa_signature sig; - int recid; - - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 0)); - CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 1)); - CHECK(secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 2)); - CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 3)); - CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32)); - - for (recid = 0; recid < 4; recid++) { - int i; - int recid2; - /* (4,4) encoded in DER. */ - unsigned char sigbder[8] = {0x30, 0x06, 0x02, 0x01, 0x04, 0x02, 0x01, 0x04}; - unsigned char sigcder_zr[7] = {0x30, 0x05, 0x02, 0x00, 0x02, 0x01, 0x01}; - unsigned char sigcder_zs[7] = {0x30, 0x05, 0x02, 0x01, 0x01, 0x02, 0x00}; - unsigned char sigbderalt1[39] = { - 0x30, 0x25, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x04, 0x02, 0x01, 0x04, - }; - unsigned char sigbderalt2[39] = { - 0x30, 0x25, 0x02, 0x01, 0x04, 0x02, 0x20, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - }; - unsigned char sigbderalt3[40] = { - 0x30, 0x26, 0x02, 0x21, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x04, 0x02, 0x01, 0x04, - }; - unsigned char sigbderalt4[40] = { - 0x30, 0x26, 0x02, 0x01, 0x04, 0x02, 0x21, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - }; - /* (order + r,4) encoded in DER. 
*/ - unsigned char sigbderlong[40] = { - 0x30, 0x26, 0x02, 0x21, 0x00, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xBA, 0xAE, 0xDC, - 0xE6, 0xAF, 0x48, 0xA0, 0x3B, 0xBF, 0xD2, 0x5E, - 0x8C, 0xD0, 0x36, 0x41, 0x45, 0x02, 0x01, 0x04 - }; - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid) == 1); - CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 1); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1); - for (recid2 = 0; recid2 < 4; recid2++) { - secp256k1_pubkey pubkey2b; - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid2) == 1); - CHECK(secp256k1_ecdsa_recover(ctx, &pubkey2b, &rsig, msg32) == 1); - /* Verifying with (order + r,4) should always fail. */ - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderlong, sizeof(sigbderlong)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); - } - /* DER parsing tests. */ - /* Zero length r/s. */ - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0); - /* Leading zeros. */ - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt1, sizeof(sigbderalt1)) == 0); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt2, sizeof(sigbderalt2)) == 0); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0); - sigbderalt3[4] = 1; - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); - sigbderalt4[7] = 1; - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); - /* Damage signature. */ - sigbder[7]++; - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); - sigbder[7]--; - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, 6) == 0); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder) - 1) == 0); - for(i = 0; i < 8; i++) { - int c; - unsigned char orig = sigbder[i]; - /*Try every single-byte change.*/ - for (c = 0; c < 256; c++) { - if (c == orig ) { - continue; - } - sigbder[i] = c; - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 0 || secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0); - } - sigbder[i] = orig; - } - } - - /* Test r/s equal to zero */ - { - /* (1,1) encoded in DER. 
*/ - unsigned char sigcder[8] = {0x30, 0x06, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01}; - unsigned char sigc64[64] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - }; - secp256k1_pubkey pubkeyc; - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1); - CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyc, &rsig, msg32) == 1); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 1); - sigcder[4] = 0; - sigc64[31] = 0; - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1); - CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0); - sigcder[4] = 1; - sigcder[7] = 0; - sigc64[31] = 1; - sigc64[63] = 0; - CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1); - CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0); - } -} - -void run_recovery_tests(void) { - int i; - for (i = 0; i < count; i++) { - test_ecdsa_recovery_api(); - } - for (i = 0; i < 64*count; i++) { - test_ecdsa_recovery_end_to_end(); - } - test_ecdsa_recovery_edge_cases(); -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/num.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/num.h deleted file mode 100644 index eff842200..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/num.h +++ /dev/null @@ -1,74 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_NUM_ -#define _SECP256K1_NUM_ - -#ifndef USE_NUM_NONE - -#if defined HAVE_CONFIG_H -#include "libsecp256k1-config.h" -#endif - -#if defined(USE_NUM_GMP) -#include "num_gmp.h" -#else -#error "Please select num implementation" -#endif - -/** Copy a number. */ -static void secp256k1_num_copy(secp256k1_num *r, const secp256k1_num *a); - -/** Convert a number's absolute value to a binary big-endian string. - * There must be enough place. */ -static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num *a); - -/** Set a number to the value of a binary big-endian string. */ -static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsigned int alen); - -/** Compute a modular inverse. The input must be less than the modulus. */ -static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m); - -/** Compute the jacobi symbol (a|b). b must be positive and odd. 
*/ -static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b); - -/** Compare the absolute value of two numbers. */ -static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b); - -/** Test whether two number are equal (including sign). */ -static int secp256k1_num_eq(const secp256k1_num *a, const secp256k1_num *b); - -/** Add two (signed) numbers. */ -static void secp256k1_num_add(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b); - -/** Subtract two (signed) numbers. */ -static void secp256k1_num_sub(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b); - -/** Multiply two (signed) numbers. */ -static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b); - -/** Replace a number by its remainder modulo m. M's sign is ignored. The result is a number between 0 and m-1, - even if r was negative. */ -static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m); - -/** Right-shift the passed number by bits. */ -static void secp256k1_num_shift(secp256k1_num *r, int bits); - -/** Check whether a number is zero. */ -static int secp256k1_num_is_zero(const secp256k1_num *a); - -/** Check whether a number is one. */ -static int secp256k1_num_is_one(const secp256k1_num *a); - -/** Check whether a number is strictly negative. */ -static int secp256k1_num_is_neg(const secp256k1_num *a); - -/** Change a number's sign. */ -static void secp256k1_num_negate(secp256k1_num *r); - -#endif - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/num_gmp.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/num_gmp.h deleted file mode 100644 index 7dd813088..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/num_gmp.h +++ /dev/null @@ -1,20 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_NUM_REPR_ -#define _SECP256K1_NUM_REPR_ - -#include <gmp.h> - -#define NUM_LIMBS ((256+GMP_NUMB_BITS-1)/GMP_NUMB_BITS) - -typedef struct { - mp_limb_t data[2*NUM_LIMBS]; - int neg; - int limbs; -} secp256k1_num; - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/num_gmp_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/num_gmp_impl.h deleted file mode 100644 index 3a46495ee..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/num_gmp_impl.h +++ /dev/null @@ -1,288 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_NUM_REPR_IMPL_H_ -#define _SECP256K1_NUM_REPR_IMPL_H_ - -#include <string.h> -#include <stdlib.h> -#include <gmp.h> - -#include "util.h" -#include "num.h" - -#ifdef VERIFY -static void secp256k1_num_sanity(const secp256k1_num *a) { - VERIFY_CHECK(a->limbs == 1 || (a->limbs > 1 && a->data[a->limbs-1] != 0)); -} -#else -#define secp256k1_num_sanity(a) do { } while(0) -#endif - -static void secp256k1_num_copy(secp256k1_num *r, const secp256k1_num *a) { - *r = *a; -} - -static void secp256k1_num_get_bin(unsigned char *r, unsigned int
rlen, const secp256k1_num *a) { - unsigned char tmp[65]; - int len = 0; - int shift = 0; - if (a->limbs>1 || a->data[0] != 0) { - len = mpn_get_str(tmp, 256, (mp_limb_t*)a->data, a->limbs); - } - while (shift < len && tmp[shift] == 0) shift++; - VERIFY_CHECK(len-shift <= (int)rlen); - memset(r, 0, rlen - len + shift); - if (len > shift) { - memcpy(r + rlen - len + shift, tmp + shift, len - shift); - } - memset(tmp, 0, sizeof(tmp)); -} - -static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsigned int alen) { - int len; - VERIFY_CHECK(alen > 0); - VERIFY_CHECK(alen <= 64); - len = mpn_set_str(r->data, a, alen, 256); - if (len == 0) { - r->data[0] = 0; - len = 1; - } - VERIFY_CHECK(len <= NUM_LIMBS*2); - r->limbs = len; - r->neg = 0; - while (r->limbs > 1 && r->data[r->limbs-1]==0) { - r->limbs--; - } -} - -static void secp256k1_num_add_abs(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) { - mp_limb_t c = mpn_add(r->data, a->data, a->limbs, b->data, b->limbs); - r->limbs = a->limbs; - if (c != 0) { - VERIFY_CHECK(r->limbs < 2*NUM_LIMBS); - r->data[r->limbs++] = c; - } -} - -static void secp256k1_num_sub_abs(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) { - mp_limb_t c = mpn_sub(r->data, a->data, a->limbs, b->data, b->limbs); - (void)c; - VERIFY_CHECK(c == 0); - r->limbs = a->limbs; - while (r->limbs > 1 && r->data[r->limbs-1]==0) { - r->limbs--; - } -} - -static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m) { - secp256k1_num_sanity(r); - secp256k1_num_sanity(m); - - if (r->limbs >= m->limbs) { - mp_limb_t t[2*NUM_LIMBS]; - mpn_tdiv_qr(t, r->data, 0, r->data, r->limbs, m->data, m->limbs); - memset(t, 0, sizeof(t)); - r->limbs = m->limbs; - while (r->limbs > 1 && r->data[r->limbs-1]==0) { - r->limbs--; - } - } - - if (r->neg && (r->limbs > 1 || r->data[0] != 0)) { - secp256k1_num_sub_abs(r, m, r); - r->neg = 0; - } -} - -static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m) { - int i; - mp_limb_t g[NUM_LIMBS+1]; - mp_limb_t u[NUM_LIMBS+1]; - mp_limb_t v[NUM_LIMBS+1]; - mp_size_t sn; - mp_size_t gn; - secp256k1_num_sanity(a); - secp256k1_num_sanity(m); - - /** mpn_gcdext computes: (G,S) = gcdext(U,V), where - * * G = gcd(U,V) - * * G = U*S + V*T - * * U has equal or more limbs than V, and V has no padding - * If we set U to be (a padded version of) a, and V = m: - * G = a*S + m*T - * G = a*S mod m - * Assuming G=1: - * S = 1/a mod m - */ - VERIFY_CHECK(m->limbs <= NUM_LIMBS); - VERIFY_CHECK(m->data[m->limbs-1] != 0); - for (i = 0; i < m->limbs; i++) { - u[i] = (i < a->limbs) ? 
a->data[i] : 0; - v[i] = m->data[i]; - } - sn = NUM_LIMBS+1; - gn = mpn_gcdext(g, r->data, &sn, u, m->limbs, v, m->limbs); - (void)gn; - VERIFY_CHECK(gn == 1); - VERIFY_CHECK(g[0] == 1); - r->neg = a->neg ^ m->neg; - if (sn < 0) { - mpn_sub(r->data, m->data, m->limbs, r->data, -sn); - r->limbs = m->limbs; - while (r->limbs > 1 && r->data[r->limbs-1]==0) { - r->limbs--; - } - } else { - r->limbs = sn; - } - memset(g, 0, sizeof(g)); - memset(u, 0, sizeof(u)); - memset(v, 0, sizeof(v)); -} - -static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b) { - int ret; - mpz_t ga, gb; - secp256k1_num_sanity(a); - secp256k1_num_sanity(b); - VERIFY_CHECK(!b->neg && (b->limbs > 0) && (b->data[0] & 1)); - - mpz_inits(ga, gb, NULL); - - mpz_import(gb, b->limbs, -1, sizeof(mp_limb_t), 0, 0, b->data); - mpz_import(ga, a->limbs, -1, sizeof(mp_limb_t), 0, 0, a->data); - if (a->neg) { - mpz_neg(ga, ga); - } - - ret = mpz_jacobi(ga, gb); - - mpz_clears(ga, gb, NULL); - - return ret; -} - -static int secp256k1_num_is_one(const secp256k1_num *a) { - return (a->limbs == 1 && a->data[0] == 1); -} - -static int secp256k1_num_is_zero(const secp256k1_num *a) { - return (a->limbs == 1 && a->data[0] == 0); -} - -static int secp256k1_num_is_neg(const secp256k1_num *a) { - return (a->limbs > 1 || a->data[0] != 0) && a->neg; -} - -static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b) { - if (a->limbs > b->limbs) { - return 1; - } - if (a->limbs < b->limbs) { - return -1; - } - return mpn_cmp(a->data, b->data, a->limbs); -} - -static int secp256k1_num_eq(const secp256k1_num *a, const secp256k1_num *b) { - if (a->limbs > b->limbs) { - return 0; - } - if (a->limbs < b->limbs) { - return 0; - } - if ((a->neg && !secp256k1_num_is_zero(a)) != (b->neg && !secp256k1_num_is_zero(b))) { - return 0; - } - return mpn_cmp(a->data, b->data, a->limbs) == 0; -} - -static void secp256k1_num_subadd(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b, int bneg) { - if (!(b->neg ^ bneg ^ a->neg)) { /* a and b have the same sign */ - r->neg = a->neg; - if (a->limbs >= b->limbs) { - secp256k1_num_add_abs(r, a, b); - } else { - secp256k1_num_add_abs(r, b, a); - } - } else { - if (secp256k1_num_cmp(a, b) > 0) { - r->neg = a->neg; - secp256k1_num_sub_abs(r, a, b); - } else { - r->neg = b->neg ^ bneg; - secp256k1_num_sub_abs(r, b, a); - } - } -} - -static void secp256k1_num_add(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) { - secp256k1_num_sanity(a); - secp256k1_num_sanity(b); - secp256k1_num_subadd(r, a, b, 0); -} - -static void secp256k1_num_sub(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) { - secp256k1_num_sanity(a); - secp256k1_num_sanity(b); - secp256k1_num_subadd(r, a, b, 1); -} - -static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) { - mp_limb_t tmp[2*NUM_LIMBS+1]; - secp256k1_num_sanity(a); - secp256k1_num_sanity(b); - - VERIFY_CHECK(a->limbs + b->limbs <= 2*NUM_LIMBS+1); - if ((a->limbs==1 && a->data[0]==0) || (b->limbs==1 && b->data[0]==0)) { - r->limbs = 1; - r->neg = 0; - r->data[0] = 0; - return; - } - if (a->limbs >= b->limbs) { - mpn_mul(tmp, a->data, a->limbs, b->data, b->limbs); - } else { - mpn_mul(tmp, b->data, b->limbs, a->data, a->limbs); - } - r->limbs = a->limbs + b->limbs; - if (r->limbs > 1 && tmp[r->limbs - 1]==0) { - r->limbs--; - } - VERIFY_CHECK(r->limbs <= 2*NUM_LIMBS); - mpn_copyi(r->data, tmp, r->limbs); - r->neg = a->neg ^ b->neg; - memset(tmp, 0, sizeof(tmp)); -} - -static 
void secp256k1_num_shift(secp256k1_num *r, int bits) { - if (bits % GMP_NUMB_BITS) { - /* Shift within limbs. */ - mpn_rshift(r->data, r->data, r->limbs, bits % GMP_NUMB_BITS); - } - if (bits >= GMP_NUMB_BITS) { - int i; - /* Shift full limbs. */ - for (i = 0; i < r->limbs; i++) { - int index = i + (bits / GMP_NUMB_BITS); - if (index < r->limbs && index < 2*NUM_LIMBS) { - r->data[i] = r->data[index]; - } else { - r->data[i] = 0; - } - } - } - while (r->limbs>1 && r->data[r->limbs-1]==0) { - r->limbs--; - } -} - -static void secp256k1_num_negate(secp256k1_num *r) { - r->neg ^= 1; -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/num_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/num_impl.h deleted file mode 100644 index 0b0e3a072..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/num_impl.h +++ /dev/null @@ -1,24 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_NUM_IMPL_H_ -#define _SECP256K1_NUM_IMPL_H_ - -#if defined HAVE_CONFIG_H -#include "libsecp256k1-config.h" -#endif - -#include "num.h" - -#if defined(USE_NUM_GMP) -#include "num_gmp_impl.h" -#elif defined(USE_NUM_NONE) -/* Nothing. */ -#else -#error "Please select num implementation" -#endif - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar.h deleted file mode 100644 index 27e9d8375..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar.h +++ /dev/null @@ -1,106 +0,0 @@ -/********************************************************************** - * Copyright (c) 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_SCALAR_ -#define _SECP256K1_SCALAR_ - -#include "num.h" - -#if defined HAVE_CONFIG_H -#include "libsecp256k1-config.h" -#endif - -#if defined(EXHAUSTIVE_TEST_ORDER) -#include "scalar_low.h" -#elif defined(USE_SCALAR_4X64) -#include "scalar_4x64.h" -#elif defined(USE_SCALAR_8X32) -#include "scalar_8x32.h" -#else -#error "Please select scalar implementation" -#endif - -/** Clear a scalar to prevent the leak of sensitive data. */ -static void secp256k1_scalar_clear(secp256k1_scalar *r); - -/** Access bits from a scalar. All requested bits must belong to the same 32-bit limb. */ -static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count); - -/** Access bits from a scalar. Not constant time. */ -static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count); - -/** Set a scalar from a big endian byte array. */ -static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *bin, int *overflow); - -/** Set a scalar to an unsigned integer. */ -static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v); - -/** Convert a scalar to a byte array. */ -static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a); - -/** Add two scalars together (modulo the group order). 
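The raw 256-bit sum is computed first and then conditionally reduced by the group order, so the stored result is always fully reduced.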
Returns whether it overflowed. */ -static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b); - -/** Conditionally add a power of two to a scalar. The result is not allowed to overflow. */ -static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag); - -/** Multiply two scalars (modulo the group order). */ -static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b); - -/** Shift a scalar right by some amount strictly between 0 and 16, returning - * the low bits that were shifted off */ -static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n); - -/** Compute the square of a scalar (modulo the group order). */ -static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a); - -/** Compute the inverse of a scalar (modulo the group order). */ -static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *a); - -/** Compute the inverse of a scalar (modulo the group order), without constant-time guarantee. */ -static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *a); - -/** Compute the complement of a scalar (modulo the group order). */ -static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a); - -/** Check whether a scalar equals zero. */ -static int secp256k1_scalar_is_zero(const secp256k1_scalar *a); - -/** Check whether a scalar equals one. */ -static int secp256k1_scalar_is_one(const secp256k1_scalar *a); - -/** Check whether a scalar, considered as a nonnegative integer, is even. */ -static int secp256k1_scalar_is_even(const secp256k1_scalar *a); - -/** Check whether a scalar is higher than the group order divided by 2. */ -static int secp256k1_scalar_is_high(const secp256k1_scalar *a); - -/** Conditionally negate a number, in constant time. - * Returns -1 if the number was negated, 1 otherwise */ -static int secp256k1_scalar_cond_negate(secp256k1_scalar *a, int flag); - -#ifndef USE_NUM_NONE -/** Convert a scalar to a number. */ -static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a); - -/** Get the order of the group as a number. */ -static void secp256k1_scalar_order_get_num(secp256k1_num *r); -#endif - -/** Compare two scalars. */ -static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b); - -#ifdef USE_ENDOMORPHISM -/** Find r1 and r2 such that r1+r2*2^128 = a. */ -static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a); -/** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are at most 128 bits long (see secp256k1_gej_mul_lambda). */ -static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a); -#endif - -/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. 
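Rounding is achieved by adding back the bit just below the cut (see the cadd_bit call in the implementations); this is what secp256k1_scalar_split_lambda above relies on to compute rounded quotients.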
 */ -static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift); - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_4x64.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_4x64.h deleted file mode 100644 index cff406038..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_4x64.h +++ /dev/null @@ -1,19 +0,0 @@ -/********************************************************************** - * Copyright (c) 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_SCALAR_REPR_ -#define _SECP256K1_SCALAR_REPR_ - -#include <stdint.h> - -/** A scalar modulo the group order of the secp256k1 curve. */ -typedef struct { - uint64_t d[4]; -} secp256k1_scalar; - -#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{((uint64_t)(d1)) << 32 | (d0), ((uint64_t)(d3)) << 32 | (d2), ((uint64_t)(d5)) << 32 | (d4), ((uint64_t)(d7)) << 32 | (d6)}} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_4x64_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_4x64_impl.h deleted file mode 100644 index 56e7bd82a..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_4x64_impl.h +++ /dev/null @@ -1,949 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_ -#define _SECP256K1_SCALAR_REPR_IMPL_H_ - -/* Limbs of the secp256k1 order. */ -#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL) -#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL) -#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL) -#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL) - -/* Limbs of 2^256 minus the secp256k1 order. */ -#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1) -#define SECP256K1_N_C_1 (~SECP256K1_N_1) -#define SECP256K1_N_C_2 (1) - -/* Limbs of half the secp256k1 order. 
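That is, (n-1)/2; secp256k1_scalar_is_high compares against these limbs to decide whether a scalar lies in the upper half of [0, n).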
*/ -#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL) -#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL) -#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL) -#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL) - -SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { - r->d[0] = 0; - r->d[1] = 0; - r->d[2] = 0; - r->d[3] = 0; -} - -SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { - r->d[0] = v; - r->d[1] = 0; - r->d[2] = 0; - r->d[3] = 0; -} - -SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { - VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6); - return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1); -} - -SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { - VERIFY_CHECK(count < 32); - VERIFY_CHECK(offset + count <= 256); - if ((offset + count - 1) >> 6 == offset >> 6) { - return secp256k1_scalar_get_bits(a, offset, count); - } else { - VERIFY_CHECK((offset >> 6) + 1 < 4); - return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1); - } -} - -SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { - int yes = 0; - int no = 0; - no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */ - no |= (a->d[2] < SECP256K1_N_2); - yes |= (a->d[2] > SECP256K1_N_2) & ~no; - no |= (a->d[1] < SECP256K1_N_1); - yes |= (a->d[1] > SECP256K1_N_1) & ~no; - yes |= (a->d[0] >= SECP256K1_N_0) & ~no; - return yes; -} - -SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) { - uint128_t t; - VERIFY_CHECK(overflow <= 1); - t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0; - r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)r->d[1] + overflow * SECP256K1_N_C_1; - r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)r->d[2] + overflow * SECP256K1_N_C_2; - r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint64_t)r->d[3]; - r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; - return overflow; -} - -static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { - int overflow; - uint128_t t = (uint128_t)a->d[0] + b->d[0]; - r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)a->d[1] + b->d[1]; - r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)a->d[2] + b->d[2]; - r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)a->d[3] + b->d[3]; - r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - overflow = t + secp256k1_scalar_check_overflow(r); - VERIFY_CHECK(overflow == 0 || overflow == 1); - secp256k1_scalar_reduce(r, overflow); - return overflow; -} - -static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) { - uint128_t t; - VERIFY_CHECK(bit < 256); - bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */ - t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F)); - r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F)); - r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)r->d[2] + (((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F)); - r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64; - t += (uint128_t)r->d[3] + 
(((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F)); - r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; -#ifdef VERIFY - VERIFY_CHECK((t >> 64) == 0); - VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0); -#endif -} - -static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) { - int over; - r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56; - r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56; - r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56; - r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56; - over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r)); - if (overflow) { - *overflow = over; - } -} - -static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) { - bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3]; - bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2]; - bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1]; - bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0]; -} - -SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) { - return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0; -} - -static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) { - uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0); - uint128_t t = (uint128_t)(~a->d[0]) + SECP256K1_N_0 + 1; - r->d[0] = t & nonzero; t >>= 64; - t += (uint128_t)(~a->d[1]) + SECP256K1_N_1; - r->d[1] = t & nonzero; t >>= 64; - t += (uint128_t)(~a->d[2]) + SECP256K1_N_2; - r->d[2] = t & nonzero; t >>= 64; - t += (uint128_t)(~a->d[3]) + SECP256K1_N_3; - r->d[3] = t & nonzero; -} - -SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) { - return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0; -} - -static int secp256k1_scalar_is_high(const secp256k1_scalar *a) { - int yes = 0; - int no = 0; - no |= (a->d[3] < SECP256K1_N_H_3); - yes |= (a->d[3] > SECP256K1_N_H_3) & ~no; - no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. 
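SECP256K1_N_H_2 is all ones, so d[2] can never exceed it.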
*/ - no |= (a->d[1] < SECP256K1_N_H_1) & ~yes; - yes |= (a->d[1] > SECP256K1_N_H_1) & ~no; - yes |= (a->d[0] > SECP256K1_N_H_0) & ~no; - return yes; -} - -static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) { - /* If we are flag = 0, mask = 00...00 and this is a no-op; - * if we are flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */ - uint64_t mask = !flag - 1; - uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1; - uint128_t t = (uint128_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask); - r->d[0] = t & nonzero; t >>= 64; - t += (uint128_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask); - r->d[1] = t & nonzero; t >>= 64; - t += (uint128_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask); - r->d[2] = t & nonzero; t >>= 64; - t += (uint128_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask); - r->d[3] = t & nonzero; - return 2 * (mask == 0) - 1; -} - -/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */ - -/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ -#define muladd(a,b) { \ - uint64_t tl, th; \ - { \ - uint128_t t = (uint128_t)a * b; \ - th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \ - tl = t; \ - } \ - c0 += tl; /* overflow is handled on the next line */ \ - th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \ - c1 += th; /* overflow is handled on the next line */ \ - c2 += (c1 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \ - VERIFY_CHECK((c1 >= th) || (c2 != 0)); \ -} - -/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */ -#define muladd_fast(a,b) { \ - uint64_t tl, th; \ - { \ - uint128_t t = (uint128_t)a * b; \ - th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \ - tl = t; \ - } \ - c0 += tl; /* overflow is handled on the next line */ \ - th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \ - c1 += th; /* never overflows by contract (verified in the next line) */ \ - VERIFY_CHECK(c1 >= th); \ -} - -/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ -#define muladd2(a,b) { \ - uint64_t tl, th, th2, tl2; \ - { \ - uint128_t t = (uint128_t)a * b; \ - th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \ - tl = t; \ - } \ - th2 = th + th; /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \ - c2 += (th2 < th) ? 1 : 0; /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((th2 >= th) || (c2 != 0)); \ - tl2 = tl + tl; /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \ - th2 += (tl2 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \ - c0 += tl2; /* overflow is handled on the next line */ \ - th2 += (c0 < tl2) ? 1 : 0; /* second overflow is handled on the next line */ \ - c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \ - c1 += th2; /* overflow is handled on the next line */ \ - c2 += (c1 < th2) ? 1 : 0; /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \ -} - -/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */ -#define sumadd(a) { \ - unsigned int over; \ - c0 += (a); /* overflow is handled on the next line */ \ - over = (c0 < (a)) ? 1 : 0; \ - c1 += over; /* overflow is handled on the next line */ \ - c2 += (c1 < over) ? 1 : 0; /* never overflows by contract */ \ -} - -/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. 
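The _fast variants omit the carry into c2 entirely, so they are only safe where the caller can prove that no second-order carry can occur.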
*/ -#define sumadd_fast(a) { \ - c0 += (a); /* overflow is handled on the next line */ \ - c1 += (c0 < (a)) ? 1 : 0; /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \ - VERIFY_CHECK(c2 == 0); \ -} - -/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */ -#define extract(n) { \ - (n) = c0; \ - c0 = c1; \ - c1 = c2; \ - c2 = 0; \ -} - -/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. c2 is required to be zero. */ -#define extract_fast(n) { \ - (n) = c0; \ - c0 = c1; \ - c1 = 0; \ - VERIFY_CHECK(c2 == 0); \ -} - -static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) { -#ifdef USE_ASM_X86_64 - /* Reduce 512 bits into 385. */ - uint64_t m0, m1, m2, m3, m4, m5, m6; - uint64_t p0, p1, p2, p3, p4; - uint64_t c; - - __asm__ __volatile__( - /* Preload. */ - "movq 32(%%rsi), %%r11\n" - "movq 40(%%rsi), %%r12\n" - "movq 48(%%rsi), %%r13\n" - "movq 56(%%rsi), %%r14\n" - /* Initialize r8,r9,r10 */ - "movq 0(%%rsi), %%r8\n" - "xorq %%r9, %%r9\n" - "xorq %%r10, %%r10\n" - /* (r8,r9) += n0 * c0 */ - "movq %8, %%rax\n" - "mulq %%r11\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - /* extract m0 */ - "movq %%r8, %q0\n" - "xorq %%r8, %%r8\n" - /* (r9,r10) += l1 */ - "addq 8(%%rsi), %%r9\n" - "adcq $0, %%r10\n" - /* (r9,r10,r8) += n1 * c0 */ - "movq %8, %%rax\n" - "mulq %%r12\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* (r9,r10,r8) += n0 * c1 */ - "movq %9, %%rax\n" - "mulq %%r11\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* extract m1 */ - "movq %%r9, %q1\n" - "xorq %%r9, %%r9\n" - /* (r10,r8,r9) += l2 */ - "addq 16(%%rsi), %%r10\n" - "adcq $0, %%r8\n" - "adcq $0, %%r9\n" - /* (r10,r8,r9) += n2 * c0 */ - "movq %8, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - /* (r10,r8,r9) += n1 * c1 */ - "movq %9, %%rax\n" - "mulq %%r12\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - /* (r10,r8,r9) += n0 */ - "addq %%r11, %%r10\n" - "adcq $0, %%r8\n" - "adcq $0, %%r9\n" - /* extract m2 */ - "movq %%r10, %q2\n" - "xorq %%r10, %%r10\n" - /* (r8,r9,r10) += l3 */ - "addq 24(%%rsi), %%r8\n" - "adcq $0, %%r9\n" - "adcq $0, %%r10\n" - /* (r8,r9,r10) += n3 * c0 */ - "movq %8, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* (r8,r9,r10) += n2 * c1 */ - "movq %9, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* (r8,r9,r10) += n1 */ - "addq %%r12, %%r8\n" - "adcq $0, %%r9\n" - "adcq $0, %%r10\n" - /* extract m3 */ - "movq %%r8, %q3\n" - "xorq %%r8, %%r8\n" - /* (r9,r10,r8) += n3 * c1 */ - "movq %9, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* (r9,r10,r8) += n2 */ - "addq %%r13, %%r9\n" - "adcq $0, %%r10\n" - "adcq $0, %%r8\n" - /* extract m4 */ - "movq %%r9, %q4\n" - /* (r10,r8) += n3 */ - "addq %%r14, %%r10\n" - "adcq $0, %%r8\n" - /* extract m5 */ - "movq %%r10, %q5\n" - /* extract m6 */ - "movq %%r8, %q6\n" - : "=g"(m0), "=g"(m1), "=g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6) - : "S"(l), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1) - : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc"); - - /* Reduce 385 bits into 258. 
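m[0..6] (385 bits) is folded once more using 2^256 = n + SECP256K1_N_C, producing p[0..4] (at most 258 bits).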
*/ - __asm__ __volatile__( - /* Preload */ - "movq %q9, %%r11\n" - "movq %q10, %%r12\n" - "movq %q11, %%r13\n" - /* Initialize (r8,r9,r10) */ - "movq %q5, %%r8\n" - "xorq %%r9, %%r9\n" - "xorq %%r10, %%r10\n" - /* (r8,r9) += m4 * c0 */ - "movq %12, %%rax\n" - "mulq %%r11\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - /* extract p0 */ - "movq %%r8, %q0\n" - "xorq %%r8, %%r8\n" - /* (r9,r10) += m1 */ - "addq %q6, %%r9\n" - "adcq $0, %%r10\n" - /* (r9,r10,r8) += m5 * c0 */ - "movq %12, %%rax\n" - "mulq %%r12\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* (r9,r10,r8) += m4 * c1 */ - "movq %13, %%rax\n" - "mulq %%r11\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* extract p1 */ - "movq %%r9, %q1\n" - "xorq %%r9, %%r9\n" - /* (r10,r8,r9) += m2 */ - "addq %q7, %%r10\n" - "adcq $0, %%r8\n" - "adcq $0, %%r9\n" - /* (r10,r8,r9) += m6 * c0 */ - "movq %12, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - /* (r10,r8,r9) += m5 * c1 */ - "movq %13, %%rax\n" - "mulq %%r12\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - /* (r10,r8,r9) += m4 */ - "addq %%r11, %%r10\n" - "adcq $0, %%r8\n" - "adcq $0, %%r9\n" - /* extract p2 */ - "movq %%r10, %q2\n" - /* (r8,r9) += m3 */ - "addq %q8, %%r8\n" - "adcq $0, %%r9\n" - /* (r8,r9) += m6 * c1 */ - "movq %13, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - /* (r8,r9) += m5 */ - "addq %%r12, %%r8\n" - "adcq $0, %%r9\n" - /* extract p3 */ - "movq %%r8, %q3\n" - /* (r9) += m6 */ - "addq %%r13, %%r9\n" - /* extract p4 */ - "movq %%r9, %q4\n" - : "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4) - : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1) - : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc"); - - /* Reduce 258 bits into 256. */ - __asm__ __volatile__( - /* Preload */ - "movq %q5, %%r10\n" - /* (rax,rdx) = p4 * c0 */ - "movq %7, %%rax\n" - "mulq %%r10\n" - /* (rax,rdx) += p0 */ - "addq %q1, %%rax\n" - "adcq $0, %%rdx\n" - /* extract r0 */ - "movq %%rax, 0(%q6)\n" - /* Move to (r8,r9) */ - "movq %%rdx, %%r8\n" - "xorq %%r9, %%r9\n" - /* (r8,r9) += p1 */ - "addq %q2, %%r8\n" - "adcq $0, %%r9\n" - /* (r8,r9) += p4 * c1 */ - "movq %8, %%rax\n" - "mulq %%r10\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - /* Extract r1 */ - "movq %%r8, 8(%q6)\n" - "xorq %%r8, %%r8\n" - /* (r9,r8) += p4 */ - "addq %%r10, %%r9\n" - "adcq $0, %%r8\n" - /* (r9,r8) += p2 */ - "addq %q3, %%r9\n" - "adcq $0, %%r8\n" - /* Extract r2 */ - "movq %%r9, 16(%q6)\n" - "xorq %%r9, %%r9\n" - /* (r8,r9) += p3 */ - "addq %q4, %%r8\n" - "adcq $0, %%r9\n" - /* Extract r3 */ - "movq %%r8, 24(%q6)\n" - /* Extract c */ - "movq %%r9, %q0\n" - : "=g"(c) - : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1) - : "rax", "rdx", "r8", "r9", "r10", "cc", "memory"); -#else - uint128_t c; - uint64_t c0, c1, c2; - uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7]; - uint64_t m0, m1, m2, m3, m4, m5; - uint32_t m6; - uint64_t p0, p1, p2, p3; - uint32_t p4; - - /* Reduce 512 bits into 385. */ - /* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. 
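This works because 2^256 is congruent to SECP256K1_N_C modulo the order n, so the high four limbs can be folded into the low half with a multiplication instead of a full division.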
*/ - c0 = l[0]; c1 = 0; c2 = 0; - muladd_fast(n0, SECP256K1_N_C_0); - extract_fast(m0); - sumadd_fast(l[1]); - muladd(n1, SECP256K1_N_C_0); - muladd(n0, SECP256K1_N_C_1); - extract(m1); - sumadd(l[2]); - muladd(n2, SECP256K1_N_C_0); - muladd(n1, SECP256K1_N_C_1); - sumadd(n0); - extract(m2); - sumadd(l[3]); - muladd(n3, SECP256K1_N_C_0); - muladd(n2, SECP256K1_N_C_1); - sumadd(n1); - extract(m3); - muladd(n3, SECP256K1_N_C_1); - sumadd(n2); - extract(m4); - sumadd_fast(n3); - extract_fast(m5); - VERIFY_CHECK(c0 <= 1); - m6 = c0; - - /* Reduce 385 bits into 258. */ - /* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */ - c0 = m0; c1 = 0; c2 = 0; - muladd_fast(m4, SECP256K1_N_C_0); - extract_fast(p0); - sumadd_fast(m1); - muladd(m5, SECP256K1_N_C_0); - muladd(m4, SECP256K1_N_C_1); - extract(p1); - sumadd(m2); - muladd(m6, SECP256K1_N_C_0); - muladd(m5, SECP256K1_N_C_1); - sumadd(m4); - extract(p2); - sumadd_fast(m3); - muladd_fast(m6, SECP256K1_N_C_1); - sumadd_fast(m5); - extract_fast(p3); - p4 = c0 + m6; - VERIFY_CHECK(p4 <= 2); - - /* Reduce 258 bits into 256. */ - /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */ - c = p0 + (uint128_t)SECP256K1_N_C_0 * p4; - r->d[0] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; - c += p1 + (uint128_t)SECP256K1_N_C_1 * p4; - r->d[1] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; - c += p2 + (uint128_t)p4; - r->d[2] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; - c += p3; - r->d[3] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64; -#endif - - /* Final reduction of r. */ - secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r)); -} - -static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) { -#ifdef USE_ASM_X86_64 - const uint64_t *pb = b->d; - __asm__ __volatile__( - /* Preload */ - "movq 0(%%rdi), %%r15\n" - "movq 8(%%rdi), %%rbx\n" - "movq 16(%%rdi), %%rcx\n" - "movq 0(%%rdx), %%r11\n" - "movq 8(%%rdx), %%r12\n" - "movq 16(%%rdx), %%r13\n" - "movq 24(%%rdx), %%r14\n" - /* (rax,rdx) = a0 * b0 */ - "movq %%r15, %%rax\n" - "mulq %%r11\n" - /* Extract l0 */ - "movq %%rax, 0(%%rsi)\n" - /* (r8,r9,r10) = (rdx) */ - "movq %%rdx, %%r8\n" - "xorq %%r9, %%r9\n" - "xorq %%r10, %%r10\n" - /* (r8,r9,r10) += a0 * b1 */ - "movq %%r15, %%rax\n" - "mulq %%r12\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* (r8,r9,r10) += a1 * b0 */ - "movq %%rbx, %%rax\n" - "mulq %%r11\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* Extract l1 */ - "movq %%r8, 8(%%rsi)\n" - "xorq %%r8, %%r8\n" - /* (r9,r10,r8) += a0 * b2 */ - "movq %%r15, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* (r9,r10,r8) += a1 * b1 */ - "movq %%rbx, %%rax\n" - "mulq %%r12\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* (r9,r10,r8) += a2 * b0 */ - "movq %%rcx, %%rax\n" - "mulq %%r11\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* Extract l2 */ - "movq %%r9, 16(%%rsi)\n" - "xorq %%r9, %%r9\n" - /* (r10,r8,r9) += a0 * b3 */ - "movq %%r15, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - /* Preload a3 */ - "movq 24(%%rdi), %%r15\n" - /* (r10,r8,r9) += a1 * b2 */ - "movq %%rbx, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - /* (r10,r8,r9) += a2 * b1 */ - "movq %%rcx, %%rax\n" - "mulq %%r12\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - /* (r10,r8,r9) += a3 * b0 */ - "movq %%r15, %%rax\n" - "mulq %%r11\n" - "addq %%rax, 
%%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - /* Extract l3 */ - "movq %%r10, 24(%%rsi)\n" - "xorq %%r10, %%r10\n" - /* (r8,r9,r10) += a1 * b3 */ - "movq %%rbx, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* (r8,r9,r10) += a2 * b2 */ - "movq %%rcx, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* (r8,r9,r10) += a3 * b1 */ - "movq %%r15, %%rax\n" - "mulq %%r12\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* Extract l4 */ - "movq %%r8, 32(%%rsi)\n" - "xorq %%r8, %%r8\n" - /* (r9,r10,r8) += a2 * b3 */ - "movq %%rcx, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* (r9,r10,r8) += a3 * b2 */ - "movq %%r15, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* Extract l5 */ - "movq %%r9, 40(%%rsi)\n" - /* (r10,r8) += a3 * b3 */ - "movq %%r15, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - /* Extract l6 */ - "movq %%r10, 48(%%rsi)\n" - /* Extract l7 */ - "movq %%r8, 56(%%rsi)\n" - : "+d"(pb) - : "S"(l), "D"(a->d) - : "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory"); -#else - /* 160 bit accumulator. */ - uint64_t c0 = 0, c1 = 0; - uint32_t c2 = 0; - - /* l[0..7] = a[0..3] * b[0..3]. */ - muladd_fast(a->d[0], b->d[0]); - extract_fast(l[0]); - muladd(a->d[0], b->d[1]); - muladd(a->d[1], b->d[0]); - extract(l[1]); - muladd(a->d[0], b->d[2]); - muladd(a->d[1], b->d[1]); - muladd(a->d[2], b->d[0]); - extract(l[2]); - muladd(a->d[0], b->d[3]); - muladd(a->d[1], b->d[2]); - muladd(a->d[2], b->d[1]); - muladd(a->d[3], b->d[0]); - extract(l[3]); - muladd(a->d[1], b->d[3]); - muladd(a->d[2], b->d[2]); - muladd(a->d[3], b->d[1]); - extract(l[4]); - muladd(a->d[2], b->d[3]); - muladd(a->d[3], b->d[2]); - extract(l[5]); - muladd_fast(a->d[3], b->d[3]); - extract_fast(l[6]); - VERIFY_CHECK(c1 == 0); - l[7] = c0; -#endif -} - -static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar *a) { -#ifdef USE_ASM_X86_64 - __asm__ __volatile__( - /* Preload */ - "movq 0(%%rdi), %%r11\n" - "movq 8(%%rdi), %%r12\n" - "movq 16(%%rdi), %%r13\n" - "movq 24(%%rdi), %%r14\n" - /* (rax,rdx) = a0 * a0 */ - "movq %%r11, %%rax\n" - "mulq %%r11\n" - /* Extract l0 */ - "movq %%rax, 0(%%rsi)\n" - /* (r8,r9,r10) = (rdx,0) */ - "movq %%rdx, %%r8\n" - "xorq %%r9, %%r9\n" - "xorq %%r10, %%r10\n" - /* (r8,r9,r10) += 2 * a0 * a1 */ - "movq %%r11, %%rax\n" - "mulq %%r12\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* Extract l1 */ - "movq %%r8, 8(%%rsi)\n" - "xorq %%r8, %%r8\n" - /* (r9,r10,r8) += 2 * a0 * a2 */ - "movq %%r11, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* (r9,r10,r8) += a1 * a1 */ - "movq %%r12, %%rax\n" - "mulq %%r12\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* Extract l2 */ - "movq %%r9, 16(%%rsi)\n" - "xorq %%r9, %%r9\n" - /* (r10,r8,r9) += 2 * a0 * a3 */ - "movq %%r11, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - /* (r10,r8,r9) += 2 * a1 * a2 */ - "movq %%r12, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - "addq 
%%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - /* Extract l3 */ - "movq %%r10, 24(%%rsi)\n" - "xorq %%r10, %%r10\n" - /* (r8,r9,r10) += 2 * a1 * a3 */ - "movq %%r12, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* (r8,r9,r10) += a2 * a2 */ - "movq %%r13, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* Extract l4 */ - "movq %%r8, 32(%%rsi)\n" - "xorq %%r8, %%r8\n" - /* (r9,r10,r8) += 2 * a2 * a3 */ - "movq %%r13, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* Extract l5 */ - "movq %%r9, 40(%%rsi)\n" - /* (r10,r8) += a3 * a3 */ - "movq %%r14, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - /* Extract l6 */ - "movq %%r10, 48(%%rsi)\n" - /* Extract l7 */ - "movq %%r8, 56(%%rsi)\n" - : - : "S"(l), "D"(a->d) - : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc", "memory"); -#else - /* 160 bit accumulator. */ - uint64_t c0 = 0, c1 = 0; - uint32_t c2 = 0; - - /* l[0..7] = a[0..3] * b[0..3]. */ - muladd_fast(a->d[0], a->d[0]); - extract_fast(l[0]); - muladd2(a->d[0], a->d[1]); - extract(l[1]); - muladd2(a->d[0], a->d[2]); - muladd(a->d[1], a->d[1]); - extract(l[2]); - muladd2(a->d[0], a->d[3]); - muladd2(a->d[1], a->d[2]); - extract(l[3]); - muladd2(a->d[1], a->d[3]); - muladd(a->d[2], a->d[2]); - extract(l[4]); - muladd2(a->d[2], a->d[3]); - extract(l[5]); - muladd_fast(a->d[3], a->d[3]); - extract_fast(l[6]); - VERIFY_CHECK(c1 == 0); - l[7] = c0; -#endif -} - -#undef sumadd -#undef sumadd_fast -#undef muladd -#undef muladd_fast -#undef muladd2 -#undef extract -#undef extract_fast - -static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { - uint64_t l[8]; - secp256k1_scalar_mul_512(l, a, b); - secp256k1_scalar_reduce_512(r, l); -} - -static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) { - int ret; - VERIFY_CHECK(n > 0); - VERIFY_CHECK(n < 16); - ret = r->d[0] & ((1 << n) - 1); - r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n)); - r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n)); - r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n)); - r->d[3] = (r->d[3] >> n); - return ret; -} - -static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) { - uint64_t l[8]; - secp256k1_scalar_sqr_512(l, a); - secp256k1_scalar_reduce_512(r, l); -} - -#ifdef USE_ENDOMORPHISM -static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { - r1->d[0] = a->d[0]; - r1->d[1] = a->d[1]; - r1->d[2] = 0; - r1->d[3] = 0; - r2->d[0] = a->d[2]; - r2->d[1] = a->d[3]; - r2->d[2] = 0; - r2->d[3] = 0; -} -#endif - -SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) { - return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0; -} - -SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) { - uint64_t l[8]; - unsigned int shiftlimbs; - unsigned int shiftlow; - unsigned int shifthigh; - VERIFY_CHECK(shift >= 256); - secp256k1_scalar_mul_512(l, a, b); - shiftlimbs = shift >> 6; - shiftlow = shift & 0x3F; - shifthigh = 64 - shiftlow; - r->d[0] = shift < 512 ? 
(l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0; - r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0; - r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0; - r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0; - secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1); -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_8x32.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_8x32.h deleted file mode 100644 index 1319664f6..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_8x32.h +++ /dev/null @@ -1,19 +0,0 @@ -/********************************************************************** - * Copyright (c) 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_SCALAR_REPR_ -#define _SECP256K1_SCALAR_REPR_ - -#include <stdint.h> - -/** A scalar modulo the group order of the secp256k1 curve. */ -typedef struct { - uint32_t d[8]; -} secp256k1_scalar; - -#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)}} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_8x32_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_8x32_impl.h deleted file mode 100644 index aae4f35c0..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_8x32_impl.h +++ /dev/null @@ -1,721 +0,0 @@ -/********************************************************************** - * Copyright (c) 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_ -#define _SECP256K1_SCALAR_REPR_IMPL_H_ - -/* Limbs of the secp256k1 order. */ -#define SECP256K1_N_0 ((uint32_t)0xD0364141UL) -#define SECP256K1_N_1 ((uint32_t)0xBFD25E8CUL) -#define SECP256K1_N_2 ((uint32_t)0xAF48A03BUL) -#define SECP256K1_N_3 ((uint32_t)0xBAAEDCE6UL) -#define SECP256K1_N_4 ((uint32_t)0xFFFFFFFEUL) -#define SECP256K1_N_5 ((uint32_t)0xFFFFFFFFUL) -#define SECP256K1_N_6 ((uint32_t)0xFFFFFFFFUL) -#define SECP256K1_N_7 ((uint32_t)0xFFFFFFFFUL) - -/* Limbs of 2^256 minus the secp256k1 order. */ -#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1) -#define SECP256K1_N_C_1 (~SECP256K1_N_1) -#define SECP256K1_N_C_2 (~SECP256K1_N_2) -#define SECP256K1_N_C_3 (~SECP256K1_N_3) -#define SECP256K1_N_C_4 (1) - -/* Limbs of half the secp256k1 order. 
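As in the 4x64 code, these are the limbs of (n-1)/2, consulted by secp256k1_scalar_is_high.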
*/ -#define SECP256K1_N_H_0 ((uint32_t)0x681B20A0UL) -#define SECP256K1_N_H_1 ((uint32_t)0xDFE92F46UL) -#define SECP256K1_N_H_2 ((uint32_t)0x57A4501DUL) -#define SECP256K1_N_H_3 ((uint32_t)0x5D576E73UL) -#define SECP256K1_N_H_4 ((uint32_t)0xFFFFFFFFUL) -#define SECP256K1_N_H_5 ((uint32_t)0xFFFFFFFFUL) -#define SECP256K1_N_H_6 ((uint32_t)0xFFFFFFFFUL) -#define SECP256K1_N_H_7 ((uint32_t)0x7FFFFFFFUL) - -SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { - r->d[0] = 0; - r->d[1] = 0; - r->d[2] = 0; - r->d[3] = 0; - r->d[4] = 0; - r->d[5] = 0; - r->d[6] = 0; - r->d[7] = 0; -} - -SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { - r->d[0] = v; - r->d[1] = 0; - r->d[2] = 0; - r->d[3] = 0; - r->d[4] = 0; - r->d[5] = 0; - r->d[6] = 0; - r->d[7] = 0; -} - -SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { - VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5); - return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1); -} - -SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { - VERIFY_CHECK(count < 32); - VERIFY_CHECK(offset + count <= 256); - if ((offset + count - 1) >> 5 == offset >> 5) { - return secp256k1_scalar_get_bits(a, offset, count); - } else { - VERIFY_CHECK((offset >> 5) + 1 < 8); - return ((a->d[offset >> 5] >> (offset & 0x1F)) | (a->d[(offset >> 5) + 1] << (32 - (offset & 0x1F)))) & ((((uint32_t)1) << count) - 1); - } -} - -SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { - int yes = 0; - int no = 0; - no |= (a->d[7] < SECP256K1_N_7); /* No need for a > check. */ - no |= (a->d[6] < SECP256K1_N_6); /* No need for a > check. */ - no |= (a->d[5] < SECP256K1_N_5); /* No need for a > check. 
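SECP256K1_N_5 is all ones, so d[5] can never exceed it.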
*/ - no |= (a->d[4] < SECP256K1_N_4); - yes |= (a->d[4] > SECP256K1_N_4) & ~no; - no |= (a->d[3] < SECP256K1_N_3) & ~yes; - yes |= (a->d[3] > SECP256K1_N_3) & ~no; - no |= (a->d[2] < SECP256K1_N_2) & ~yes; - yes |= (a->d[2] > SECP256K1_N_2) & ~no; - no |= (a->d[1] < SECP256K1_N_1) & ~yes; - yes |= (a->d[1] > SECP256K1_N_1) & ~no; - yes |= (a->d[0] >= SECP256K1_N_0) & ~no; - return yes; -} - -SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_t overflow) { - uint64_t t; - VERIFY_CHECK(overflow <= 1); - t = (uint64_t)r->d[0] + overflow * SECP256K1_N_C_0; - r->d[0] = t & 0xFFFFFFFFUL; t >>= 32; - t += (uint64_t)r->d[1] + overflow * SECP256K1_N_C_1; - r->d[1] = t & 0xFFFFFFFFUL; t >>= 32; - t += (uint64_t)r->d[2] + overflow * SECP256K1_N_C_2; - r->d[2] = t & 0xFFFFFFFFUL; t >>= 32; - t += (uint64_t)r->d[3] + overflow * SECP256K1_N_C_3; - r->d[3] = t & 0xFFFFFFFFUL; t >>= 32; - t += (uint64_t)r->d[4] + overflow * SECP256K1_N_C_4; - r->d[4] = t & 0xFFFFFFFFUL; t >>= 32; - t += (uint64_t)r->d[5]; - r->d[5] = t & 0xFFFFFFFFUL; t >>= 32; - t += (uint64_t)r->d[6]; - r->d[6] = t & 0xFFFFFFFFUL; t >>= 32; - t += (uint64_t)r->d[7]; - r->d[7] = t & 0xFFFFFFFFUL; - return overflow; -} - -static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { - int overflow; - uint64_t t = (uint64_t)a->d[0] + b->d[0]; - r->d[0] = t & 0xFFFFFFFFULL; t >>= 32; - t += (uint64_t)a->d[1] + b->d[1]; - r->d[1] = t & 0xFFFFFFFFULL; t >>= 32; - t += (uint64_t)a->d[2] + b->d[2]; - r->d[2] = t & 0xFFFFFFFFULL; t >>= 32; - t += (uint64_t)a->d[3] + b->d[3]; - r->d[3] = t & 0xFFFFFFFFULL; t >>= 32; - t += (uint64_t)a->d[4] + b->d[4]; - r->d[4] = t & 0xFFFFFFFFULL; t >>= 32; - t += (uint64_t)a->d[5] + b->d[5]; - r->d[5] = t & 0xFFFFFFFFULL; t >>= 32; - t += (uint64_t)a->d[6] + b->d[6]; - r->d[6] = t & 0xFFFFFFFFULL; t >>= 32; - t += (uint64_t)a->d[7] + b->d[7]; - r->d[7] = t & 0xFFFFFFFFULL; t >>= 32; - overflow = t + secp256k1_scalar_check_overflow(r); - VERIFY_CHECK(overflow == 0 || overflow == 1); - secp256k1_scalar_reduce(r, overflow); - return overflow; -} - -static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) { - uint64_t t; - VERIFY_CHECK(bit < 256); - bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */ - t = (uint64_t)r->d[0] + (((uint32_t)((bit >> 5) == 0)) << (bit & 0x1F)); - r->d[0] = t & 0xFFFFFFFFULL; t >>= 32; - t += (uint64_t)r->d[1] + (((uint32_t)((bit >> 5) == 1)) << (bit & 0x1F)); - r->d[1] = t & 0xFFFFFFFFULL; t >>= 32; - t += (uint64_t)r->d[2] + (((uint32_t)((bit >> 5) == 2)) << (bit & 0x1F)); - r->d[2] = t & 0xFFFFFFFFULL; t >>= 32; - t += (uint64_t)r->d[3] + (((uint32_t)((bit >> 5) == 3)) << (bit & 0x1F)); - r->d[3] = t & 0xFFFFFFFFULL; t >>= 32; - t += (uint64_t)r->d[4] + (((uint32_t)((bit >> 5) == 4)) << (bit & 0x1F)); - r->d[4] = t & 0xFFFFFFFFULL; t >>= 32; - t += (uint64_t)r->d[5] + (((uint32_t)((bit >> 5) == 5)) << (bit & 0x1F)); - r->d[5] = t & 0xFFFFFFFFULL; t >>= 32; - t += (uint64_t)r->d[6] + (((uint32_t)((bit >> 5) == 6)) << (bit & 0x1F)); - r->d[6] = t & 0xFFFFFFFFULL; t >>= 32; - t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F)); - r->d[7] = t & 0xFFFFFFFFULL; -#ifdef VERIFY - VERIFY_CHECK((t >> 32) == 0); - VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0); -#endif -} - -static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) { - int over; - r->d[0] = (uint32_t)b32[31] | 
(uint32_t)b32[30] << 8 | (uint32_t)b32[29] << 16 | (uint32_t)b32[28] << 24; - r->d[1] = (uint32_t)b32[27] | (uint32_t)b32[26] << 8 | (uint32_t)b32[25] << 16 | (uint32_t)b32[24] << 24; - r->d[2] = (uint32_t)b32[23] | (uint32_t)b32[22] << 8 | (uint32_t)b32[21] << 16 | (uint32_t)b32[20] << 24; - r->d[3] = (uint32_t)b32[19] | (uint32_t)b32[18] << 8 | (uint32_t)b32[17] << 16 | (uint32_t)b32[16] << 24; - r->d[4] = (uint32_t)b32[15] | (uint32_t)b32[14] << 8 | (uint32_t)b32[13] << 16 | (uint32_t)b32[12] << 24; - r->d[5] = (uint32_t)b32[11] | (uint32_t)b32[10] << 8 | (uint32_t)b32[9] << 16 | (uint32_t)b32[8] << 24; - r->d[6] = (uint32_t)b32[7] | (uint32_t)b32[6] << 8 | (uint32_t)b32[5] << 16 | (uint32_t)b32[4] << 24; - r->d[7] = (uint32_t)b32[3] | (uint32_t)b32[2] << 8 | (uint32_t)b32[1] << 16 | (uint32_t)b32[0] << 24; - over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r)); - if (overflow) { - *overflow = over; - } -} - -static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) { - bin[0] = a->d[7] >> 24; bin[1] = a->d[7] >> 16; bin[2] = a->d[7] >> 8; bin[3] = a->d[7]; - bin[4] = a->d[6] >> 24; bin[5] = a->d[6] >> 16; bin[6] = a->d[6] >> 8; bin[7] = a->d[6]; - bin[8] = a->d[5] >> 24; bin[9] = a->d[5] >> 16; bin[10] = a->d[5] >> 8; bin[11] = a->d[5]; - bin[12] = a->d[4] >> 24; bin[13] = a->d[4] >> 16; bin[14] = a->d[4] >> 8; bin[15] = a->d[4]; - bin[16] = a->d[3] >> 24; bin[17] = a->d[3] >> 16; bin[18] = a->d[3] >> 8; bin[19] = a->d[3]; - bin[20] = a->d[2] >> 24; bin[21] = a->d[2] >> 16; bin[22] = a->d[2] >> 8; bin[23] = a->d[2]; - bin[24] = a->d[1] >> 24; bin[25] = a->d[1] >> 16; bin[26] = a->d[1] >> 8; bin[27] = a->d[1]; - bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0]; -} - -SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) { - return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0; -} - -static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) { - uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0); - uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1; - r->d[0] = t & nonzero; t >>= 32; - t += (uint64_t)(~a->d[1]) + SECP256K1_N_1; - r->d[1] = t & nonzero; t >>= 32; - t += (uint64_t)(~a->d[2]) + SECP256K1_N_2; - r->d[2] = t & nonzero; t >>= 32; - t += (uint64_t)(~a->d[3]) + SECP256K1_N_3; - r->d[3] = t & nonzero; t >>= 32; - t += (uint64_t)(~a->d[4]) + SECP256K1_N_4; - r->d[4] = t & nonzero; t >>= 32; - t += (uint64_t)(~a->d[5]) + SECP256K1_N_5; - r->d[5] = t & nonzero; t >>= 32; - t += (uint64_t)(~a->d[6]) + SECP256K1_N_6; - r->d[6] = t & nonzero; t >>= 32; - t += (uint64_t)(~a->d[7]) + SECP256K1_N_7; - r->d[7] = t & nonzero; -} - -SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) { - return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0; -} - -static int secp256k1_scalar_is_high(const secp256k1_scalar *a) { - int yes = 0; - int no = 0; - no |= (a->d[7] < SECP256K1_N_H_7); - yes |= (a->d[7] > SECP256K1_N_H_7) & ~no; - no |= (a->d[6] < SECP256K1_N_H_6) & ~yes; /* No need for a > check. */ - no |= (a->d[5] < SECP256K1_N_H_5) & ~yes; /* No need for a > check. */ - no |= (a->d[4] < SECP256K1_N_H_4) & ~yes; /* No need for a > check. 
*/ - no |= (a->d[3] < SECP256K1_N_H_3) & ~yes; - yes |= (a->d[3] > SECP256K1_N_H_3) & ~no; - no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; - yes |= (a->d[2] > SECP256K1_N_H_2) & ~no; - no |= (a->d[1] < SECP256K1_N_H_1) & ~yes; - yes |= (a->d[1] > SECP256K1_N_H_1) & ~no; - yes |= (a->d[0] > SECP256K1_N_H_0) & ~no; - return yes; -} - -static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) { - /* If we are flag = 0, mask = 00...00 and this is a no-op; - * if we are flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */ - uint32_t mask = !flag - 1; - uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(r) == 0); - uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask); - r->d[0] = t & nonzero; t >>= 32; - t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask); - r->d[1] = t & nonzero; t >>= 32; - t += (uint64_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask); - r->d[2] = t & nonzero; t >>= 32; - t += (uint64_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask); - r->d[3] = t & nonzero; t >>= 32; - t += (uint64_t)(r->d[4] ^ mask) + (SECP256K1_N_4 & mask); - r->d[4] = t & nonzero; t >>= 32; - t += (uint64_t)(r->d[5] ^ mask) + (SECP256K1_N_5 & mask); - r->d[5] = t & nonzero; t >>= 32; - t += (uint64_t)(r->d[6] ^ mask) + (SECP256K1_N_6 & mask); - r->d[6] = t & nonzero; t >>= 32; - t += (uint64_t)(r->d[7] ^ mask) + (SECP256K1_N_7 & mask); - r->d[7] = t & nonzero; - return 2 * (mask == 0) - 1; -} - - -/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */ - -/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ -#define muladd(a,b) { \ - uint32_t tl, th; \ - { \ - uint64_t t = (uint64_t)a * b; \ - th = t >> 32; /* at most 0xFFFFFFFE */ \ - tl = t; \ - } \ - c0 += tl; /* overflow is handled on the next line */ \ - th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \ - c1 += th; /* overflow is handled on the next line */ \ - c2 += (c1 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \ - VERIFY_CHECK((c1 >= th) || (c2 != 0)); \ -} - -/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */ -#define muladd_fast(a,b) { \ - uint32_t tl, th; \ - { \ - uint64_t t = (uint64_t)a * b; \ - th = t >> 32; /* at most 0xFFFFFFFE */ \ - tl = t; \ - } \ - c0 += tl; /* overflow is handled on the next line */ \ - th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \ - c1 += th; /* never overflows by contract (verified in the next line) */ \ - VERIFY_CHECK(c1 >= th); \ -} - -/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ -#define muladd2(a,b) { \ - uint32_t tl, th, th2, tl2; \ - { \ - uint64_t t = (uint64_t)a * b; \ - th = t >> 32; /* at most 0xFFFFFFFE */ \ - tl = t; \ - } \ - th2 = th + th; /* at most 0xFFFFFFFE (in case th was 0x7FFFFFFF) */ \ - c2 += (th2 < th) ? 1 : 0; /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((th2 >= th) || (c2 != 0)); \ - tl2 = tl + tl; /* at most 0xFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFF) */ \ - th2 += (tl2 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \ - c0 += tl2; /* overflow is handled on the next line */ \ - th2 += (c0 < tl2) ? 1 : 0; /* second overflow is handled on the next line */ \ - c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \ - c1 += th2; /* overflow is handled on the next line */ \ - c2 += (c1 < th2) ? 
1 : 0; /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \ -} - -/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */ -#define sumadd(a) { \ - unsigned int over; \ - c0 += (a); /* overflow is handled on the next line */ \ - over = (c0 < (a)) ? 1 : 0; \ - c1 += over; /* overflow is handled on the next line */ \ - c2 += (c1 < over) ? 1 : 0; /* never overflows by contract */ \ -} - -/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */ -#define sumadd_fast(a) { \ - c0 += (a); /* overflow is handled on the next line */ \ - c1 += (c0 < (a)) ? 1 : 0; /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \ - VERIFY_CHECK(c2 == 0); \ -} - -/** Extract the lowest 32 bits of (c0,c1,c2) into n, and left shift the number 32 bits. */ -#define extract(n) { \ - (n) = c0; \ - c0 = c1; \ - c1 = c2; \ - c2 = 0; \ -} - -/** Extract the lowest 32 bits of (c0,c1,c2) into n, and left shift the number 32 bits. c2 is required to be zero. */ -#define extract_fast(n) { \ - (n) = c0; \ - c0 = c1; \ - c1 = 0; \ - VERIFY_CHECK(c2 == 0); \ -} - -static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint32_t *l) { - uint64_t c; - uint32_t n0 = l[8], n1 = l[9], n2 = l[10], n3 = l[11], n4 = l[12], n5 = l[13], n6 = l[14], n7 = l[15]; - uint32_t m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12; - uint32_t p0, p1, p2, p3, p4, p5, p6, p7, p8; - - /* 96 bit accumulator. */ - uint32_t c0, c1, c2; - - /* Reduce 512 bits into 385. */ - /* m[0..12] = l[0..7] + n[0..7] * SECP256K1_N_C. */ - c0 = l[0]; c1 = 0; c2 = 0; - muladd_fast(n0, SECP256K1_N_C_0); - extract_fast(m0); - sumadd_fast(l[1]); - muladd(n1, SECP256K1_N_C_0); - muladd(n0, SECP256K1_N_C_1); - extract(m1); - sumadd(l[2]); - muladd(n2, SECP256K1_N_C_0); - muladd(n1, SECP256K1_N_C_1); - muladd(n0, SECP256K1_N_C_2); - extract(m2); - sumadd(l[3]); - muladd(n3, SECP256K1_N_C_0); - muladd(n2, SECP256K1_N_C_1); - muladd(n1, SECP256K1_N_C_2); - muladd(n0, SECP256K1_N_C_3); - extract(m3); - sumadd(l[4]); - muladd(n4, SECP256K1_N_C_0); - muladd(n3, SECP256K1_N_C_1); - muladd(n2, SECP256K1_N_C_2); - muladd(n1, SECP256K1_N_C_3); - sumadd(n0); - extract(m4); - sumadd(l[5]); - muladd(n5, SECP256K1_N_C_0); - muladd(n4, SECP256K1_N_C_1); - muladd(n3, SECP256K1_N_C_2); - muladd(n2, SECP256K1_N_C_3); - sumadd(n1); - extract(m5); - sumadd(l[6]); - muladd(n6, SECP256K1_N_C_0); - muladd(n5, SECP256K1_N_C_1); - muladd(n4, SECP256K1_N_C_2); - muladd(n3, SECP256K1_N_C_3); - sumadd(n2); - extract(m6); - sumadd(l[7]); - muladd(n7, SECP256K1_N_C_0); - muladd(n6, SECP256K1_N_C_1); - muladd(n5, SECP256K1_N_C_2); - muladd(n4, SECP256K1_N_C_3); - sumadd(n3); - extract(m7); - muladd(n7, SECP256K1_N_C_1); - muladd(n6, SECP256K1_N_C_2); - muladd(n5, SECP256K1_N_C_3); - sumadd(n4); - extract(m8); - muladd(n7, SECP256K1_N_C_2); - muladd(n6, SECP256K1_N_C_3); - sumadd(n5); - extract(m9); - muladd(n7, SECP256K1_N_C_3); - sumadd(n6); - extract(m10); - sumadd_fast(n7); - extract_fast(m11); - VERIFY_CHECK(c0 <= 1); - m12 = c0; - - /* Reduce 385 bits into 258. */ - /* p[0..8] = m[0..7] + m[8..12] * SECP256K1_N_C. 
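Both phases of this reduction lean on the identity 2^256 == N_C (mod n), where N_C = 2^256 - n is small: the limbs above bit 256 are multiplied by N_C and folded into the low half, shrinking 512 bits to 385, then 258, then 256. The same folding on a toy 32-bit modulus (a sketch; the constants are arbitrary and it assumes m > 2^31, analogous to n being close to 2^256):

#include <stdint.h>

/* Fold-based reduction for m = 2^32 - c with small c: since
 * 2^32 == c (mod m), x = hi*2^32 + lo == hi*c + lo (mod m).
 * Assumes m > 2^31 so one final conditional subtraction suffices. */
static uint32_t reduce_fold(uint64_t x, uint32_t m) {
    uint32_t c = ~m + 1u;                           /* c = 2^32 - m */
    while (x >> 32) {
        x = (x >> 32) * (uint64_t)c + (uint32_t)x;  /* fold high word down */
    }
    return (uint32_t)(x >= m ? x - m : x);
}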
*/ - c0 = m0; c1 = 0; c2 = 0; - muladd_fast(m8, SECP256K1_N_C_0); - extract_fast(p0); - sumadd_fast(m1); - muladd(m9, SECP256K1_N_C_0); - muladd(m8, SECP256K1_N_C_1); - extract(p1); - sumadd(m2); - muladd(m10, SECP256K1_N_C_0); - muladd(m9, SECP256K1_N_C_1); - muladd(m8, SECP256K1_N_C_2); - extract(p2); - sumadd(m3); - muladd(m11, SECP256K1_N_C_0); - muladd(m10, SECP256K1_N_C_1); - muladd(m9, SECP256K1_N_C_2); - muladd(m8, SECP256K1_N_C_3); - extract(p3); - sumadd(m4); - muladd(m12, SECP256K1_N_C_0); - muladd(m11, SECP256K1_N_C_1); - muladd(m10, SECP256K1_N_C_2); - muladd(m9, SECP256K1_N_C_3); - sumadd(m8); - extract(p4); - sumadd(m5); - muladd(m12, SECP256K1_N_C_1); - muladd(m11, SECP256K1_N_C_2); - muladd(m10, SECP256K1_N_C_3); - sumadd(m9); - extract(p5); - sumadd(m6); - muladd(m12, SECP256K1_N_C_2); - muladd(m11, SECP256K1_N_C_3); - sumadd(m10); - extract(p6); - sumadd_fast(m7); - muladd_fast(m12, SECP256K1_N_C_3); - sumadd_fast(m11); - extract_fast(p7); - p8 = c0 + m12; - VERIFY_CHECK(p8 <= 2); - - /* Reduce 258 bits into 256. */ - /* r[0..7] = p[0..7] + p[8] * SECP256K1_N_C. */ - c = p0 + (uint64_t)SECP256K1_N_C_0 * p8; - r->d[0] = c & 0xFFFFFFFFUL; c >>= 32; - c += p1 + (uint64_t)SECP256K1_N_C_1 * p8; - r->d[1] = c & 0xFFFFFFFFUL; c >>= 32; - c += p2 + (uint64_t)SECP256K1_N_C_2 * p8; - r->d[2] = c & 0xFFFFFFFFUL; c >>= 32; - c += p3 + (uint64_t)SECP256K1_N_C_3 * p8; - r->d[3] = c & 0xFFFFFFFFUL; c >>= 32; - c += p4 + (uint64_t)p8; - r->d[4] = c & 0xFFFFFFFFUL; c >>= 32; - c += p5; - r->d[5] = c & 0xFFFFFFFFUL; c >>= 32; - c += p6; - r->d[6] = c & 0xFFFFFFFFUL; c >>= 32; - c += p7; - r->d[7] = c & 0xFFFFFFFFUL; c >>= 32; - - /* Final reduction of r. */ - secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r)); -} - -static void secp256k1_scalar_mul_512(uint32_t *l, const secp256k1_scalar *a, const secp256k1_scalar *b) { - /* 96 bit accumulator. */ - uint32_t c0 = 0, c1 = 0, c2 = 0; - - /* l[0..15] = a[0..7] * b[0..7]. 
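The muladd/extract macros implement a column-wise schoolbook product: every partial product of the same weight is summed into the 96-bit accumulator (c0,c1,c2), then the low 32 bits are peeled off as one output limb. The same shape for a 2x2-limb product, written as plain functions (a standalone sketch, not the library macros):

#include <stdint.h>

/* Add one 32x32->64 product into a 96-bit accumulator (c0,c1,c2),
 * propagating carries exactly as the muladd macro above does. */
static void acc_muladd(uint32_t *c0, uint32_t *c1, uint32_t *c2,
                       uint32_t a, uint32_t b) {
    uint64_t t = (uint64_t)a * b;
    uint32_t tl = (uint32_t)t, th = (uint32_t)(t >> 32);
    *c0 += tl; th += (*c0 < tl);   /* carry out of c0 */
    *c1 += th; *c2 += (*c1 < th);  /* carry out of c1 */
}

/* Extract the lowest limb and shift the accumulator right by 32 bits. */
static uint32_t acc_extract(uint32_t *c0, uint32_t *c1, uint32_t *c2) {
    uint32_t n = *c0;
    *c0 = *c1; *c1 = *c2; *c2 = 0;
    return n;
}

/* 2x2-limb schoolbook multiply, one same-weight column at a time. */
static void mul_2x2(uint32_t l[4], const uint32_t a[2], const uint32_t b[2]) {
    uint32_t c0 = 0, c1 = 0, c2 = 0;
    acc_muladd(&c0, &c1, &c2, a[0], b[0]); l[0] = acc_extract(&c0, &c1, &c2);
    acc_muladd(&c0, &c1, &c2, a[0], b[1]);
    acc_muladd(&c0, &c1, &c2, a[1], b[0]); l[1] = acc_extract(&c0, &c1, &c2);
    acc_muladd(&c0, &c1, &c2, a[1], b[1]); l[2] = acc_extract(&c0, &c1, &c2);
    l[3] = c0;                             /* remaining top limb */
}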
*/ - muladd_fast(a->d[0], b->d[0]); - extract_fast(l[0]); - muladd(a->d[0], b->d[1]); - muladd(a->d[1], b->d[0]); - extract(l[1]); - muladd(a->d[0], b->d[2]); - muladd(a->d[1], b->d[1]); - muladd(a->d[2], b->d[0]); - extract(l[2]); - muladd(a->d[0], b->d[3]); - muladd(a->d[1], b->d[2]); - muladd(a->d[2], b->d[1]); - muladd(a->d[3], b->d[0]); - extract(l[3]); - muladd(a->d[0], b->d[4]); - muladd(a->d[1], b->d[3]); - muladd(a->d[2], b->d[2]); - muladd(a->d[3], b->d[1]); - muladd(a->d[4], b->d[0]); - extract(l[4]); - muladd(a->d[0], b->d[5]); - muladd(a->d[1], b->d[4]); - muladd(a->d[2], b->d[3]); - muladd(a->d[3], b->d[2]); - muladd(a->d[4], b->d[1]); - muladd(a->d[5], b->d[0]); - extract(l[5]); - muladd(a->d[0], b->d[6]); - muladd(a->d[1], b->d[5]); - muladd(a->d[2], b->d[4]); - muladd(a->d[3], b->d[3]); - muladd(a->d[4], b->d[2]); - muladd(a->d[5], b->d[1]); - muladd(a->d[6], b->d[0]); - extract(l[6]); - muladd(a->d[0], b->d[7]); - muladd(a->d[1], b->d[6]); - muladd(a->d[2], b->d[5]); - muladd(a->d[3], b->d[4]); - muladd(a->d[4], b->d[3]); - muladd(a->d[5], b->d[2]); - muladd(a->d[6], b->d[1]); - muladd(a->d[7], b->d[0]); - extract(l[7]); - muladd(a->d[1], b->d[7]); - muladd(a->d[2], b->d[6]); - muladd(a->d[3], b->d[5]); - muladd(a->d[4], b->d[4]); - muladd(a->d[5], b->d[3]); - muladd(a->d[6], b->d[2]); - muladd(a->d[7], b->d[1]); - extract(l[8]); - muladd(a->d[2], b->d[7]); - muladd(a->d[3], b->d[6]); - muladd(a->d[4], b->d[5]); - muladd(a->d[5], b->d[4]); - muladd(a->d[6], b->d[3]); - muladd(a->d[7], b->d[2]); - extract(l[9]); - muladd(a->d[3], b->d[7]); - muladd(a->d[4], b->d[6]); - muladd(a->d[5], b->d[5]); - muladd(a->d[6], b->d[4]); - muladd(a->d[7], b->d[3]); - extract(l[10]); - muladd(a->d[4], b->d[7]); - muladd(a->d[5], b->d[6]); - muladd(a->d[6], b->d[5]); - muladd(a->d[7], b->d[4]); - extract(l[11]); - muladd(a->d[5], b->d[7]); - muladd(a->d[6], b->d[6]); - muladd(a->d[7], b->d[5]); - extract(l[12]); - muladd(a->d[6], b->d[7]); - muladd(a->d[7], b->d[6]); - extract(l[13]); - muladd_fast(a->d[7], b->d[7]); - extract_fast(l[14]); - VERIFY_CHECK(c1 == 0); - l[15] = c0; -} - -static void secp256k1_scalar_sqr_512(uint32_t *l, const secp256k1_scalar *a) { - /* 96 bit accumulator. */ - uint32_t c0 = 0, c1 = 0, c2 = 0; - - /* l[0..15] = a[0..7]^2. 
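Squaring gets its own routine because the cross terms pair up: in (a0 + a1*2^32)^2 the product a0*a1 appears twice, which is what muladd2 exploits to skip almost half of the limb multiplications. A spot check of the identity on native 64-bit arithmetic (illustration only; unsigned overflow wraps mod 2^64 by definition):

#include <assert.h>
#include <stdint.h>

int main(void) {
    uint32_t a0 = 0x89ABCDEFu, a1 = 0x01234567u; /* arbitrary limbs */
    uint64_t a = ((uint64_t)a1 << 32) | a0;
    /* (a0 + a1*2^32)^2 = a0^2 + 2*a0*a1*2^32 + a1^2*2^64; working
     * mod 2^64 is enough to spot-check the doubled cross term. */
    uint64_t lo = (uint64_t)a0 * a0 + 2 * (((uint64_t)a0 * a1) << 32);
    assert(lo == a * a); /* both sides truncated mod 2^64 */
    return 0;
}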
*/ - muladd_fast(a->d[0], a->d[0]); - extract_fast(l[0]); - muladd2(a->d[0], a->d[1]); - extract(l[1]); - muladd2(a->d[0], a->d[2]); - muladd(a->d[1], a->d[1]); - extract(l[2]); - muladd2(a->d[0], a->d[3]); - muladd2(a->d[1], a->d[2]); - extract(l[3]); - muladd2(a->d[0], a->d[4]); - muladd2(a->d[1], a->d[3]); - muladd(a->d[2], a->d[2]); - extract(l[4]); - muladd2(a->d[0], a->d[5]); - muladd2(a->d[1], a->d[4]); - muladd2(a->d[2], a->d[3]); - extract(l[5]); - muladd2(a->d[0], a->d[6]); - muladd2(a->d[1], a->d[5]); - muladd2(a->d[2], a->d[4]); - muladd(a->d[3], a->d[3]); - extract(l[6]); - muladd2(a->d[0], a->d[7]); - muladd2(a->d[1], a->d[6]); - muladd2(a->d[2], a->d[5]); - muladd2(a->d[3], a->d[4]); - extract(l[7]); - muladd2(a->d[1], a->d[7]); - muladd2(a->d[2], a->d[6]); - muladd2(a->d[3], a->d[5]); - muladd(a->d[4], a->d[4]); - extract(l[8]); - muladd2(a->d[2], a->d[7]); - muladd2(a->d[3], a->d[6]); - muladd2(a->d[4], a->d[5]); - extract(l[9]); - muladd2(a->d[3], a->d[7]); - muladd2(a->d[4], a->d[6]); - muladd(a->d[5], a->d[5]); - extract(l[10]); - muladd2(a->d[4], a->d[7]); - muladd2(a->d[5], a->d[6]); - extract(l[11]); - muladd2(a->d[5], a->d[7]); - muladd(a->d[6], a->d[6]); - extract(l[12]); - muladd2(a->d[6], a->d[7]); - extract(l[13]); - muladd_fast(a->d[7], a->d[7]); - extract_fast(l[14]); - VERIFY_CHECK(c1 == 0); - l[15] = c0; -} - -#undef sumadd -#undef sumadd_fast -#undef muladd -#undef muladd_fast -#undef muladd2 -#undef extract -#undef extract_fast - -static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { - uint32_t l[16]; - secp256k1_scalar_mul_512(l, a, b); - secp256k1_scalar_reduce_512(r, l); -} - -static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) { - int ret; - VERIFY_CHECK(n > 0); - VERIFY_CHECK(n < 16); - ret = r->d[0] & ((1 << n) - 1); - r->d[0] = (r->d[0] >> n) + (r->d[1] << (32 - n)); - r->d[1] = (r->d[1] >> n) + (r->d[2] << (32 - n)); - r->d[2] = (r->d[2] >> n) + (r->d[3] << (32 - n)); - r->d[3] = (r->d[3] >> n) + (r->d[4] << (32 - n)); - r->d[4] = (r->d[4] >> n) + (r->d[5] << (32 - n)); - r->d[5] = (r->d[5] >> n) + (r->d[6] << (32 - n)); - r->d[6] = (r->d[6] >> n) + (r->d[7] << (32 - n)); - r->d[7] = (r->d[7] >> n); - return ret; -} - -static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) { - uint32_t l[16]; - secp256k1_scalar_sqr_512(l, a); - secp256k1_scalar_reduce_512(r, l); -} - -#ifdef USE_ENDOMORPHISM -static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { - r1->d[0] = a->d[0]; - r1->d[1] = a->d[1]; - r1->d[2] = a->d[2]; - r1->d[3] = a->d[3]; - r1->d[4] = 0; - r1->d[5] = 0; - r1->d[6] = 0; - r1->d[7] = 0; - r2->d[0] = a->d[4]; - r2->d[1] = a->d[5]; - r2->d[2] = a->d[6]; - r2->d[3] = a->d[7]; - r2->d[4] = 0; - r2->d[5] = 0; - r2->d[6] = 0; - r2->d[7] = 0; -} -#endif - -SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) { - return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0; -} - -SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) { - uint32_t l[16]; - unsigned int shiftlimbs; - unsigned int shiftlow; - unsigned int shifthigh; - VERIFY_CHECK(shift >= 256); - secp256k1_scalar_mul_512(l, a, b); - shiftlimbs = shift >> 5; - 
shiftlow = shift & 0x1F; - shifthigh = 32 - shiftlow; - r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 480 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0; - r->d[1] = shift < 480 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0; - r->d[2] = shift < 448 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 416 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0; - r->d[3] = shift < 416 ? (l[3 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[4 + shiftlimbs] << shifthigh) : 0)) : 0; - r->d[4] = shift < 384 ? (l[4 + shiftlimbs] >> shiftlow | (shift < 352 && shiftlow ? (l[5 + shiftlimbs] << shifthigh) : 0)) : 0; - r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0; - r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0; - r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0; - secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1); -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_impl.h deleted file mode 100644 index f5b237640..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_impl.h +++ /dev/null @@ -1,370 +0,0 @@ -/********************************************************************** - * Copyright (c) 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_SCALAR_IMPL_H_ -#define _SECP256K1_SCALAR_IMPL_H_ - -#include "group.h" -#include "scalar.h" - -#if defined HAVE_CONFIG_H -#include "libsecp256k1-config.h" -#endif - -#if defined(EXHAUSTIVE_TEST_ORDER) -#include "scalar_low_impl.h" -#elif defined(USE_SCALAR_4X64) -#include "scalar_4x64_impl.h" -#elif defined(USE_SCALAR_8X32) -#include "scalar_8x32_impl.h" -#else -#error "Please select scalar implementation" -#endif - -#ifndef USE_NUM_NONE -static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a) { - unsigned char c[32]; - secp256k1_scalar_get_b32(c, a); - secp256k1_num_set_bin(r, c, 32); -} - -/** secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h */ -static void secp256k1_scalar_order_get_num(secp256k1_num *r) { -#if defined(EXHAUSTIVE_TEST_ORDER) - static const unsigned char order[32] = { - 0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER - }; -#else - static const unsigned char order[32] = { - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE, - 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B, - 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41 - }; -#endif - secp256k1_num_set_bin(r, order, 32); -} -#endif - -static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) { -#if defined(EXHAUSTIVE_TEST_ORDER) - int i; - *r = 0; - for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) - if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1) - *r = i; - /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus - * have a composite group order; fix it in exhaustive_tests.c). */ - VERIFY_CHECK(*r != 0); -} -#else - secp256k1_scalar *t; - int i; - /* First compute x ^ (2^N - 1) for some values of N. 
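The constant-time chain below evaluates x^(n-2) mod n, which by Fermat's little theorem is the inverse of x for prime n, using only squarings and multiplications by precomputed powers x^(2^N - 1). The underlying square-and-multiply idea on small numbers (a variable-time sketch, fine as a demo but never for secret data; assumes m < 2^32 so products fit in 64 bits):

#include <stdint.h>

/* x^e mod m by square-and-multiply. With m prime and e = m - 2 this is
 * the modular inverse (Fermat). Variable time; demo only. */
static uint64_t powmod(uint64_t x, uint64_t e, uint64_t m) {
    uint64_t r = 1;
    x %= m;
    while (e) {
        if (e & 1) {
            r = (r * x) % m; /* multiply step for a set exponent bit */
        }
        x = (x * x) % m;     /* one squaring per exponent bit */
        e >>= 1;
    }
    return r;
}

/* Example: powmod(3, 7 - 2, 7) == 5, and (3 * 5) % 7 == 1. */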
*/ - secp256k1_scalar x2, x3, x4, x6, x7, x8, x15, x30, x60, x120, x127; - - secp256k1_scalar_sqr(&x2, x); - secp256k1_scalar_mul(&x2, &x2, x); - - secp256k1_scalar_sqr(&x3, &x2); - secp256k1_scalar_mul(&x3, &x3, x); - - secp256k1_scalar_sqr(&x4, &x3); - secp256k1_scalar_mul(&x4, &x4, x); - - secp256k1_scalar_sqr(&x6, &x4); - secp256k1_scalar_sqr(&x6, &x6); - secp256k1_scalar_mul(&x6, &x6, &x2); - - secp256k1_scalar_sqr(&x7, &x6); - secp256k1_scalar_mul(&x7, &x7, x); - - secp256k1_scalar_sqr(&x8, &x7); - secp256k1_scalar_mul(&x8, &x8, x); - - secp256k1_scalar_sqr(&x15, &x8); - for (i = 0; i < 6; i++) { - secp256k1_scalar_sqr(&x15, &x15); - } - secp256k1_scalar_mul(&x15, &x15, &x7); - - secp256k1_scalar_sqr(&x30, &x15); - for (i = 0; i < 14; i++) { - secp256k1_scalar_sqr(&x30, &x30); - } - secp256k1_scalar_mul(&x30, &x30, &x15); - - secp256k1_scalar_sqr(&x60, &x30); - for (i = 0; i < 29; i++) { - secp256k1_scalar_sqr(&x60, &x60); - } - secp256k1_scalar_mul(&x60, &x60, &x30); - - secp256k1_scalar_sqr(&x120, &x60); - for (i = 0; i < 59; i++) { - secp256k1_scalar_sqr(&x120, &x120); - } - secp256k1_scalar_mul(&x120, &x120, &x60); - - secp256k1_scalar_sqr(&x127, &x120); - for (i = 0; i < 6; i++) { - secp256k1_scalar_sqr(&x127, &x127); - } - secp256k1_scalar_mul(&x127, &x127, &x7); - - /* Then accumulate the final result (t starts at x127). */ - t = &x127; - for (i = 0; i < 2; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 4; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 2; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 2; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 2; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 4; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 3; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x2); /* 11 */ - for (i = 0; i < 4; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 5; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 4; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x2); /* 11 */ - for (i = 0; i < 2; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 2; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 5; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x4); /* 1111 */ - for (i = 0; i < 2; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 3; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 4; i++) { /* 000 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 2; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 10; i++) { /* 0000000 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 4; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 9; i++) 
{ /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x8); /* 11111111 */ - for (i = 0; i < 2; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 3; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 3; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 5; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x4); /* 1111 */ - for (i = 0; i < 2; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 5; i++) { /* 000 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x2); /* 11 */ - for (i = 0; i < 4; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x2); /* 11 */ - for (i = 0; i < 2; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 8; i++) { /* 000000 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x2); /* 11 */ - for (i = 0; i < 3; i++) { /* 0 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, &x2); /* 11 */ - for (i = 0; i < 3; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 6; i++) { /* 00000 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 8; i++) { /* 00 */ - secp256k1_scalar_sqr(t, t); - } - secp256k1_scalar_mul(r, t, &x6); /* 111111 */ -} - -SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { - return !(a->d[0] & 1); -} -#endif - -static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) { -#if defined(USE_SCALAR_INV_BUILTIN) - secp256k1_scalar_inverse(r, x); -#elif defined(USE_SCALAR_INV_NUM) - unsigned char b[32]; - secp256k1_num n, m; - secp256k1_scalar t = *x; - secp256k1_scalar_get_b32(b, &t); - secp256k1_num_set_bin(&n, b, 32); - secp256k1_scalar_order_get_num(&m); - secp256k1_num_mod_inverse(&n, &n, &m); - secp256k1_num_get_bin(b, 32, &n); - secp256k1_scalar_set_b32(r, b, NULL); - /* Verify that the inverse was computed correctly, without GMP code. */ - secp256k1_scalar_mul(&t, &t, r); - CHECK(secp256k1_scalar_is_one(&t)); -#else -#error "Please select scalar inverse implementation" -#endif -} - -#ifdef USE_ENDOMORPHISM -#if defined(EXHAUSTIVE_TEST_ORDER) -/** - * Find k1 and k2 given k, such that k1 + k2 * lambda == k mod n; unlike in the - * full case we don't bother making k1 and k2 be small, we just want them to be - * nontrivial to get full test coverage for the exhaustive tests. We therefore - * (arbitrarily) set k2 = k + 5 and k1 = k - k2 * lambda. - */ -static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { - *r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER; - *r1 = (*a + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER; -} -#else -/** - * The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where - * lambda is {0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a, - * 0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78,0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72} - * - * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm - * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1 - * and k2 have a small size. 
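Before the constants are derived below: the decomposition avoids any runtime division by replacing c = round(b*k/n) with a multiply by a precomputed fixed-point reciprocal plus a shift, which is what mul_shift_var with shift 272 provides. The rounding trick on toy numbers (a sketch; n, b and the loop bounds are arbitrary demo values, nothing curve-specific):

#include <assert.h>
#include <stdint.h>

int main(void) {
    /* Precompute g = round(2^32 * b / n); then round(b*k/n) is
     * approximated by (g*k) >> 32 with no division at run time. */
    const uint32_t n = 60013, b = 12345;                  /* demo constants */
    const uint64_t g = (((uint64_t)b << 32) + n / 2) / n; /* computed once */
    uint32_t k;
    for (k = 1; k < 50000; k += 7) {
        uint64_t approx = (g * k) >> 32;                  /* shift, no divide */
        uint64_t exact = ((uint64_t)b * k + n / 2) / n;   /* round(b*k/n) */
        assert(approx + 1 >= exact && approx <= exact + 1); /* off by <= 1 */
    }
    return 0;
}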
- * It relies on constants a1, b1, a2, b2. These constants for the value of lambda above are: - * - * - a1 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15} - * - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3} - * - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8} - * - b2 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15} - * - * The algorithm then computes c1 = round(b1 * k / n) and c2 = round(b2 * k / n), and gives - * k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and - * compute k1 as k - k2 * lambda, avoiding the need for constants a1 and a2. - * - * g1, g2 are precomputed constants used to replace division with a rounded multiplication - * when decomposing the scalar for an endomorphism-based point multiplication. - * - * The possibility of using precomputed estimates is mentioned in "Guide to Elliptic Curve - * Cryptography" (Hankerson, Menezes, Vanstone) in section 3.5. - * - * The derivation is described in the paper "Efficient Software Implementation of Public-Key - * Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez), - * Section 4.3 (here we use a somewhat higher-precision estimate): - * d = a1*b2 - b1*a2 - * g1 = round((2^272)*b2/d) - * g2 = round((2^272)*b1/d) - * - * (Note that 'd' is also equal to the curve order here because [a1,b1] and [a2,b2] are found - * as outputs of the Extended Euclidean Algorithm on inputs 'order' and 'lambda'). - * - * The function below splits a in r1 and r2, such that r1 + lambda * r2 == a (mod order). - */ - -static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { - secp256k1_scalar c1, c2; - static const secp256k1_scalar minus_lambda = SECP256K1_SCALAR_CONST( - 0xAC9C52B3UL, 0x3FA3CF1FUL, 0x5AD9E3FDUL, 0x77ED9BA4UL, - 0xA880B9FCUL, 0x8EC739C2UL, 0xE0CFC810UL, 0xB51283CFUL - ); - static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST( - 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL, - 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL - ); - static const secp256k1_scalar minus_b2 = SECP256K1_SCALAR_CONST( - 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, - 0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL - ); - static const secp256k1_scalar g1 = SECP256K1_SCALAR_CONST( - 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00003086UL, - 0xD221A7D4UL, 0x6BCDE86CUL, 0x90E49284UL, 0xEB153DABUL - ); - static const secp256k1_scalar g2 = SECP256K1_SCALAR_CONST( - 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x0000E443UL, - 0x7ED6010EUL, 0x88286F54UL, 0x7FA90ABFUL, 0xE4C42212UL - ); - VERIFY_CHECK(r1 != a); - VERIFY_CHECK(r2 != a); - /* these _var calls are constant time since the shift amount is constant */ - secp256k1_scalar_mul_shift_var(&c1, a, &g1, 272); - secp256k1_scalar_mul_shift_var(&c2, a, &g2, 272); - secp256k1_scalar_mul(&c1, &c1, &minus_b1); - secp256k1_scalar_mul(&c2, &c2, &minus_b2); - secp256k1_scalar_add(r2, &c1, &c2); - secp256k1_scalar_mul(r1, r2, &minus_lambda); - secp256k1_scalar_add(r1, r1, a); -} -#endif -#endif - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_low.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_low.h deleted file mode 100644 index 5574c44c7..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_low.h +++ /dev/null @@ -1,15 +0,0
@@ -/********************************************************************** - * Copyright (c) 2015 Andrew Poelstra * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_SCALAR_REPR_ -#define _SECP256K1_SCALAR_REPR_ - -#include <stdint.h> - -/** A scalar modulo the group order of the secp256k1 curve. */ -typedef uint32_t secp256k1_scalar; - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_low_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_low_impl.h deleted file mode 100644 index 4f94441f4..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/scalar_low_impl.h +++ /dev/null @@ -1,114 +0,0 @@ -/********************************************************************** - * Copyright (c) 2015 Andrew Poelstra * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_ -#define _SECP256K1_SCALAR_REPR_IMPL_H_ - -#include "scalar.h" - -#include <string.h> - -SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { - return !(*a & 1); -} - -SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r = 0; } -SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { *r = v; } - -SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { - if (offset < 32) - return ((*a >> offset) & ((((uint32_t)1) << count) - 1)); - else - return 0; -} - -SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) { - return secp256k1_scalar_get_bits(a, offset, count); -} - -SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; } - -static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { - *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER; - return *r < *b; -} - -static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) { - if (flag && bit < 32) - *r += (1 << bit); -#ifdef VERIFY - VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0); -#endif -} - -static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) { - const int base = 0x100 % EXHAUSTIVE_TEST_ORDER; - int i; - *r = 0; - for (i = 0; i < 32; i++) { - *r = ((*r * base) + b32[i]) % EXHAUSTIVE_TEST_ORDER; - } - /* just deny overflow, it basically always happens */ - if (overflow) *overflow = 0; -} - -static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) { - memset(bin, 0, 32); - bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a; -} - -SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) { - return *a == 0; -} - -static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) { - if (*a == 0) { - *r = 0; - } else { - *r = EXHAUSTIVE_TEST_ORDER - *a; - } -} - -SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) { - return *a == 1; -} - -static int secp256k1_scalar_is_high(const secp256k1_scalar *a) { - return *a >
EXHAUSTIVE_TEST_ORDER / 2; -} - -static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) { - if (flag) secp256k1_scalar_negate(r, r); - return flag ? -1 : 1; -} - -static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) { - *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER; -} - -static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) { - int ret; - VERIFY_CHECK(n > 0); - VERIFY_CHECK(n < 16); - ret = *r & ((1 << n) - 1); - *r >>= n; - return ret; -} - -static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) { - *r = (*a * *a) % EXHAUSTIVE_TEST_ORDER; -} - -static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) { - *r1 = *a; - *r2 = 0; -} - -SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) { - return *a == *b; -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/secp256k1.c b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/secp256k1.c deleted file mode 100755 index 7d637bfad..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/secp256k1.c +++ /dev/null @@ -1,559 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013-2015 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#include "include/secp256k1.h" - -#include "util.h" -#include "num_impl.h" -#include "field_impl.h" -#include "scalar_impl.h" -#include "group_impl.h" -#include "ecmult_impl.h" -#include "ecmult_const_impl.h" -#include "ecmult_gen_impl.h" -#include "ecdsa_impl.h" -#include "eckey_impl.h" -#include "hash_impl.h" - -#define ARG_CHECK(cond) do { \ - if (EXPECT(!(cond), 0)) { \ - secp256k1_callback_call(&ctx->illegal_callback, #cond); \ - return 0; \ - } \ -} while(0) - -static void default_illegal_callback_fn(const char* str, void* data) { - fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str); - abort(); -} - -static const secp256k1_callback default_illegal_callback = { - default_illegal_callback_fn, - NULL -}; - -static void default_error_callback_fn(const char* str, void* data) { - fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str); - abort(); -} - -static const secp256k1_callback default_error_callback = { - default_error_callback_fn, - NULL -}; - - -struct secp256k1_context_struct { - secp256k1_ecmult_context ecmult_ctx; - secp256k1_ecmult_gen_context ecmult_gen_ctx; - secp256k1_callback illegal_callback; - secp256k1_callback error_callback; -}; - -secp256k1_context* secp256k1_context_create(unsigned int flags) { - secp256k1_context* ret = (secp256k1_context*)checked_malloc(&default_error_callback, sizeof(secp256k1_context)); - ret->illegal_callback = default_illegal_callback; - ret->error_callback = default_error_callback; - - if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) { - secp256k1_callback_call(&ret->illegal_callback, - "Invalid flags"); - free(ret); - return NULL; - } - - secp256k1_ecmult_context_init(&ret->ecmult_ctx); - secp256k1_ecmult_gen_context_init(&ret->ecmult_gen_ctx); - - if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) { - secp256k1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &ret->error_callback); - } - if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) { - 
secp256k1_ecmult_context_build(&ret->ecmult_ctx, &ret->error_callback); - } - - return ret; -} - -secp256k1_context* secp256k1_context_clone(const secp256k1_context* ctx) { - secp256k1_context* ret = (secp256k1_context*)checked_malloc(&ctx->error_callback, sizeof(secp256k1_context)); - ret->illegal_callback = ctx->illegal_callback; - ret->error_callback = ctx->error_callback; - secp256k1_ecmult_context_clone(&ret->ecmult_ctx, &ctx->ecmult_ctx, &ctx->error_callback); - secp256k1_ecmult_gen_context_clone(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx, &ctx->error_callback); - return ret; -} - -void secp256k1_context_destroy(secp256k1_context* ctx) { - if (ctx != NULL) { - secp256k1_ecmult_context_clear(&ctx->ecmult_ctx); - secp256k1_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx); - - free(ctx); - } -} - -void secp256k1_context_set_illegal_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - if (fun == NULL) { - fun = default_illegal_callback_fn; - } - ctx->illegal_callback.fn = fun; - ctx->illegal_callback.data = data; -} - -void secp256k1_context_set_error_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - if (fun == NULL) { - fun = default_error_callback_fn; - } - ctx->error_callback.fn = fun; - ctx->error_callback.data = data; -} - -static int secp256k1_pubkey_load(const secp256k1_context* ctx, secp256k1_ge* ge, const secp256k1_pubkey* pubkey) { - if (sizeof(secp256k1_ge_storage) == 64) { - /* When the secp256k1_ge_storage type is exactly 64 byte, use its - * representation inside secp256k1_pubkey, as conversion is very fast. - * Note that secp256k1_pubkey_save must use the same representation. */ - secp256k1_ge_storage s; - memcpy(&s, &pubkey->data[0], 64); - secp256k1_ge_from_storage(ge, &s); - } else { - /* Otherwise, fall back to 32-byte big endian for X and Y. */ - secp256k1_fe x, y; - secp256k1_fe_set_b32(&x, pubkey->data); - secp256k1_fe_set_b32(&y, pubkey->data + 32); - secp256k1_ge_set_xy(ge, &x, &y); - } - ARG_CHECK(!secp256k1_fe_is_zero(&ge->x)); - return 1; -} - -static void secp256k1_pubkey_save(secp256k1_pubkey* pubkey, secp256k1_ge* ge) { - if (sizeof(secp256k1_ge_storage) == 64) { - secp256k1_ge_storage s; - secp256k1_ge_to_storage(&s, ge); - memcpy(&pubkey->data[0], &s, 64); - } else { - VERIFY_CHECK(!secp256k1_ge_is_infinity(ge)); - secp256k1_fe_normalize_var(&ge->x); - secp256k1_fe_normalize_var(&ge->y); - secp256k1_fe_get_b32(pubkey->data, &ge->x); - secp256k1_fe_get_b32(pubkey->data + 32, &ge->y); - } -} - -int secp256k1_ec_pubkey_parse(const secp256k1_context* ctx, secp256k1_pubkey* pubkey, const unsigned char *input, size_t inputlen) { - secp256k1_ge Q; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(pubkey != NULL); - memset(pubkey, 0, sizeof(*pubkey)); - ARG_CHECK(input != NULL); - if (!secp256k1_eckey_pubkey_parse(&Q, input, inputlen)) { - return 0; - } - secp256k1_pubkey_save(pubkey, &Q); - secp256k1_ge_clear(&Q); - return 1; -} - -int secp256k1_ec_pubkey_serialize(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_pubkey* pubkey, unsigned int flags) { - secp256k1_ge Q; - size_t len; - int ret = 0; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(outputlen != NULL); - ARG_CHECK(*outputlen >= ((flags & SECP256K1_FLAGS_BIT_COMPRESSION) ? 
33 : 65)); - len = *outputlen; - *outputlen = 0; - ARG_CHECK(output != NULL); - memset(output, 0, len); - ARG_CHECK(pubkey != NULL); - ARG_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_COMPRESSION); - if (secp256k1_pubkey_load(ctx, &Q, pubkey)) { - ret = secp256k1_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION); - if (ret) { - *outputlen = len; - } - } - return ret; -} - -static void secp256k1_ecdsa_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, const secp256k1_ecdsa_signature* sig) { - (void)ctx; - if (sizeof(secp256k1_scalar) == 32) { - /* When the secp256k1_scalar type is exactly 32 byte, use its - * representation inside secp256k1_ecdsa_signature, as conversion is very fast. - * Note that secp256k1_ecdsa_signature_save must use the same representation. */ - memcpy(r, &sig->data[0], 32); - memcpy(s, &sig->data[32], 32); - } else { - secp256k1_scalar_set_b32(r, &sig->data[0], NULL); - secp256k1_scalar_set_b32(s, &sig->data[32], NULL); - } -} - -static void secp256k1_ecdsa_signature_save(secp256k1_ecdsa_signature* sig, const secp256k1_scalar* r, const secp256k1_scalar* s) { - if (sizeof(secp256k1_scalar) == 32) { - memcpy(&sig->data[0], r, 32); - memcpy(&sig->data[32], s, 32); - } else { - secp256k1_scalar_get_b32(&sig->data[0], r); - secp256k1_scalar_get_b32(&sig->data[32], s); - } -} - -int secp256k1_ecdsa_signature_parse_der(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) { - secp256k1_scalar r, s; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(sig != NULL); - ARG_CHECK(input != NULL); - - if (secp256k1_ecdsa_sig_parse(&r, &s, input, inputlen)) { - secp256k1_ecdsa_signature_save(sig, &r, &s); - return 1; - } else { - memset(sig, 0, sizeof(*sig)); - return 0; - } -} - -int secp256k1_ecdsa_signature_parse_compact(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input64) { - secp256k1_scalar r, s; - int ret = 1; - int overflow = 0; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(sig != NULL); - ARG_CHECK(input64 != NULL); - - secp256k1_scalar_set_b32(&r, &input64[0], &overflow); - ret &= !overflow; - secp256k1_scalar_set_b32(&s, &input64[32], &overflow); - ret &= !overflow; - if (ret) { - secp256k1_ecdsa_signature_save(sig, &r, &s); - } else { - memset(sig, 0, sizeof(*sig)); - } - return ret; -} - -int secp256k1_ecdsa_signature_serialize_der(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_ecdsa_signature* sig) { - secp256k1_scalar r, s; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(output != NULL); - ARG_CHECK(outputlen != NULL); - ARG_CHECK(sig != NULL); - - secp256k1_ecdsa_signature_load(ctx, &r, &s, sig); - return secp256k1_ecdsa_sig_serialize(output, outputlen, &r, &s); -} - -int secp256k1_ecdsa_signature_serialize_compact(const secp256k1_context* ctx, unsigned char *output64, const secp256k1_ecdsa_signature* sig) { - secp256k1_scalar r, s; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(output64 != NULL); - ARG_CHECK(sig != NULL); - - secp256k1_ecdsa_signature_load(ctx, &r, &s, sig); - secp256k1_scalar_get_b32(&output64[0], &r); - secp256k1_scalar_get_b32(&output64[32], &s); - return 1; -} - -int secp256k1_ecdsa_signature_normalize(const secp256k1_context* ctx, secp256k1_ecdsa_signature *sigout, const secp256k1_ecdsa_signature *sigin) { - secp256k1_scalar r, s; - int ret = 0; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(sigin != NULL); - - secp256k1_ecdsa_signature_load(ctx, &r, 
&s, sigin); - ret = secp256k1_scalar_is_high(&s); - if (sigout != NULL) { - if (ret) { - secp256k1_scalar_negate(&s, &s); - } - secp256k1_ecdsa_signature_save(sigout, &r, &s); - } - - return ret; -} - -int secp256k1_ecdsa_verify(const secp256k1_context* ctx, const secp256k1_ecdsa_signature *sig, const unsigned char *msg32, const secp256k1_pubkey *pubkey) { - secp256k1_ge q; - secp256k1_scalar r, s; - secp256k1_scalar m; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); - ARG_CHECK(msg32 != NULL); - ARG_CHECK(sig != NULL); - ARG_CHECK(pubkey != NULL); - - secp256k1_scalar_set_b32(&m, msg32, NULL); - secp256k1_ecdsa_signature_load(ctx, &r, &s, sig); - return (!secp256k1_scalar_is_high(&s) && - secp256k1_pubkey_load(ctx, &q, pubkey) && - secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m)); -} - -static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) { - unsigned char keydata[112]; - int keylen = 64; - secp256k1_rfc6979_hmac_sha256_t rng; - unsigned int i; - /* We feed a byte array to the PRNG as input, consisting of: - * - the private key (32 bytes) and message (32 bytes), see RFC 6979 3.2d. - * - optionally 32 extra bytes of data, see RFC 6979 3.6 Additional Data. - * - optionally 16 extra bytes with the algorithm name. - * Because the arguments have distinct fixed lengths it is not possible for - * different argument mixtures to emulate each other and result in the same - * nonces. - */ - memcpy(keydata, key32, 32); - memcpy(keydata + 32, msg32, 32); - if (data != NULL) { - memcpy(keydata + 64, data, 32); - keylen = 96; - } - if (algo16 != NULL) { - memcpy(keydata + keylen, algo16, 16); - keylen += 16; - } - secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, keylen); - memset(keydata, 0, sizeof(keydata)); - for (i = 0; i <= counter; i++) { - secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32); - } - secp256k1_rfc6979_hmac_sha256_finalize(&rng); - return 1; -} - -const secp256k1_nonce_function secp256k1_nonce_function_rfc6979 = nonce_function_rfc6979; -const secp256k1_nonce_function secp256k1_nonce_function_default = nonce_function_rfc6979; - -int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature *signature, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) { - secp256k1_scalar r, s; - secp256k1_scalar sec, non, msg; - int ret = 0; - int overflow = 0; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); - ARG_CHECK(msg32 != NULL); - ARG_CHECK(signature != NULL); - ARG_CHECK(seckey != NULL); - if (noncefp == NULL) { - noncefp = secp256k1_nonce_function_default; - } - - secp256k1_scalar_set_b32(&sec, seckey, &overflow); - /* Fail if the secret key is invalid. 
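Taken together, the entry points in this file compose into the usual create/sign/verify flow. A minimal caller sketch against the public API (error handling trimmed; assumes the library's public header as included above; real key material must of course never be hard-coded):

#include "include/secp256k1.h"

/* Sign a 32-byte message hash and verify the result again; a usage
 * sketch of the API removed in this file, not a complete program. */
int sign_and_verify(const unsigned char seckey[32],
                    const unsigned char msg32[32]) {
    secp256k1_context *both =
        secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
    secp256k1_pubkey pubkey;
    secp256k1_ecdsa_signature sig;
    int ok = secp256k1_ec_seckey_verify(both, seckey)       /* key in range */
          && secp256k1_ec_pubkey_create(both, &pubkey, seckey)
          && secp256k1_ecdsa_sign(both, &sig, msg32, seckey, NULL, NULL)
          && secp256k1_ecdsa_verify(both, &sig, msg32, &pubkey);
    secp256k1_context_destroy(both);
    return ok;
}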
*/ - if (!overflow && !secp256k1_scalar_is_zero(&sec)) { - unsigned char nonce32[32]; - unsigned int count = 0; - secp256k1_scalar_set_b32(&msg, msg32, NULL); - while (1) { - ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count); - if (!ret) { - break; - } - secp256k1_scalar_set_b32(&non, nonce32, &overflow); - if (!overflow && !secp256k1_scalar_is_zero(&non)) { - if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, NULL)) { - break; - } - } - count++; - } - memset(nonce32, 0, 32); - secp256k1_scalar_clear(&msg); - secp256k1_scalar_clear(&non); - secp256k1_scalar_clear(&sec); - } - if (ret) { - secp256k1_ecdsa_signature_save(signature, &r, &s); - } else { - memset(signature, 0, sizeof(*signature)); - } - return ret; -} - -int secp256k1_ec_seckey_verify(const secp256k1_context* ctx, const unsigned char *seckey) { - secp256k1_scalar sec; - int ret; - int overflow; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(seckey != NULL); - - secp256k1_scalar_set_b32(&sec, seckey, &overflow); - ret = !overflow && !secp256k1_scalar_is_zero(&sec); - secp256k1_scalar_clear(&sec); - return ret; -} - -int secp256k1_ec_pubkey_create(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *seckey) { - secp256k1_gej pj; - secp256k1_ge p; - secp256k1_scalar sec; - int overflow; - int ret = 0; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(pubkey != NULL); - memset(pubkey, 0, sizeof(*pubkey)); - ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); - ARG_CHECK(seckey != NULL); - - secp256k1_scalar_set_b32(&sec, seckey, &overflow); - ret = (!overflow) & (!secp256k1_scalar_is_zero(&sec)); - if (ret) { - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pj, &sec); - secp256k1_ge_set_gej(&p, &pj); - secp256k1_pubkey_save(pubkey, &p); - } - secp256k1_scalar_clear(&sec); - return ret; -} - -int secp256k1_ec_privkey_tweak_add(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) { - secp256k1_scalar term; - secp256k1_scalar sec; - int ret = 0; - int overflow = 0; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(seckey != NULL); - ARG_CHECK(tweak != NULL); - - secp256k1_scalar_set_b32(&term, tweak, &overflow); - secp256k1_scalar_set_b32(&sec, seckey, NULL); - - ret = !overflow && secp256k1_eckey_privkey_tweak_add(&sec, &term); - memset(seckey, 0, 32); - if (ret) { - secp256k1_scalar_get_b32(seckey, &sec); - } - - secp256k1_scalar_clear(&sec); - secp256k1_scalar_clear(&term); - return ret; -} - -int secp256k1_ec_pubkey_tweak_add(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *tweak) { - secp256k1_ge p; - secp256k1_scalar term; - int ret = 0; - int overflow = 0; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); - ARG_CHECK(pubkey != NULL); - ARG_CHECK(tweak != NULL); - - secp256k1_scalar_set_b32(&term, tweak, &overflow); - ret = !overflow && secp256k1_pubkey_load(ctx, &p, pubkey); - memset(pubkey, 0, sizeof(*pubkey)); - if (ret) { - if (secp256k1_eckey_pubkey_tweak_add(&ctx->ecmult_ctx, &p, &term)) { - secp256k1_pubkey_save(pubkey, &p); - } else { - ret = 0; - } - } - - return ret; -} - -int secp256k1_ec_privkey_tweak_mul(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) { - secp256k1_scalar factor; - secp256k1_scalar sec; - int ret = 0; - int overflow = 0; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(seckey != NULL); - ARG_CHECK(tweak != NULL); - - secp256k1_scalar_set_b32(&factor, tweak, &overflow); - secp256k1_scalar_set_b32(&sec, seckey, 
NULL); - ret = !overflow && secp256k1_eckey_privkey_tweak_mul(&sec, &factor); - memset(seckey, 0, 32); - if (ret) { - secp256k1_scalar_get_b32(seckey, &sec); - } - - secp256k1_scalar_clear(&sec); - secp256k1_scalar_clear(&factor); - return ret; -} - -int secp256k1_ec_pubkey_tweak_mul(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *tweak) { - secp256k1_ge p; - secp256k1_scalar factor; - int ret = 0; - int overflow = 0; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); - ARG_CHECK(pubkey != NULL); - ARG_CHECK(tweak != NULL); - - secp256k1_scalar_set_b32(&factor, tweak, &overflow); - ret = !overflow && secp256k1_pubkey_load(ctx, &p, pubkey); - memset(pubkey, 0, sizeof(*pubkey)); - if (ret) { - if (secp256k1_eckey_pubkey_tweak_mul(&ctx->ecmult_ctx, &p, &factor)) { - secp256k1_pubkey_save(pubkey, &p); - } else { - ret = 0; - } - } - - return ret; -} - -int secp256k1_context_randomize(secp256k1_context* ctx, const unsigned char *seed32) { - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); - secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); - return 1; -} - -int secp256k1_ec_pubkey_combine(const secp256k1_context* ctx, secp256k1_pubkey *pubnonce, const secp256k1_pubkey * const *pubnonces, size_t n) { - size_t i; - secp256k1_gej Qj; - secp256k1_ge Q; - - ARG_CHECK(pubnonce != NULL); - memset(pubnonce, 0, sizeof(*pubnonce)); - ARG_CHECK(n >= 1); - ARG_CHECK(pubnonces != NULL); - - secp256k1_gej_set_infinity(&Qj); - - for (i = 0; i < n; i++) { - secp256k1_pubkey_load(ctx, &Q, pubnonces[i]); - secp256k1_gej_add_ge(&Qj, &Qj, &Q); - } - if (secp256k1_gej_is_infinity(&Qj)) { - return 0; - } - secp256k1_ge_set_gej(&Q, &Qj); - secp256k1_pubkey_save(pubnonce, &Q); - return 1; -} - -#ifdef ENABLE_MODULE_ECDH -# include "modules/ecdh/main_impl.h" -#endif - -#ifdef ENABLE_MODULE_SCHNORR -# include "modules/schnorr/main_impl.h" -#endif - -#ifdef ENABLE_MODULE_RECOVERY -# include "modules/recovery/main_impl.h" -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/testrand.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/testrand.h deleted file mode 100644 index f8efa93c7..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/testrand.h +++ /dev/null @@ -1,38 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_TESTRAND_H_ -#define _SECP256K1_TESTRAND_H_ - -#if defined HAVE_CONFIG_H -#include "libsecp256k1-config.h" -#endif - -/* A non-cryptographic RNG used only for test infrastructure. */ - -/** Seed the pseudorandom number generator for testing. */ -SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16); - -/** Generate a pseudorandom number in the range [0..2**32-1]. */ -static uint32_t secp256k1_rand32(void); - -/** Generate a pseudorandom number in the range [0..2**bits-1]. Bits must be 1 or - * more. */ -static uint32_t secp256k1_rand_bits(int bits); - -/** Generate a pseudorandom number in the range [0..range-1]. */ -static uint32_t secp256k1_rand_int(uint32_t range); - -/** Generate a pseudorandom 32-byte array. 
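The declarations that follow describe a deterministic, seedable test RNG: reseeding with the same 16 bytes must reproduce the same stream, which is what makes failing randomized tests replayable. A usage sketch (assumes the matching implementation header is in scope):

#include <stdint.h>

/* Replay sketch for the test RNG declared below: the same 16-byte seed
 * must yield the same stream. */
static int test_rng_replays(void) {
    static const unsigned char seed16[16] = { 7 }; /* arbitrary fixed seed */
    uint32_t first, again;
    secp256k1_rand_seed(seed16);
    first = secp256k1_rand32();
    secp256k1_rand_seed(seed16);
    again = secp256k1_rand32();
    return first == again; /* expected: 1 */
}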
*/ -static void secp256k1_rand256(unsigned char *b32); - -/** Generate a pseudorandom 32-byte array with long sequences of zero and one bits. */ -static void secp256k1_rand256_test(unsigned char *b32); - -/** Generate pseudorandom bytes with long sequences of zero and one bits. */ -static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len); - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/testrand_impl.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/testrand_impl.h deleted file mode 100644 index 15c7b9f12..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/testrand_impl.h +++ /dev/null @@ -1,110 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013-2015 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_TESTRAND_IMPL_H_ -#define _SECP256K1_TESTRAND_IMPL_H_ - -#include <stdint.h> -#include <string.h> - -#include "testrand.h" -#include "hash.h" - -static secp256k1_rfc6979_hmac_sha256_t secp256k1_test_rng; -static uint32_t secp256k1_test_rng_precomputed[8]; -static int secp256k1_test_rng_precomputed_used = 8; -static uint64_t secp256k1_test_rng_integer; -static int secp256k1_test_rng_integer_bits_left = 0; - -SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16) { - secp256k1_rfc6979_hmac_sha256_initialize(&secp256k1_test_rng, seed16, 16); -} - -SECP256K1_INLINE static uint32_t secp256k1_rand32(void) { - if (secp256k1_test_rng_precomputed_used == 8) { - secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, (unsigned char*)(&secp256k1_test_rng_precomputed[0]), sizeof(secp256k1_test_rng_precomputed)); - secp256k1_test_rng_precomputed_used = 0; - } - return secp256k1_test_rng_precomputed[secp256k1_test_rng_precomputed_used++]; -} - -static uint32_t secp256k1_rand_bits(int bits) { - uint32_t ret; - if (secp256k1_test_rng_integer_bits_left < bits) { - secp256k1_test_rng_integer |= (((uint64_t)secp256k1_rand32()) << secp256k1_test_rng_integer_bits_left); - secp256k1_test_rng_integer_bits_left += 32; - } - ret = secp256k1_test_rng_integer; - secp256k1_test_rng_integer >>= bits; - secp256k1_test_rng_integer_bits_left -= bits; - ret &= ((~((uint32_t)0)) >> (32 - bits)); - return ret; -} - -static uint32_t secp256k1_rand_int(uint32_t range) { - /* We want a uniform integer between 0 and range-1, inclusive. - * B is the smallest number such that range <= 2**B. - * two mechanisms implemented here: - * - generate B bits numbers until one below range is found, and return it - * - find the largest multiple M of range that is <= 2**(B+A), generate B+A - * bits numbers until one below M is found, and return it modulo range - * The second mechanism consumes A more bits of entropy in every iteration, - * but may need fewer iterations due to M being closer to 2**(B+A) than - * range is to 2**B. The array below (indexed by B) contains a 0 when the - * first mechanism is to be used, and the number A otherwise.
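The first mechanism from the comment above, in isolation, is plain rejection sampling; the addbits variant merely spends A extra bits per draw to cut the rejection rate. A sketch of mechanism one (rand_bits stands in for secp256k1_rand_bits):

#include <stdint.h>

/* Draw B-bit values until one falls below range; every accepted value
 * is equally likely on each draw, so the result is uniform on [0, range). */
static uint32_t rand_int_simple(uint32_t range, uint32_t (*rand_bits)(int)) {
    uint32_t t = range - 1;
    int bits = 0;
    if (range <= 1) {
        return 0;
    }
    while (t > 0) { /* B = smallest bit count with range <= 2^B */
        t >>= 1;
        bits++;
    }
    for (;;) {
        uint32_t x = rand_bits(bits);
        if (x < range) {
            return x; /* accept */
        }
    }
}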
- */ - static const int addbits[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 0}; - uint32_t trange, mult; - int bits = 0; - if (range <= 1) { - return 0; - } - trange = range - 1; - while (trange > 0) { - trange >>= 1; - bits++; - } - if (addbits[bits]) { - bits = bits + addbits[bits]; - mult = ((~((uint32_t)0)) >> (32 - bits)) / range; - trange = range * mult; - } else { - trange = range; - mult = 1; - } - while(1) { - uint32_t x = secp256k1_rand_bits(bits); - if (x < trange) { - return (mult == 1) ? x : (x % range); - } - } -} - -static void secp256k1_rand256(unsigned char *b32) { - secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, b32, 32); -} - -static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len) { - size_t bits = 0; - memset(bytes, 0, len); - while (bits < len * 8) { - int now; - uint32_t val; - now = 1 + (secp256k1_rand_bits(6) * secp256k1_rand_bits(5) + 16) / 31; - val = secp256k1_rand_bits(1); - while (now > 0 && bits < len * 8) { - bytes[bits / 8] |= val << (bits % 8); - now--; - bits++; - } - } -} - -static void secp256k1_rand256_test(unsigned char *b32) { - secp256k1_rand_bytes_test(b32, 32); -} - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/tests.c b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/tests.c deleted file mode 100644 index 9ae7d3028..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/tests.c +++ /dev/null @@ -1,4525 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014, 2015 Pieter Wuille, Gregory Maxwell * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#if defined HAVE_CONFIG_H -#include "libsecp256k1-config.h" -#endif - -#include <stdio.h> -#include <stdlib.h> - -#include <time.h> - -#include "secp256k1.c" -#include "include/secp256k1.h" -#include "testrand_impl.h" - -#ifdef ENABLE_OPENSSL_TESTS -#include "openssl/bn.h" -#include "openssl/ec.h" -#include "openssl/ecdsa.h" -#include "openssl/obj_mac.h" -#endif - -#include "contrib/lax_der_parsing.c" -#include "contrib/lax_der_privatekey_parsing.c" - -#if !defined(VG_CHECK) -# if defined(VALGRIND) -# include <valgrind/memcheck.h> -# define VG_UNDEF(x,y) VALGRIND_MAKE_MEM_UNDEFINED((x),(y)) -# define VG_CHECK(x,y) VALGRIND_CHECK_MEM_IS_DEFINED((x),(y)) -# else -# define VG_UNDEF(x,y) -# define VG_CHECK(x,y) -# endif -#endif - -static int count = 64; -static secp256k1_context *ctx = NULL; - -static void counting_illegal_callback_fn(const char* str, void* data) { - /* Dummy callback function that just counts. */ - int32_t *p; - (void)str; - p = data; - (*p)++; -} - -static void uncounting_illegal_callback_fn(const char* str, void* data) { - /* Dummy callback function that just counts (backwards).
*/ - int32_t *p; - (void)str; - p = data; - (*p)--; -} - -void random_field_element_test(secp256k1_fe *fe) { - do { - unsigned char b32[32]; - secp256k1_rand256_test(b32); - if (secp256k1_fe_set_b32(fe, b32)) { - break; - } - } while(1); -} - -void random_field_element_magnitude(secp256k1_fe *fe) { - secp256k1_fe zero; - int n = secp256k1_rand_int(9); - secp256k1_fe_normalize(fe); - if (n == 0) { - return; - } - secp256k1_fe_clear(&zero); - secp256k1_fe_negate(&zero, &zero, 0); - secp256k1_fe_mul_int(&zero, n - 1); - secp256k1_fe_add(fe, &zero); - VERIFY_CHECK(fe->magnitude == n); -} - -void random_group_element_test(secp256k1_ge *ge) { - secp256k1_fe fe; - do { - random_field_element_test(&fe); - if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_rand_bits(1))) { - secp256k1_fe_normalize(&ge->y); - break; - } - } while(1); -} - -void random_group_element_jacobian_test(secp256k1_gej *gej, const secp256k1_ge *ge) { - secp256k1_fe z2, z3; - do { - random_field_element_test(&gej->z); - if (!secp256k1_fe_is_zero(&gej->z)) { - break; - } - } while(1); - secp256k1_fe_sqr(&z2, &gej->z); - secp256k1_fe_mul(&z3, &z2, &gej->z); - secp256k1_fe_mul(&gej->x, &ge->x, &z2); - secp256k1_fe_mul(&gej->y, &ge->y, &z3); - gej->infinity = ge->infinity; -} - -void random_scalar_order_test(secp256k1_scalar *num) { - do { - unsigned char b32[32]; - int overflow = 0; - secp256k1_rand256_test(b32); - secp256k1_scalar_set_b32(num, b32, &overflow); - if (overflow || secp256k1_scalar_is_zero(num)) { - continue; - } - break; - } while(1); -} - -void random_scalar_order(secp256k1_scalar *num) { - do { - unsigned char b32[32]; - int overflow = 0; - secp256k1_rand256(b32); - secp256k1_scalar_set_b32(num, b32, &overflow); - if (overflow || secp256k1_scalar_is_zero(num)) { - continue; - } - break; - } while(1); -} - -void run_context_tests(void) { - secp256k1_pubkey pubkey; - secp256k1_ecdsa_signature sig; - unsigned char ctmp[32]; - int32_t ecount; - int32_t ecount2; - secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE); - secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); - secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY); - secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - - secp256k1_gej pubj; - secp256k1_ge pub; - secp256k1_scalar msg, key, nonce; - secp256k1_scalar sigr, sigs; - - ecount = 0; - ecount2 = 10; - secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); - secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount2); - secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, NULL); - CHECK(vrfy->error_callback.fn != sign->error_callback.fn); - - /*** clone and destroy all of them to make sure cloning was complete ***/ - { - secp256k1_context *ctx_tmp; - - ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_destroy(ctx_tmp); - ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_destroy(ctx_tmp); - ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_destroy(ctx_tmp); - ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_destroy(ctx_tmp); - } - - /* Verify that the error callback makes it across the clone. */ - CHECK(vrfy->error_callback.fn != sign->error_callback.fn); - /* And that it resets back to default. 
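The counting callbacks turn ARG_CHECK violations into observable test state instead of an abort. The idiom in isolation (hypothetical check function, mirroring the calls used in this test):

/* Install a counting callback, trigger one illegal argument on purpose,
 * and confirm the callback fired; then restore the default handler.
 * Sketch of the test idiom, not part of the original suite. */
static void check_illegal_arg_counted(secp256k1_context *myctx) {
    int32_t ecount = 0;
    secp256k1_context_set_illegal_callback(myctx, counting_illegal_callback_fn, &ecount);
    /* A NULL output pointer trips ARG_CHECK inside the call and makes it
     * return 0 instead of crashing: */
    CHECK(secp256k1_ec_pubkey_create(myctx, NULL, NULL) == 0);
    CHECK(ecount >= 1);
    secp256k1_context_set_illegal_callback(myctx, NULL, NULL);
}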
*/ - secp256k1_context_set_error_callback(sign, NULL, NULL); - CHECK(vrfy->error_callback.fn == sign->error_callback.fn); - - /*** attempt to use them ***/ - random_scalar_order_test(&msg); - random_scalar_order_test(&key); - secp256k1_ecmult_gen(&both->ecmult_gen_ctx, &pubj, &key); - secp256k1_ge_set_gej(&pub, &pubj); - - /* Verify context-type checking illegal-argument errors. */ - memset(ctmp, 1, 32); - CHECK(secp256k1_ec_pubkey_create(vrfy, &pubkey, ctmp) == 0); - CHECK(ecount == 1); - VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(sign, &pubkey, ctmp) == 1); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ecdsa_sign(vrfy, &sig, ctmp, ctmp, NULL, NULL) == 0); - CHECK(ecount == 2); - VG_UNDEF(&sig, sizeof(sig)); - CHECK(secp256k1_ecdsa_sign(sign, &sig, ctmp, ctmp, NULL, NULL) == 1); - VG_CHECK(&sig, sizeof(sig)); - CHECK(ecount2 == 10); - CHECK(secp256k1_ecdsa_verify(sign, &sig, ctmp, &pubkey) == 0); - CHECK(ecount2 == 11); - CHECK(secp256k1_ecdsa_verify(vrfy, &sig, ctmp, &pubkey) == 1); - CHECK(ecount == 2); - CHECK(secp256k1_ec_pubkey_tweak_add(sign, &pubkey, ctmp) == 0); - CHECK(ecount2 == 12); - CHECK(secp256k1_ec_pubkey_tweak_add(vrfy, &pubkey, ctmp) == 1); - CHECK(ecount == 2); - CHECK(secp256k1_ec_pubkey_tweak_mul(sign, &pubkey, ctmp) == 0); - CHECK(ecount2 == 13); - CHECK(secp256k1_ec_pubkey_tweak_mul(vrfy, &pubkey, ctmp) == 1); - CHECK(ecount == 2); - CHECK(secp256k1_context_randomize(vrfy, ctmp) == 0); - CHECK(ecount == 3); - CHECK(secp256k1_context_randomize(sign, NULL) == 1); - CHECK(ecount2 == 13); - secp256k1_context_set_illegal_callback(vrfy, NULL, NULL); - secp256k1_context_set_illegal_callback(sign, NULL, NULL); - - /* This shouldn't leak memory, due to already-set tests. */ - secp256k1_ecmult_gen_context_build(&sign->ecmult_gen_ctx, NULL); - secp256k1_ecmult_context_build(&vrfy->ecmult_ctx, NULL); - - /* obtain a working nonce */ - do { - random_scalar_order_test(&nonce); - } while(!secp256k1_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); - - /* try signing */ - CHECK(secp256k1_ecdsa_sig_sign(&sign->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); - CHECK(secp256k1_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL)); - - /* try verifying */ - CHECK(secp256k1_ecdsa_sig_verify(&vrfy->ecmult_ctx, &sigr, &sigs, &pub, &msg)); - CHECK(secp256k1_ecdsa_sig_verify(&both->ecmult_ctx, &sigr, &sigs, &pub, &msg)); - - /* cleanup */ - secp256k1_context_destroy(none); - secp256k1_context_destroy(sign); - secp256k1_context_destroy(vrfy); - secp256k1_context_destroy(both); - /* Defined as no-op. 
*/ - secp256k1_context_destroy(NULL); -} - -/***** HASH TESTS *****/ - -void run_sha256_tests(void) { - static const char *inputs[8] = { - "", "abc", "message digest", "secure hash algorithm", "SHA256 is considered to be safe", - "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", - "For this sample, this 63-byte string will be used as input data", - "This is exactly 64 bytes long, not counting the terminating byte" - }; - static const unsigned char outputs[8][32] = { - {0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}, - {0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad}, - {0xf7, 0x84, 0x6f, 0x55, 0xcf, 0x23, 0xe1, 0x4e, 0xeb, 0xea, 0xb5, 0xb4, 0xe1, 0x55, 0x0c, 0xad, 0x5b, 0x50, 0x9e, 0x33, 0x48, 0xfb, 0xc4, 0xef, 0xa3, 0xa1, 0x41, 0x3d, 0x39, 0x3c, 0xb6, 0x50}, - {0xf3, 0x0c, 0xeb, 0x2b, 0xb2, 0x82, 0x9e, 0x79, 0xe4, 0xca, 0x97, 0x53, 0xd3, 0x5a, 0x8e, 0xcc, 0x00, 0x26, 0x2d, 0x16, 0x4c, 0xc0, 0x77, 0x08, 0x02, 0x95, 0x38, 0x1c, 0xbd, 0x64, 0x3f, 0x0d}, - {0x68, 0x19, 0xd9, 0x15, 0xc7, 0x3f, 0x4d, 0x1e, 0x77, 0xe4, 0xe1, 0xb5, 0x2d, 0x1f, 0xa0, 0xf9, 0xcf, 0x9b, 0xea, 0xea, 0xd3, 0x93, 0x9f, 0x15, 0x87, 0x4b, 0xd9, 0x88, 0xe2, 0xa2, 0x36, 0x30}, - {0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8, 0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39, 0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67, 0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1}, - {0xf0, 0x8a, 0x78, 0xcb, 0xba, 0xee, 0x08, 0x2b, 0x05, 0x2a, 0xe0, 0x70, 0x8f, 0x32, 0xfa, 0x1e, 0x50, 0xc5, 0xc4, 0x21, 0xaa, 0x77, 0x2b, 0xa5, 0xdb, 0xb4, 0x06, 0xa2, 0xea, 0x6b, 0xe3, 0x42}, - {0xab, 0x64, 0xef, 0xf7, 0xe8, 0x8e, 0x2e, 0x46, 0x16, 0x5e, 0x29, 0xf2, 0xbc, 0xe4, 0x18, 0x26, 0xbd, 0x4c, 0x7b, 0x35, 0x52, 0xf6, 0xb3, 0x82, 0xa9, 0xe7, 0xd3, 0xaf, 0x47, 0xc2, 0x45, 0xf8} - }; - int i; - for (i = 0; i < 8; i++) { - unsigned char out[32]; - secp256k1_sha256_t hasher; - secp256k1_sha256_initialize(&hasher); - secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); - secp256k1_sha256_finalize(&hasher, out); - CHECK(memcmp(out, outputs[i], 32) == 0); - if (strlen(inputs[i]) > 0) { - int split = secp256k1_rand_int(strlen(inputs[i])); - secp256k1_sha256_initialize(&hasher); - secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); - secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); - secp256k1_sha256_finalize(&hasher, out); - CHECK(memcmp(out, outputs[i], 32) == 0); - } - } -} - -void run_hmac_sha256_tests(void) { - static const char *keys[6] = { - "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b", - "\x4a\x65\x66\x65", - "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", - "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19", - 
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", - "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - }; - static const char *inputs[6] = { - "\x48\x69\x20\x54\x68\x65\x72\x65", - "\x77\x68\x61\x74\x20\x64\x6f\x20\x79\x61\x20\x77\x61\x6e\x74\x20\x66\x6f\x72\x20\x6e\x6f\x74\x68\x69\x6e\x67\x3f", - "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd", - "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd", - "\x54\x65\x73\x74\x20\x55\x73\x69\x6e\x67\x20\x4c\x61\x72\x67\x65\x72\x20\x54\x68\x61\x6e\x20\x42\x6c\x6f\x63\x6b\x2d\x53\x69\x7a\x65\x20\x4b\x65\x79\x20\x2d\x20\x48\x61\x73\x68\x20\x4b\x65\x79\x20\x46\x69\x72\x73\x74", - "\x54\x68\x69\x73\x20\x69\x73\x20\x61\x20\x74\x65\x73\x74\x20\x75\x73\x69\x6e\x67\x20\x61\x20\x6c\x61\x72\x67\x65\x72\x20\x74\x68\x61\x6e\x20\x62\x6c\x6f\x63\x6b\x2d\x73\x69\x7a\x65\x20\x6b\x65\x79\x20\x61\x6e\x64\x20\x61\x20\x6c\x61\x72\x67\x65\x72\x20\x74\x68\x61\x6e\x20\x62\x6c\x6f\x63\x6b\x2d\x73\x69\x7a\x65\x20\x64\x61\x74\x61\x2e\x20\x54\x68\x65\x20\x6b\x65\x79\x20\x6e\x65\x65\x64\x73\x20\x74\x6f\x20\x62\x65\x20\x68\x61\x73\x68\x65\x64\x20\x62\x65\x66\x6f\x72\x65\x20\x62\x65\x69\x6e\x67\x20\x75\x73\x65\x64\x20\x62\x79\x20\x74\x68\x65\x20\x48\x4d\x41\x43\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d\x2e" - }; - static const unsigned char outputs[6][32] = { - {0xb0, 0x34, 0x4c, 0x61, 0xd8, 0xdb, 0x38, 0x53, 0x5c, 0xa8, 0xaf, 0xce, 0xaf, 0x0b, 0xf1, 0x2b, 0x88, 0x1d, 0xc2, 0x00, 0xc9, 0x83, 0x3d, 0xa7, 0x26, 0xe9, 0x37, 0x6c, 0x2e, 0x32, 0xcf, 0xf7}, - {0x5b, 0xdc, 0xc1, 0x46, 0xbf, 0x60, 0x75, 0x4e, 0x6a, 0x04, 0x24, 0x26, 0x08, 0x95, 0x75, 0xc7, 0x5a, 0x00, 0x3f, 0x08, 0x9d, 0x27, 0x39, 0x83, 0x9d, 0xec, 0x58, 0xb9, 0x64, 0xec, 0x38, 0x43}, - {0x77, 0x3e, 0xa9, 0x1e, 0x36, 0x80, 0x0e, 0x46, 0x85, 0x4d, 0xb8, 0xeb, 0xd0, 0x91, 0x81, 0xa7, 0x29, 0x59, 0x09, 0x8b, 0x3e, 0xf8, 0xc1, 0x22, 0xd9, 0x63, 0x55, 0x14, 0xce, 0xd5, 0x65, 0xfe}, - {0x82, 0x55, 0x8a, 0x38, 0x9a, 0x44, 0x3c, 0x0e, 0xa4, 0xcc, 0x81, 0x98, 0x99, 0xf2, 0x08, 0x3a, 0x85, 0xf0, 0xfa, 0xa3, 0xe5, 0x78, 0xf8, 0x07, 0x7a, 0x2e, 0x3f, 0xf4, 0x67, 0x29, 0x66, 0x5b}, - {0x60, 0xe4, 0x31, 0x59, 0x1e, 0xe0, 0xb6, 0x7f, 0x0d, 0x8a, 0x26, 0xaa, 0xcb, 0xf5, 0xb7, 0x7f, 0x8e, 0x0b, 0xc6, 0x21, 0x37, 0x28, 0xc5, 0x14, 0x05, 0x46, 0x04, 0x0f, 0x0e, 0xe3, 0x7f, 0x54}, - {0x9b, 0x09, 0xff, 
0xa7, 0x1b, 0x94, 0x2f, 0xcb, 0x27, 0x63, 0x5f, 0xbc, 0xd5, 0xb0, 0xe9, 0x44, 0xbf, 0xdc, 0x63, 0x64, 0x4f, 0x07, 0x13, 0x93, 0x8a, 0x7f, 0x51, 0x53, 0x5c, 0x3a, 0x35, 0xe2} - }; - int i; - for (i = 0; i < 6; i++) { - secp256k1_hmac_sha256_t hasher; - unsigned char out[32]; - secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); - secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i])); - secp256k1_hmac_sha256_finalize(&hasher, out); - CHECK(memcmp(out, outputs[i], 32) == 0); - if (strlen(inputs[i]) > 0) { - int split = secp256k1_rand_int(strlen(inputs[i])); - secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i])); - secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split); - secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split); - secp256k1_hmac_sha256_finalize(&hasher, out); - CHECK(memcmp(out, outputs[i], 32) == 0); - } - } -} - -void run_rfc6979_hmac_sha256_tests(void) { - static const unsigned char key1[65] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x4b, 0xf5, 0x12, 0x2f, 0x34, 0x45, 0x54, 0xc5, 0x3b, 0xde, 0x2e, 0xbb, 0x8c, 0xd2, 0xb7, 0xe3, 0xd1, 0x60, 0x0a, 0xd6, 0x31, 0xc3, 0x85, 0xa5, 0xd7, 0xcc, 0xe2, 0x3c, 0x77, 0x85, 0x45, 0x9a, 0}; - static const unsigned char out1[3][32] = { - {0x4f, 0xe2, 0x95, 0x25, 0xb2, 0x08, 0x68, 0x09, 0x15, 0x9a, 0xcd, 0xf0, 0x50, 0x6e, 0xfb, 0x86, 0xb0, 0xec, 0x93, 0x2c, 0x7b, 0xa4, 0x42, 0x56, 0xab, 0x32, 0x1e, 0x42, 0x1e, 0x67, 0xe9, 0xfb}, - {0x2b, 0xf0, 0xff, 0xf1, 0xd3, 0xc3, 0x78, 0xa2, 0x2d, 0xc5, 0xde, 0x1d, 0x85, 0x65, 0x22, 0x32, 0x5c, 0x65, 0xb5, 0x04, 0x49, 0x1a, 0x0c, 0xbd, 0x01, 0xcb, 0x8f, 0x3a, 0xa6, 0x7f, 0xfd, 0x4a}, - {0xf5, 0x28, 0xb4, 0x10, 0xcb, 0x54, 0x1f, 0x77, 0x00, 0x0d, 0x7a, 0xfb, 0x6c, 0x5b, 0x53, 0xc5, 0xc4, 0x71, 0xea, 0xb4, 0x3e, 0x46, 0x6d, 0x9a, 0xc5, 0x19, 0x0c, 0x39, 0xc8, 0x2f, 0xd8, 0x2e} - }; - - static const unsigned char key2[64] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}; - static const unsigned char out2[3][32] = { - {0x9c, 0x23, 0x6c, 0x16, 0x5b, 0x82, 0xae, 0x0c, 0xd5, 0x90, 0x65, 0x9e, 0x10, 0x0b, 0x6b, 0xab, 0x30, 0x36, 0xe7, 0xba, 0x8b, 0x06, 0x74, 0x9b, 0xaf, 0x69, 0x81, 0xe1, 0x6f, 0x1a, 0x2b, 0x95}, - {0xdf, 0x47, 0x10, 0x61, 0x62, 0x5b, 0xc0, 0xea, 0x14, 0xb6, 0x82, 0xfe, 0xee, 0x2c, 0x9c, 0x02, 0xf2, 0x35, 0xda, 0x04, 0x20, 0x4c, 0x1d, 0x62, 0xa1, 0x53, 0x6c, 0x6e, 0x17, 0xae, 0xd7, 0xa9}, - {0x75, 0x97, 0x88, 0x7c, 0xbd, 0x76, 0x32, 0x1f, 0x32, 0xe3, 0x04, 0x40, 0x67, 0x9a, 0x22, 0xcf, 0x7f, 0x8d, 0x9d, 0x2e, 0xac, 0x39, 0x0e, 0x58, 0x1f, 0xea, 0x09, 0x1c, 0xe2, 0x02, 0xba, 0x94} - }; - - secp256k1_rfc6979_hmac_sha256_t rng; - unsigned char out[32]; - int i; - - secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 64); - for (i = 0; i < 3; i++) { - secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32); - CHECK(memcmp(out, out1[i], 32) == 0); - } - secp256k1_rfc6979_hmac_sha256_finalize(&rng); - - 
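The surrounding checks in run_rfc6979_hmac_sha256_tests follow a reseed/reproduce/diverge pattern: re-initializing with the same 64-byte key must reproduce the known 32-byte output blocks exactly, while passing 65 bytes of the same buffer (one extra byte) must change every output block. The following is a minimal self-contained sketch of that pattern only; it uses a stand-in FNV-1a/xorshift32 toy generator, not the library's internal RFC6979 RNG, so its outputs are illustrative rather than test vectors.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in deterministic generator -- NOT secp256k1's RFC6979-based RNG.
 * It only mirrors the reseed/reproduce/diverge shape of the checks here. */
typedef struct { uint32_t s; } toy_rng;

static void toy_rng_init(toy_rng *r, const unsigned char *key, size_t len) {
    size_t i;
    r->s = 2166136261u;                      /* FNV-1a offset basis */
    for (i = 0; i < len; i++) {
        r->s = (r->s ^ key[i]) * 16777619u;  /* FNV-1a prime */
    }
}

static uint32_t toy_rng_next(toy_rng *r) {
    r->s ^= r->s << 13;                      /* xorshift32 step */
    r->s ^= r->s >> 17;
    r->s ^= r->s << 5;
    return r->s;
}

int main(void) {
    static const unsigned char key[5] = {1, 2, 3, 4, 5};
    uint32_t first[3];
    toy_rng rng;
    int i;
    toy_rng_init(&rng, key, 4);
    for (i = 0; i < 3; i++) { first[i] = toy_rng_next(&rng); }
    /* Same key, same length: the stream must reproduce exactly. */
    toy_rng_init(&rng, key, 4);
    for (i = 0; i < 3; i++) { assert(toy_rng_next(&rng) == first[i]); }
    /* One extra key byte: every block should differ, matching the
     * memcmp(...) != 0 checks in the real test; a match here would
     * require a seed coincidence for these fixed inputs. */
    toy_rng_init(&rng, key, 5);
    for (i = 0; i < 3; i++) { assert(toy_rng_next(&rng) != first[i]); }
    return 0;
}

With a fixed key the sketch is fully deterministic, which is the same property the real test relies on: the generator is a PRF of the seed material, so any change to the seed, including its length, changes the whole stream.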
secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 65); - for (i = 0; i < 3; i++) { - secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32); - CHECK(memcmp(out, out1[i], 32) != 0); - } - secp256k1_rfc6979_hmac_sha256_finalize(&rng); - - secp256k1_rfc6979_hmac_sha256_initialize(&rng, key2, 64); - for (i = 0; i < 3; i++) { - secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32); - CHECK(memcmp(out, out2[i], 32) == 0); - } - secp256k1_rfc6979_hmac_sha256_finalize(&rng); -} - -/***** RANDOM TESTS *****/ - -void test_rand_bits(int rand32, int bits) { - /* (1-1/2^B)^rounds[B] < 1/10^9, so rounds is the number of iterations to - * get a false negative chance below once in a billion */ - static const unsigned int rounds[7] = {1, 30, 73, 156, 322, 653, 1316}; - /* We try multiplying the results with various odd numbers, which shouldn't - * influence the uniform distribution modulo a power of 2. */ - static const uint32_t mults[6] = {1, 3, 21, 289, 0x9999, 0x80402011}; - /* We only select up to 6 bits from the output to analyse */ - unsigned int usebits = bits > 6 ? 6 : bits; - unsigned int maxshift = bits - usebits; - /* For each of the maxshift+1 usebits-bit sequences inside a bits-bit - number, track all observed outcomes, one per bit in a uint64_t. */ - uint64_t x[6][27] = {{0}}; - unsigned int i, shift, m; - /* Multiply the output of all rand calls with the odd number m, which - should not change the uniformity of its distribution. */ - for (i = 0; i < rounds[usebits]; i++) { - uint32_t r = (rand32 ? secp256k1_rand32() : secp256k1_rand_bits(bits)); - CHECK((((uint64_t)r) >> bits) == 0); - for (m = 0; m < sizeof(mults) / sizeof(mults[0]); m++) { - uint32_t rm = r * mults[m]; - for (shift = 0; shift <= maxshift; shift++) { - x[m][shift] |= (((uint64_t)1) << ((rm >> shift) & ((1 << usebits) - 1))); - } - } - } - for (m = 0; m < sizeof(mults) / sizeof(mults[0]); m++) { - for (shift = 0; shift <= maxshift; shift++) { - /* Test that the lower usebits bits of x[shift] are 1 */ - CHECK(((~x[m][shift]) << (64 - (1 << usebits))) == 0); - } - } -} - -/* Subrange must be a whole divisor of range, and at most 64 */ -void test_rand_int(uint32_t range, uint32_t subrange) { - /* (1-1/subrange)^rounds < 1/10^9 */ - int rounds = (subrange * 2073) / 100; - int i; - uint64_t x = 0; - CHECK((range % subrange) == 0); - for (i = 0; i < rounds; i++) { - uint32_t r = secp256k1_rand_int(range); - CHECK(r < range); - r = r % subrange; - x |= (((uint64_t)1) << r); - } - /* Test that the lower subrange bits of x are 1. 
*/ - CHECK(((~x) << (64 - subrange)) == 0); -} - -void run_rand_bits(void) { - size_t b; - test_rand_bits(1, 32); - for (b = 1; b <= 32; b++) { - test_rand_bits(0, b); - } -} - -void run_rand_int(void) { - static const uint32_t ms[] = {1, 3, 17, 1000, 13771, 999999, 33554432}; - static const uint32_t ss[] = {1, 3, 6, 9, 13, 31, 64}; - unsigned int m, s; - for (m = 0; m < sizeof(ms) / sizeof(ms[0]); m++) { - for (s = 0; s < sizeof(ss) / sizeof(ss[0]); s++) { - test_rand_int(ms[m] * ss[s], ss[s]); - } - } -} - -/***** NUM TESTS *****/ - -#ifndef USE_NUM_NONE -void random_num_negate(secp256k1_num *num) { - if (secp256k1_rand_bits(1)) { - secp256k1_num_negate(num); - } -} - -void random_num_order_test(secp256k1_num *num) { - secp256k1_scalar sc; - random_scalar_order_test(&sc); - secp256k1_scalar_get_num(num, &sc); -} - -void random_num_order(secp256k1_num *num) { - secp256k1_scalar sc; - random_scalar_order(&sc); - secp256k1_scalar_get_num(num, &sc); -} - -void test_num_negate(void) { - secp256k1_num n1; - secp256k1_num n2; - random_num_order_test(&n1); /* n1 = R */ - random_num_negate(&n1); - secp256k1_num_copy(&n2, &n1); /* n2 = R */ - secp256k1_num_sub(&n1, &n2, &n1); /* n1 = n2-n1 = 0 */ - CHECK(secp256k1_num_is_zero(&n1)); - secp256k1_num_copy(&n1, &n2); /* n1 = R */ - secp256k1_num_negate(&n1); /* n1 = -R */ - CHECK(!secp256k1_num_is_zero(&n1)); - secp256k1_num_add(&n1, &n2, &n1); /* n1 = n2+n1 = 0 */ - CHECK(secp256k1_num_is_zero(&n1)); - secp256k1_num_copy(&n1, &n2); /* n1 = R */ - secp256k1_num_negate(&n1); /* n1 = -R */ - CHECK(secp256k1_num_is_neg(&n1) != secp256k1_num_is_neg(&n2)); - secp256k1_num_negate(&n1); /* n1 = R */ - CHECK(secp256k1_num_eq(&n1, &n2)); -} - -void test_num_add_sub(void) { - int i; - secp256k1_scalar s; - secp256k1_num n1; - secp256k1_num n2; - secp256k1_num n1p2, n2p1, n1m2, n2m1; - random_num_order_test(&n1); /* n1 = R1 */ - if (secp256k1_rand_bits(1)) { - random_num_negate(&n1); - } - random_num_order_test(&n2); /* n2 = R2 */ - if (secp256k1_rand_bits(1)) { - random_num_negate(&n2); - } - secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = R1 + R2 */ - secp256k1_num_add(&n2p1, &n2, &n1); /* n2p1 = R2 + R1 */ - secp256k1_num_sub(&n1m2, &n1, &n2); /* n1m2 = R1 - R2 */ - secp256k1_num_sub(&n2m1, &n2, &n1); /* n2m1 = R2 - R1 */ - CHECK(secp256k1_num_eq(&n1p2, &n2p1)); - CHECK(!secp256k1_num_eq(&n1p2, &n1m2)); - secp256k1_num_negate(&n2m1); /* n2m1 = -R2 + R1 */ - CHECK(secp256k1_num_eq(&n2m1, &n1m2)); - CHECK(!secp256k1_num_eq(&n2m1, &n1)); - secp256k1_num_add(&n2m1, &n2m1, &n2); /* n2m1 = -R2 + R1 + R2 = R1 */ - CHECK(secp256k1_num_eq(&n2m1, &n1)); - CHECK(!secp256k1_num_eq(&n2p1, &n1)); - secp256k1_num_sub(&n2p1, &n2p1, &n2); /* n2p1 = R2 + R1 - R2 = R1 */ - CHECK(secp256k1_num_eq(&n2p1, &n1)); - - /* check is_one */ - secp256k1_scalar_set_int(&s, 1); - secp256k1_scalar_get_num(&n1, &s); - CHECK(secp256k1_num_is_one(&n1)); - /* check that 2^n + 1 is never 1 */ - secp256k1_scalar_get_num(&n2, &s); - for (i = 0; i < 250; ++i) { - secp256k1_num_add(&n1, &n1, &n1); /* n1 *= 2 */ - secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = n1 + 1 */ - CHECK(!secp256k1_num_is_one(&n1p2)); - } -} - -void test_num_mod(void) { - int i; - secp256k1_scalar s; - secp256k1_num order, n; - - /* check that 0 mod anything is 0 */ - random_scalar_order_test(&s); - secp256k1_scalar_get_num(&order, &s); - secp256k1_scalar_set_int(&s, 0); - secp256k1_scalar_get_num(&n, &s); - secp256k1_num_mod(&n, &order); - CHECK(secp256k1_num_is_zero(&n)); - - /* check that anything mod 1 is 0 */ - 
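The round counts in the random-distribution tests above come from solving (1 - 1/s)^r < 10^-9 for r: since -ln(1 - 1/s) >= 1/s, taking r = s * ln(10^9) ~= 20.73 * s suffices, which is exactly the (subrange * 2073) / 100 formula in test_rand_int (the rounds[] table in test_rand_bits is the same bound with s = 2^B). A short standalone check of that bound, sweeping the same ss[] subrange values used by run_rand_int above (compile with -lm):

#include <math.h>
#include <stddef.h>
#include <stdio.h>

/* Numeric check of the false-negative bound used by test_rand_int:
 * with rounds = (subrange * 2073) / 100 draws, the chance that one
 * fixed residue class is never hit is (1 - 1/subrange)^rounds, and
 * it should land below 1e-9 for every tested subrange. */
int main(void) {
    static const unsigned int subranges[] = {1, 3, 6, 9, 13, 31, 64};
    size_t i;
    for (i = 0; i < sizeof(subranges) / sizeof(subranges[0]); i++) {
        unsigned int s = subranges[i];
        unsigned int rounds = (s * 2073) / 100;
        double miss = pow(1.0 - 1.0 / s, rounds);
        printf("s=%2u rounds=%4u miss=%.3g\n", s, rounds, miss);
    }
    return 0;
}

Even at the largest value, s = 64, the per-class miss probability comes out just under 10^-9 (roughly 8.5e-10), so a spurious CHECK failure from an unlucky but correct generator is negligible.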
secp256k1_scalar_set_int(&s, 1); - secp256k1_scalar_get_num(&order, &s); - secp256k1_scalar_get_num(&n, &s); - secp256k1_num_mod(&n, &order); - CHECK(secp256k1_num_is_zero(&n)); - - /* check that increasing the number past 2^256 does not break this */ - random_scalar_order_test(&s); - secp256k1_scalar_get_num(&n, &s); - /* multiply by 2^8, which'll test this case with high probability */ - for (i = 0; i < 8; ++i) { - secp256k1_num_add(&n, &n, &n); - } - secp256k1_num_mod(&n, &order); - CHECK(secp256k1_num_is_zero(&n)); -} - -void test_num_jacobi(void) { - secp256k1_scalar sqr; - secp256k1_scalar small; - secp256k1_scalar five; /* five is not a quadratic residue */ - secp256k1_num order, n; - int i; - /* squares mod 5 are 1, 4 */ - const int jacobi5[10] = { 0, 1, -1, -1, 1, 0, 1, -1, -1, 1 }; - - /* check some small values with 5 as the order */ - secp256k1_scalar_set_int(&five, 5); - secp256k1_scalar_get_num(&order, &five); - for (i = 0; i < 10; ++i) { - secp256k1_scalar_set_int(&small, i); - secp256k1_scalar_get_num(&n, &small); - CHECK(secp256k1_num_jacobi(&n, &order) == jacobi5[i]); - } - - /** test large values with 5 as group order */ - secp256k1_scalar_get_num(&order, &five); - /* we first need a scalar which is not a multiple of 5 */ - do { - secp256k1_num fiven; - random_scalar_order_test(&sqr); - secp256k1_scalar_get_num(&fiven, &five); - secp256k1_scalar_get_num(&n, &sqr); - secp256k1_num_mod(&n, &fiven); - } while (secp256k1_num_is_zero(&n)); - /* next force it to be a residue. 2 is a nonresidue mod 5 so we can - * just multiply by two, i.e. add the number to itself */ - if (secp256k1_num_jacobi(&n, &order) == -1) { - secp256k1_num_add(&n, &n, &n); - } - - /* test residue */ - CHECK(secp256k1_num_jacobi(&n, &order) == 1); - /* test nonresidue */ - secp256k1_num_add(&n, &n, &n); - CHECK(secp256k1_num_jacobi(&n, &order) == -1); - - /** test with secp group order as order */ - secp256k1_scalar_order_get_num(&order); - random_scalar_order_test(&sqr); - secp256k1_scalar_sqr(&sqr, &sqr); - /* test residue */ - secp256k1_scalar_get_num(&n, &sqr); - CHECK(secp256k1_num_jacobi(&n, &order) == 1); - /* test nonresidue */ - secp256k1_scalar_mul(&sqr, &sqr, &five); - secp256k1_scalar_get_num(&n, &sqr); - CHECK(secp256k1_num_jacobi(&n, &order) == -1); - /* test multiple of the order*/ - CHECK(secp256k1_num_jacobi(&order, &order) == 0); - - /* check one less than the order */ - secp256k1_scalar_set_int(&small, 1); - secp256k1_scalar_get_num(&n, &small); - secp256k1_num_sub(&n, &order, &n); - CHECK(secp256k1_num_jacobi(&n, &order) == 1); /* sage confirms this is 1 */ -} - -void run_num_smalltests(void) { - int i; - for (i = 0; i < 100*count; i++) { - test_num_negate(); - test_num_add_sub(); - test_num_mod(); - test_num_jacobi(); - } -} -#endif - -/***** SCALAR TESTS *****/ - -void scalar_test(void) { - secp256k1_scalar s; - secp256k1_scalar s1; - secp256k1_scalar s2; -#ifndef USE_NUM_NONE - secp256k1_num snum, s1num, s2num; - secp256k1_num order, half_order; -#endif - unsigned char c[32]; - - /* Set 's' to a random scalar, with value 'snum'. */ - random_scalar_order_test(&s); - - /* Set 's1' to a random scalar, with value 's1num'. */ - random_scalar_order_test(&s1); - - /* Set 's2' to a random scalar, with value 'snum2', and byte array representation 'c'. 
*/ - random_scalar_order_test(&s2); - secp256k1_scalar_get_b32(c, &s2); - -#ifndef USE_NUM_NONE - secp256k1_scalar_get_num(&snum, &s); - secp256k1_scalar_get_num(&s1num, &s1); - secp256k1_scalar_get_num(&s2num, &s2); - - secp256k1_scalar_order_get_num(&order); - half_order = order; - secp256k1_num_shift(&half_order, 1); -#endif - - { - int i; - /* Test that fetching groups of 4 bits from a scalar and recursing n(i)=16*n(i-1)+p(i) reconstructs it. */ - secp256k1_scalar n; - secp256k1_scalar_set_int(&n, 0); - for (i = 0; i < 256; i += 4) { - secp256k1_scalar t; - int j; - secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits(&s, 256 - 4 - i, 4)); - for (j = 0; j < 4; j++) { - secp256k1_scalar_add(&n, &n, &n); - } - secp256k1_scalar_add(&n, &n, &t); - } - CHECK(secp256k1_scalar_eq(&n, &s)); - } - - { - /* Test that fetching groups of randomly-sized bits from a scalar and recursing n(i)=b*n(i-1)+p(i) reconstructs it. */ - secp256k1_scalar n; - int i = 0; - secp256k1_scalar_set_int(&n, 0); - while (i < 256) { - secp256k1_scalar t; - int j; - int now = secp256k1_rand_int(15) + 1; - if (now + i > 256) { - now = 256 - i; - } - secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits_var(&s, 256 - now - i, now)); - for (j = 0; j < now; j++) { - secp256k1_scalar_add(&n, &n, &n); - } - secp256k1_scalar_add(&n, &n, &t); - i += now; - } - CHECK(secp256k1_scalar_eq(&n, &s)); - } - -#ifndef USE_NUM_NONE - { - /* Test that adding the scalars together is equal to adding their numbers together modulo the order. */ - secp256k1_num rnum; - secp256k1_num r2num; - secp256k1_scalar r; - secp256k1_num_add(&rnum, &snum, &s2num); - secp256k1_num_mod(&rnum, &order); - secp256k1_scalar_add(&r, &s, &s2); - secp256k1_scalar_get_num(&r2num, &r); - CHECK(secp256k1_num_eq(&rnum, &r2num)); - } - - { - /* Test that multiplying the scalars is equal to multiplying their numbers modulo the order. */ - secp256k1_scalar r; - secp256k1_num r2num; - secp256k1_num rnum; - secp256k1_num_mul(&rnum, &snum, &s2num); - secp256k1_num_mod(&rnum, &order); - secp256k1_scalar_mul(&r, &s, &s2); - secp256k1_scalar_get_num(&r2num, &r); - CHECK(secp256k1_num_eq(&rnum, &r2num)); - /* The result can only be zero if at least one of the factors was zero. */ - CHECK(secp256k1_scalar_is_zero(&r) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_zero(&s2))); - /* The results can only be equal to one of the factors if that factor was zero, or the other factor was one. */ - CHECK(secp256k1_num_eq(&rnum, &snum) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_one(&s2))); - CHECK(secp256k1_num_eq(&rnum, &s2num) == (secp256k1_scalar_is_zero(&s2) || secp256k1_scalar_is_one(&s))); - } - - { - secp256k1_scalar neg; - secp256k1_num negnum; - secp256k1_num negnum2; - /* Check that comparison with zero matches comparison with zero on the number. */ - CHECK(secp256k1_num_is_zero(&snum) == secp256k1_scalar_is_zero(&s)); - /* Check that comparison with the half order is equal to testing for high scalar. */ - CHECK(secp256k1_scalar_is_high(&s) == (secp256k1_num_cmp(&snum, &half_order) > 0)); - secp256k1_scalar_negate(&neg, &s); - secp256k1_num_sub(&negnum, &order, &snum); - secp256k1_num_mod(&negnum, &order); - /* Check that comparison with the half order is equal to testing for high scalar after negation. */ - CHECK(secp256k1_scalar_is_high(&neg) == (secp256k1_num_cmp(&negnum, &half_order) > 0)); - /* Negating should change the high property, unless the value was already zero. 
*/ - CHECK((secp256k1_scalar_is_high(&s) == secp256k1_scalar_is_high(&neg)) == secp256k1_scalar_is_zero(&s)); - secp256k1_scalar_get_num(&negnum2, &neg); - /* Negating a scalar should be equal to (order - n) mod order on the number. */ - CHECK(secp256k1_num_eq(&negnum, &negnum2)); - secp256k1_scalar_add(&neg, &neg, &s); - /* Adding a number to its negation should result in zero. */ - CHECK(secp256k1_scalar_is_zero(&neg)); - secp256k1_scalar_negate(&neg, &neg); - /* Negating zero should still result in zero. */ - CHECK(secp256k1_scalar_is_zero(&neg)); - } - - { - /* Test secp256k1_scalar_mul_shift_var. */ - secp256k1_scalar r; - secp256k1_num one; - secp256k1_num rnum; - secp256k1_num rnum2; - unsigned char cone[1] = {0x01}; - unsigned int shift = 256 + secp256k1_rand_int(257); - secp256k1_scalar_mul_shift_var(&r, &s1, &s2, shift); - secp256k1_num_mul(&rnum, &s1num, &s2num); - secp256k1_num_shift(&rnum, shift - 1); - secp256k1_num_set_bin(&one, cone, 1); - secp256k1_num_add(&rnum, &rnum, &one); - secp256k1_num_shift(&rnum, 1); - secp256k1_scalar_get_num(&rnum2, &r); - CHECK(secp256k1_num_eq(&rnum, &rnum2)); - } - - { - /* test secp256k1_scalar_shr_int */ - secp256k1_scalar r; - int i; - random_scalar_order_test(&r); - for (i = 0; i < 100; ++i) { - int low; - int shift = 1 + secp256k1_rand_int(15); - int expected = r.d[0] % (1 << shift); - low = secp256k1_scalar_shr_int(&r, shift); - CHECK(expected == low); - } - } -#endif - - { - /* Test that scalar inverses are equal to the inverse of their number modulo the order. */ - if (!secp256k1_scalar_is_zero(&s)) { - secp256k1_scalar inv; -#ifndef USE_NUM_NONE - secp256k1_num invnum; - secp256k1_num invnum2; -#endif - secp256k1_scalar_inverse(&inv, &s); -#ifndef USE_NUM_NONE - secp256k1_num_mod_inverse(&invnum, &snum, &order); - secp256k1_scalar_get_num(&invnum2, &inv); - CHECK(secp256k1_num_eq(&invnum, &invnum2)); -#endif - secp256k1_scalar_mul(&inv, &inv, &s); - /* Multiplying a scalar with its inverse must result in one. */ - CHECK(secp256k1_scalar_is_one(&inv)); - secp256k1_scalar_inverse(&inv, &inv); - /* Inverting one must result in one. */ - CHECK(secp256k1_scalar_is_one(&inv)); -#ifndef USE_NUM_NONE - secp256k1_scalar_get_num(&invnum, &inv); - CHECK(secp256k1_num_is_one(&invnum)); -#endif - } - } - - { - /* Test commutativity of add. */ - secp256k1_scalar r1, r2; - secp256k1_scalar_add(&r1, &s1, &s2); - secp256k1_scalar_add(&r2, &s2, &s1); - CHECK(secp256k1_scalar_eq(&r1, &r2)); - } - - { - secp256k1_scalar r1, r2; - secp256k1_scalar b; - int i; - /* Test add_bit. */ - int bit = secp256k1_rand_bits(8); - secp256k1_scalar_set_int(&b, 1); - CHECK(secp256k1_scalar_is_one(&b)); - for (i = 0; i < bit; i++) { - secp256k1_scalar_add(&b, &b, &b); - } - r1 = s1; - r2 = s1; - if (!secp256k1_scalar_add(&r1, &r1, &b)) { - /* No overflow happened. */ - secp256k1_scalar_cadd_bit(&r2, bit, 1); - CHECK(secp256k1_scalar_eq(&r1, &r2)); - /* cadd is a noop when flag is zero */ - secp256k1_scalar_cadd_bit(&r2, bit, 0); - CHECK(secp256k1_scalar_eq(&r1, &r2)); - } - } - - { - /* Test commutativity of mul. */ - secp256k1_scalar r1, r2; - secp256k1_scalar_mul(&r1, &s1, &s2); - secp256k1_scalar_mul(&r2, &s2, &s1); - CHECK(secp256k1_scalar_eq(&r1, &r2)); - } - - { - /* Test associativity of add. */ - secp256k1_scalar r1, r2; - secp256k1_scalar_add(&r1, &s1, &s2); - secp256k1_scalar_add(&r1, &r1, &s); - secp256k1_scalar_add(&r2, &s2, &s); - secp256k1_scalar_add(&r2, &s1, &r2); - CHECK(secp256k1_scalar_eq(&r1, &r2)); - } - - { - /* Test associativity of mul. 
*/ - secp256k1_scalar r1, r2; - secp256k1_scalar_mul(&r1, &s1, &s2); - secp256k1_scalar_mul(&r1, &r1, &s); - secp256k1_scalar_mul(&r2, &s2, &s); - secp256k1_scalar_mul(&r2, &s1, &r2); - CHECK(secp256k1_scalar_eq(&r1, &r2)); - } - - { - /* Test distributitivity of mul over add. */ - secp256k1_scalar r1, r2, t; - secp256k1_scalar_add(&r1, &s1, &s2); - secp256k1_scalar_mul(&r1, &r1, &s); - secp256k1_scalar_mul(&r2, &s1, &s); - secp256k1_scalar_mul(&t, &s2, &s); - secp256k1_scalar_add(&r2, &r2, &t); - CHECK(secp256k1_scalar_eq(&r1, &r2)); - } - - { - /* Test square. */ - secp256k1_scalar r1, r2; - secp256k1_scalar_sqr(&r1, &s1); - secp256k1_scalar_mul(&r2, &s1, &s1); - CHECK(secp256k1_scalar_eq(&r1, &r2)); - } - - { - /* Test multiplicative identity. */ - secp256k1_scalar r1, v1; - secp256k1_scalar_set_int(&v1,1); - secp256k1_scalar_mul(&r1, &s1, &v1); - CHECK(secp256k1_scalar_eq(&r1, &s1)); - } - - { - /* Test additive identity. */ - secp256k1_scalar r1, v0; - secp256k1_scalar_set_int(&v0,0); - secp256k1_scalar_add(&r1, &s1, &v0); - CHECK(secp256k1_scalar_eq(&r1, &s1)); - } - - { - /* Test zero product property. */ - secp256k1_scalar r1, v0; - secp256k1_scalar_set_int(&v0,0); - secp256k1_scalar_mul(&r1, &s1, &v0); - CHECK(secp256k1_scalar_eq(&r1, &v0)); - } - -} - -void run_scalar_tests(void) { - int i; - for (i = 0; i < 128 * count; i++) { - scalar_test(); - } - - { - /* (-1)+1 should be zero. */ - secp256k1_scalar s, o; - secp256k1_scalar_set_int(&s, 1); - CHECK(secp256k1_scalar_is_one(&s)); - secp256k1_scalar_negate(&o, &s); - secp256k1_scalar_add(&o, &o, &s); - CHECK(secp256k1_scalar_is_zero(&o)); - secp256k1_scalar_negate(&o, &o); - CHECK(secp256k1_scalar_is_zero(&o)); - } - -#ifndef USE_NUM_NONE - { - /* A scalar with value of the curve order should be 0. */ - secp256k1_num order; - secp256k1_scalar zero; - unsigned char bin[32]; - int overflow = 0; - secp256k1_scalar_order_get_num(&order); - secp256k1_num_get_bin(bin, 32, &order); - secp256k1_scalar_set_b32(&zero, bin, &overflow); - CHECK(overflow == 1); - CHECK(secp256k1_scalar_is_zero(&zero)); - } -#endif - - { - /* Does check_overflow check catch all ones? */ - static const secp256k1_scalar overflowed = SECP256K1_SCALAR_CONST( - 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, - 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL - ); - CHECK(secp256k1_scalar_check_overflow(&overflowed)); - } - - { - /* Static test vectors. - * These were reduced from ~10^12 random vectors based on comparison-decision - * and edge-case coverage on 32-bit and 64-bit implementations. - * The responses were generated with Sage 5.9. 
- */ - secp256k1_scalar x; - secp256k1_scalar y; - secp256k1_scalar z; - secp256k1_scalar zz; - secp256k1_scalar one; - secp256k1_scalar r1; - secp256k1_scalar r2; -#if defined(USE_SCALAR_INV_NUM) - secp256k1_scalar zzv; -#endif - int overflow; - unsigned char chal[33][2][32] = { - {{0xff, 0xff, 0x03, 0x07, 0x00, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, - 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, - 0xff, 0xff, 0x03, 0x00, 0xc0, 0xff, 0xff, 0xff}, - {0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff}}, - {{0xef, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, - 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x80, 0xff}}, - {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x80, 0xff, 0x3f, 0x00, 0x00, - 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0x00}, - {0x00, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff, 0x80, - 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x00, 0xe0, - 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x7f, 0xff, 0xff, 0xff}}, - {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x00, 0x1e, 0xf8, 0xff, 0xff, 0xff, 0xfd, 0xff}, - {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, - 0x00, 0x00, 0x00, 0xf8, 0xff, 0x03, 0x00, 0xe0, - 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0xf0, 0xff, - 0xf3, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00}}, - {{0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0x00, - 0x00, 0x1c, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xe0, 0xff, 0xff, 0xff, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff}, - {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00, - 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0x1f, 0x00, 0x00, 0x80, 0xff, 0xff, 0x3f, - 0x00, 0xfe, 0xff, 0xff, 0xff, 0xdf, 0xff, 0xff}}, - {{0xff, 0xff, 0xff, 0xff, 0x00, 0x0f, 0xfc, 0x9f, - 0xff, 0xff, 0xff, 0x00, 0x80, 0x00, 0x00, 0x80, - 0xff, 0x0f, 0xfc, 0xff, 0x7f, 0x00, 0x00, 0x00, - 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00}, - {0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, - 0x00, 0x00, 0xf8, 0xff, 0x0f, 0xc0, 0xff, 0xff, - 0xff, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xff, 0xff, - 0xff, 0xff, 0xff, 0x07, 0x80, 0xff, 0xff, 0xff}}, - {{0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, - 0xf7, 0xff, 0xff, 0xef, 0xff, 0xff, 0xff, 0x00, - 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xf0}, - {0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, - {{0x00, 0xf8, 0xff, 0x03, 0xff, 0xff, 0xff, 0x00, - 0x00, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0x03, 0xc0, 0xff, 0x0f, 0xfc, 0xff}, - {0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xff, 0xff, - 0xff, 0x01, 0x00, 0x00, 0x00, 0x3f, 0x00, 0xc0, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, - {{0x8f, 0x0f, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0x7f, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00}, - {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - {{0x00, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0x03, 0x00, 0x80, 0x00, 0x00, 0x80, - 0xff, 0xff, 0xff, 0x00, 0x00, 0x80, 0xff, 0x7f}, - {0xff, 0xcf, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x00, 0xc0, 0xff, 0xcf, 0xff, 0xff, 0xff, 0xff, - 0xbf, 0xff, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00}}, - {{0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, - 0xff, 0xff, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0x00, 0x80, 0x00, 0x00, 0x80, - 0xff, 0x01, 0xfc, 0xff, 0x01, 0x00, 0xfe, 0xff}, - {0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00}}, - {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x7f, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80}, - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0xf8, 0xff, 0x01, 0x00, 0xf0, 0xff, 0xff, - 0xe0, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0x00}, - {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, - 0xfc, 0xff, 0xff, 0x3f, 0xf0, 0xff, 0xff, 0x3f, - 0x00, 0x00, 0xf8, 0x07, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x0f, 0x7e, 0x00, 0x00}}, - {{0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0x1f, 0x00, 0x00, 0xfe, 0x07, 0x00}, - {0x00, 0x00, 0x00, 0xf0, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xfb, 0xff, 0x07, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60}}, - {{0xff, 0x01, 0x00, 0xff, 0xff, 0xff, 0x0f, 0x00, - 0x80, 0x7f, 0xfe, 0xff, 0xff, 0xff, 0xff, 0x03, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - {0xff, 0xff, 0x1f, 0x00, 0xf0, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00, 0x00}}, - {{0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, - 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff}}, - {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xc0, 0xff, 0xff, 0xcf, 0xff, 0x1f, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80}, - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, - 0xff, 0xff, 0xff, 
0xff, 0xff, 0x3f, 0x00, 0x7e, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x00}, - {0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, - 0xff, 0xff, 0x7f, 0x00, 0x80, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff}}, - {{0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x80, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00}, - {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x80, - 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, - 0xff, 0x7f, 0xf8, 0xff, 0xff, 0x1f, 0x00, 0xfe}}, - {{0xff, 0xff, 0xff, 0x3f, 0xf8, 0xff, 0xff, 0xff, - 0xff, 0x03, 0xfe, 0x01, 0x00, 0x00, 0x00, 0x00, - 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07}, - {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, - 0xff, 0xff, 0xff, 0xff, 0x01, 0x80, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00}}, - {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, - 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40}}, - {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, - {{0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xc0, - 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f}, - {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, - 0xf0, 0xff, 0xff, 0xff, 0xff, 0x07, 0x00, 0x00, - 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x01, 0xff, 0xff, 0xff}}, - {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}}, - {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, - 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40}, - {0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}}, - {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x7e, 0x00, 0x00, 0xc0, 0xff, 0xff, 0x07, 0x00, - 0x80, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, - 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - {0xff, 0x01, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x80, - 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, - {{0xff, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0x00, - 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, - 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff}, - {0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, - 0xff, 0xff, 0x3f, 0x00, 0xf8, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0x3f, 0x00, 0x00, 0xc0, 0xf1, 0x7f, 0x00}}, - {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0x00}, - {0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, - 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x80, 0x1f, - 0x00, 0x00, 0xfc, 0xff, 0xff, 0x01, 0xff, 0xff}}, - {{0x00, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x80, 0x00, 0x00, 0x80, 0xff, 0x03, 0xe0, 0x01, - 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0xfc, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00}, - {0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0xfe, 0xff, 0xff, 0xf0, 0x07, 0x00, 0x3c, 0x80, - 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, - 0xff, 0xff, 0x07, 0xe0, 0xff, 0x00, 0x00, 0x00}}, - {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, - 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0xf8, - 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80}, - {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0x0c, 0x80, 0x00, - 0x00, 0x00, 0x00, 0xc0, 0x7f, 0xfe, 0xff, 0x1f, - 0x00, 0xfe, 0xff, 0x03, 0x00, 0x00, 0xfe, 0xff}}, - {{0xff, 0xff, 0x81, 0xff, 0xff, 0xff, 0xff, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x83, - 0xff, 0xff, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80, - 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0xf0}, - {0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, - 0xf8, 0x07, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xc7, 0xff, 0xff, 0xe0, 0xff, 0xff, 0xff}}, - {{0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00, - 0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00, - 0xff, 0xff, 0xff, 0xff, 0xff, 0x6f, 0x03, 0xfb, - 0xfa, 0x8a, 0x7d, 0xdf, 0x13, 0x86, 0xe2, 0x03}, - {0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00, - 0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00, - 0xff, 0xff, 0xff, 0xff, 0xff, 0x6f, 0x03, 0xfb, - 0xfa, 0x8a, 0x7d, 0xdf, 0x13, 0x86, 0xe2, 0x03}} - }; - unsigned char res[33][2][32] = { - {{0x0c, 0x3b, 0x0a, 0xca, 0x8d, 0x1a, 0x2f, 0xb9, - 0x8a, 0x7b, 0x53, 0x5a, 0x1f, 0xc5, 0x22, 0xa1, - 0x07, 0x2a, 0x48, 0xea, 0x02, 0xeb, 0xb3, 0xd6, - 0x20, 0x1e, 0x86, 0xd0, 0x95, 0xf6, 0x92, 0x35}, - {0xdc, 0x90, 0x7a, 0x07, 0x2e, 0x1e, 0x44, 0x6d, - 0xf8, 0x15, 0x24, 0x5b, 0x5a, 0x96, 0x37, 0x9c, - 0x37, 0x7b, 0x0d, 0xac, 0x1b, 0x65, 0x58, 0x49, - 0x43, 0xb7, 0x31, 0xbb, 0xa7, 0xf4, 0x97, 0x15}}, - {{0xf1, 0xf7, 0x3a, 0x50, 0xe6, 0x10, 0xba, 0x22, - 0x43, 0x4d, 0x1f, 0x1f, 0x7c, 
0x27, 0xca, 0x9c, - 0xb8, 0xb6, 0xa0, 0xfc, 0xd8, 0xc0, 0x05, 0x2f, - 0xf7, 0x08, 0xe1, 0x76, 0xdd, 0xd0, 0x80, 0xc8}, - {0xe3, 0x80, 0x80, 0xb8, 0xdb, 0xe3, 0xa9, 0x77, - 0x00, 0xb0, 0xf5, 0x2e, 0x27, 0xe2, 0x68, 0xc4, - 0x88, 0xe8, 0x04, 0xc1, 0x12, 0xbf, 0x78, 0x59, - 0xe6, 0xa9, 0x7c, 0xe1, 0x81, 0xdd, 0xb9, 0xd5}}, - {{0x96, 0xe2, 0xee, 0x01, 0xa6, 0x80, 0x31, 0xef, - 0x5c, 0xd0, 0x19, 0xb4, 0x7d, 0x5f, 0x79, 0xab, - 0xa1, 0x97, 0xd3, 0x7e, 0x33, 0xbb, 0x86, 0x55, - 0x60, 0x20, 0x10, 0x0d, 0x94, 0x2d, 0x11, 0x7c}, - {0xcc, 0xab, 0xe0, 0xe8, 0x98, 0x65, 0x12, 0x96, - 0x38, 0x5a, 0x1a, 0xf2, 0x85, 0x23, 0x59, 0x5f, - 0xf9, 0xf3, 0xc2, 0x81, 0x70, 0x92, 0x65, 0x12, - 0x9c, 0x65, 0x1e, 0x96, 0x00, 0xef, 0xe7, 0x63}}, - {{0xac, 0x1e, 0x62, 0xc2, 0x59, 0xfc, 0x4e, 0x5c, - 0x83, 0xb0, 0xd0, 0x6f, 0xce, 0x19, 0xf6, 0xbf, - 0xa4, 0xb0, 0xe0, 0x53, 0x66, 0x1f, 0xbf, 0xc9, - 0x33, 0x47, 0x37, 0xa9, 0x3d, 0x5d, 0xb0, 0x48}, - {0x86, 0xb9, 0x2a, 0x7f, 0x8e, 0xa8, 0x60, 0x42, - 0x26, 0x6d, 0x6e, 0x1c, 0xa2, 0xec, 0xe0, 0xe5, - 0x3e, 0x0a, 0x33, 0xbb, 0x61, 0x4c, 0x9f, 0x3c, - 0xd1, 0xdf, 0x49, 0x33, 0xcd, 0x72, 0x78, 0x18}}, - {{0xf7, 0xd3, 0xcd, 0x49, 0x5c, 0x13, 0x22, 0xfb, - 0x2e, 0xb2, 0x2f, 0x27, 0xf5, 0x8a, 0x5d, 0x74, - 0xc1, 0x58, 0xc5, 0xc2, 0x2d, 0x9f, 0x52, 0xc6, - 0x63, 0x9f, 0xba, 0x05, 0x76, 0x45, 0x7a, 0x63}, - {0x8a, 0xfa, 0x55, 0x4d, 0xdd, 0xa3, 0xb2, 0xc3, - 0x44, 0xfd, 0xec, 0x72, 0xde, 0xef, 0xc0, 0x99, - 0xf5, 0x9f, 0xe2, 0x52, 0xb4, 0x05, 0x32, 0x58, - 0x57, 0xc1, 0x8f, 0xea, 0xc3, 0x24, 0x5b, 0x94}}, - {{0x05, 0x83, 0xee, 0xdd, 0x64, 0xf0, 0x14, 0x3b, - 0xa0, 0x14, 0x4a, 0x3a, 0x41, 0x82, 0x7c, 0xa7, - 0x2c, 0xaa, 0xb1, 0x76, 0xbb, 0x59, 0x64, 0x5f, - 0x52, 0xad, 0x25, 0x29, 0x9d, 0x8f, 0x0b, 0xb0}, - {0x7e, 0xe3, 0x7c, 0xca, 0xcd, 0x4f, 0xb0, 0x6d, - 0x7a, 0xb2, 0x3e, 0xa0, 0x08, 0xb9, 0xa8, 0x2d, - 0xc2, 0xf4, 0x99, 0x66, 0xcc, 0xac, 0xd8, 0xb9, - 0x72, 0x2a, 0x4a, 0x3e, 0x0f, 0x7b, 0xbf, 0xf4}}, - {{0x8c, 0x9c, 0x78, 0x2b, 0x39, 0x61, 0x7e, 0xf7, - 0x65, 0x37, 0x66, 0x09, 0x38, 0xb9, 0x6f, 0x70, - 0x78, 0x87, 0xff, 0xcf, 0x93, 0xca, 0x85, 0x06, - 0x44, 0x84, 0xa7, 0xfe, 0xd3, 0xa4, 0xe3, 0x7e}, - {0xa2, 0x56, 0x49, 0x23, 0x54, 0xa5, 0x50, 0xe9, - 0x5f, 0xf0, 0x4d, 0xe7, 0xdc, 0x38, 0x32, 0x79, - 0x4f, 0x1c, 0xb7, 0xe4, 0xbb, 0xf8, 0xbb, 0x2e, - 0x40, 0x41, 0x4b, 0xcc, 0xe3, 0x1e, 0x16, 0x36}}, - {{0x0c, 0x1e, 0xd7, 0x09, 0x25, 0x40, 0x97, 0xcb, - 0x5c, 0x46, 0xa8, 0xda, 0xef, 0x25, 0xd5, 0xe5, - 0x92, 0x4d, 0xcf, 0xa3, 0xc4, 0x5d, 0x35, 0x4a, - 0xe4, 0x61, 0x92, 0xf3, 0xbf, 0x0e, 0xcd, 0xbe}, - {0xe4, 0xaf, 0x0a, 0xb3, 0x30, 0x8b, 0x9b, 0x48, - 0x49, 0x43, 0xc7, 0x64, 0x60, 0x4a, 0x2b, 0x9e, - 0x95, 0x5f, 0x56, 0xe8, 0x35, 0xdc, 0xeb, 0xdc, - 0xc7, 0xc4, 0xfe, 0x30, 0x40, 0xc7, 0xbf, 0xa4}}, - {{0xd4, 0xa0, 0xf5, 0x81, 0x49, 0x6b, 0xb6, 0x8b, - 0x0a, 0x69, 0xf9, 0xfe, 0xa8, 0x32, 0xe5, 0xe0, - 0xa5, 0xcd, 0x02, 0x53, 0xf9, 0x2c, 0xe3, 0x53, - 0x83, 0x36, 0xc6, 0x02, 0xb5, 0xeb, 0x64, 0xb8}, - {0x1d, 0x42, 0xb9, 0xf9, 0xe9, 0xe3, 0x93, 0x2c, - 0x4c, 0xee, 0x6c, 0x5a, 0x47, 0x9e, 0x62, 0x01, - 0x6b, 0x04, 0xfe, 0xa4, 0x30, 0x2b, 0x0d, 0x4f, - 0x71, 0x10, 0xd3, 0x55, 0xca, 0xf3, 0x5e, 0x80}}, - {{0x77, 0x05, 0xf6, 0x0c, 0x15, 0x9b, 0x45, 0xe7, - 0xb9, 0x11, 0xb8, 0xf5, 0xd6, 0xda, 0x73, 0x0c, - 0xda, 0x92, 0xea, 0xd0, 0x9d, 0xd0, 0x18, 0x92, - 0xce, 0x9a, 0xaa, 0xee, 0x0f, 0xef, 0xde, 0x30}, - {0xf1, 0xf1, 0xd6, 0x9b, 0x51, 0xd7, 0x77, 0x62, - 0x52, 0x10, 0xb8, 0x7a, 0x84, 0x9d, 0x15, 0x4e, - 0x07, 0xdc, 0x1e, 0x75, 0x0d, 0x0c, 0x3b, 0xdb, - 0x74, 0x58, 0x62, 0x02, 0x90, 
0x54, 0x8b, 0x43}}, - {{0xa6, 0xfe, 0x0b, 0x87, 0x80, 0x43, 0x67, 0x25, - 0x57, 0x5d, 0xec, 0x40, 0x50, 0x08, 0xd5, 0x5d, - 0x43, 0xd7, 0xe0, 0xaa, 0xe0, 0x13, 0xb6, 0xb0, - 0xc0, 0xd4, 0xe5, 0x0d, 0x45, 0x83, 0xd6, 0x13}, - {0x40, 0x45, 0x0a, 0x92, 0x31, 0xea, 0x8c, 0x60, - 0x8c, 0x1f, 0xd8, 0x76, 0x45, 0xb9, 0x29, 0x00, - 0x26, 0x32, 0xd8, 0xa6, 0x96, 0x88, 0xe2, 0xc4, - 0x8b, 0xdb, 0x7f, 0x17, 0x87, 0xcc, 0xc8, 0xf2}}, - {{0xc2, 0x56, 0xe2, 0xb6, 0x1a, 0x81, 0xe7, 0x31, - 0x63, 0x2e, 0xbb, 0x0d, 0x2f, 0x81, 0x67, 0xd4, - 0x22, 0xe2, 0x38, 0x02, 0x25, 0x97, 0xc7, 0x88, - 0x6e, 0xdf, 0xbe, 0x2a, 0xa5, 0x73, 0x63, 0xaa}, - {0x50, 0x45, 0xe2, 0xc3, 0xbd, 0x89, 0xfc, 0x57, - 0xbd, 0x3c, 0xa3, 0x98, 0x7e, 0x7f, 0x36, 0x38, - 0x92, 0x39, 0x1f, 0x0f, 0x81, 0x1a, 0x06, 0x51, - 0x1f, 0x8d, 0x6a, 0xff, 0x47, 0x16, 0x06, 0x9c}}, - {{0x33, 0x95, 0xa2, 0x6f, 0x27, 0x5f, 0x9c, 0x9c, - 0x64, 0x45, 0xcb, 0xd1, 0x3c, 0xee, 0x5e, 0x5f, - 0x48, 0xa6, 0xaf, 0xe3, 0x79, 0xcf, 0xb1, 0xe2, - 0xbf, 0x55, 0x0e, 0xa2, 0x3b, 0x62, 0xf0, 0xe4}, - {0x14, 0xe8, 0x06, 0xe3, 0xbe, 0x7e, 0x67, 0x01, - 0xc5, 0x21, 0x67, 0xd8, 0x54, 0xb5, 0x7f, 0xa4, - 0xf9, 0x75, 0x70, 0x1c, 0xfd, 0x79, 0xdb, 0x86, - 0xad, 0x37, 0x85, 0x83, 0x56, 0x4e, 0xf0, 0xbf}}, - {{0xbc, 0xa6, 0xe0, 0x56, 0x4e, 0xef, 0xfa, 0xf5, - 0x1d, 0x5d, 0x3f, 0x2a, 0x5b, 0x19, 0xab, 0x51, - 0xc5, 0x8b, 0xdd, 0x98, 0x28, 0x35, 0x2f, 0xc3, - 0x81, 0x4f, 0x5c, 0xe5, 0x70, 0xb9, 0xeb, 0x62}, - {0xc4, 0x6d, 0x26, 0xb0, 0x17, 0x6b, 0xfe, 0x6c, - 0x12, 0xf8, 0xe7, 0xc1, 0xf5, 0x2f, 0xfa, 0x91, - 0x13, 0x27, 0xbd, 0x73, 0xcc, 0x33, 0x31, 0x1c, - 0x39, 0xe3, 0x27, 0x6a, 0x95, 0xcf, 0xc5, 0xfb}}, - {{0x30, 0xb2, 0x99, 0x84, 0xf0, 0x18, 0x2a, 0x6e, - 0x1e, 0x27, 0xed, 0xa2, 0x29, 0x99, 0x41, 0x56, - 0xe8, 0xd4, 0x0d, 0xef, 0x99, 0x9c, 0xf3, 0x58, - 0x29, 0x55, 0x1a, 0xc0, 0x68, 0xd6, 0x74, 0xa4}, - {0x07, 0x9c, 0xe7, 0xec, 0xf5, 0x36, 0x73, 0x41, - 0xa3, 0x1c, 0xe5, 0x93, 0x97, 0x6a, 0xfd, 0xf7, - 0x53, 0x18, 0xab, 0xaf, 0xeb, 0x85, 0xbd, 0x92, - 0x90, 0xab, 0x3c, 0xbf, 0x30, 0x82, 0xad, 0xf6}}, - {{0xc6, 0x87, 0x8a, 0x2a, 0xea, 0xc0, 0xa9, 0xec, - 0x6d, 0xd3, 0xdc, 0x32, 0x23, 0xce, 0x62, 0x19, - 0xa4, 0x7e, 0xa8, 0xdd, 0x1c, 0x33, 0xae, 0xd3, - 0x4f, 0x62, 0x9f, 0x52, 0xe7, 0x65, 0x46, 0xf4}, - {0x97, 0x51, 0x27, 0x67, 0x2d, 0xa2, 0x82, 0x87, - 0x98, 0xd3, 0xb6, 0x14, 0x7f, 0x51, 0xd3, 0x9a, - 0x0b, 0xd0, 0x76, 0x81, 0xb2, 0x4f, 0x58, 0x92, - 0xa4, 0x86, 0xa1, 0xa7, 0x09, 0x1d, 0xef, 0x9b}}, - {{0xb3, 0x0f, 0x2b, 0x69, 0x0d, 0x06, 0x90, 0x64, - 0xbd, 0x43, 0x4c, 0x10, 0xe8, 0x98, 0x1c, 0xa3, - 0xe1, 0x68, 0xe9, 0x79, 0x6c, 0x29, 0x51, 0x3f, - 0x41, 0xdc, 0xdf, 0x1f, 0xf3, 0x60, 0xbe, 0x33}, - {0xa1, 0x5f, 0xf7, 0x1d, 0xb4, 0x3e, 0x9b, 0x3c, - 0xe7, 0xbd, 0xb6, 0x06, 0xd5, 0x60, 0x06, 0x6d, - 0x50, 0xd2, 0xf4, 0x1a, 0x31, 0x08, 0xf2, 0xea, - 0x8e, 0xef, 0x5f, 0x7d, 0xb6, 0xd0, 0xc0, 0x27}}, - {{0x62, 0x9a, 0xd9, 0xbb, 0x38, 0x36, 0xce, 0xf7, - 0x5d, 0x2f, 0x13, 0xec, 0xc8, 0x2d, 0x02, 0x8a, - 0x2e, 0x72, 0xf0, 0xe5, 0x15, 0x9d, 0x72, 0xae, - 0xfc, 0xb3, 0x4f, 0x02, 0xea, 0xe1, 0x09, 0xfe}, - {0x00, 0x00, 0x00, 0x00, 0xfa, 0x0a, 0x3d, 0xbc, - 0xad, 0x16, 0x0c, 0xb6, 0xe7, 0x7c, 0x8b, 0x39, - 0x9a, 0x43, 0xbb, 0xe3, 0xc2, 0x55, 0x15, 0x14, - 0x75, 0xac, 0x90, 0x9b, 0x7f, 0x9a, 0x92, 0x00}}, - {{0x8b, 0xac, 0x70, 0x86, 0x29, 0x8f, 0x00, 0x23, - 0x7b, 0x45, 0x30, 0xaa, 0xb8, 0x4c, 0xc7, 0x8d, - 0x4e, 0x47, 0x85, 0xc6, 0x19, 0xe3, 0x96, 0xc2, - 0x9a, 0xa0, 0x12, 0xed, 0x6f, 0xd7, 0x76, 0x16}, - {0x45, 0xaf, 0x7e, 0x33, 0xc7, 0x7f, 0x10, 0x6c, - 0x7c, 0x9f, 0x29, 0xc1, 0xa8, 
0x7e, 0x15, 0x84, - 0xe7, 0x7d, 0xc0, 0x6d, 0xab, 0x71, 0x5d, 0xd0, - 0x6b, 0x9f, 0x97, 0xab, 0xcb, 0x51, 0x0c, 0x9f}}, - {{0x9e, 0xc3, 0x92, 0xb4, 0x04, 0x9f, 0xc8, 0xbb, - 0xdd, 0x9e, 0xc6, 0x05, 0xfd, 0x65, 0xec, 0x94, - 0x7f, 0x2c, 0x16, 0xc4, 0x40, 0xac, 0x63, 0x7b, - 0x7d, 0xb8, 0x0c, 0xe4, 0x5b, 0xe3, 0xa7, 0x0e}, - {0x43, 0xf4, 0x44, 0xe8, 0xcc, 0xc8, 0xd4, 0x54, - 0x33, 0x37, 0x50, 0xf2, 0x87, 0x42, 0x2e, 0x00, - 0x49, 0x60, 0x62, 0x02, 0xfd, 0x1a, 0x7c, 0xdb, - 0x29, 0x6c, 0x6d, 0x54, 0x53, 0x08, 0xd1, 0xc8}}, - {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, - {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}}, - {{0x27, 0x59, 0xc7, 0x35, 0x60, 0x71, 0xa6, 0xf1, - 0x79, 0xa5, 0xfd, 0x79, 0x16, 0xf3, 0x41, 0xf0, - 0x57, 0xb4, 0x02, 0x97, 0x32, 0xe7, 0xde, 0x59, - 0xe2, 0x2d, 0x9b, 0x11, 0xea, 0x2c, 0x35, 0x92}, - {0x27, 0x59, 0xc7, 0x35, 0x60, 0x71, 0xa6, 0xf1, - 0x79, 0xa5, 0xfd, 0x79, 0x16, 0xf3, 0x41, 0xf0, - 0x57, 0xb4, 0x02, 0x97, 0x32, 0xe7, 0xde, 0x59, - 0xe2, 0x2d, 0x9b, 0x11, 0xea, 0x2c, 0x35, 0x92}}, - {{0x28, 0x56, 0xac, 0x0e, 0x4f, 0x98, 0x09, 0xf0, - 0x49, 0xfa, 0x7f, 0x84, 0xac, 0x7e, 0x50, 0x5b, - 0x17, 0x43, 0x14, 0x89, 0x9c, 0x53, 0xa8, 0x94, - 0x30, 0xf2, 0x11, 0x4d, 0x92, 0x14, 0x27, 0xe8}, - {0x39, 0x7a, 0x84, 0x56, 0x79, 0x9d, 0xec, 0x26, - 0x2c, 0x53, 0xc1, 0x94, 0xc9, 0x8d, 0x9e, 0x9d, - 0x32, 0x1f, 0xdd, 0x84, 0x04, 0xe8, 0xe2, 0x0a, - 0x6b, 0xbe, 0xbb, 0x42, 0x40, 0x67, 0x30, 0x6c}}, - {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4, - 0x40, 0x2d, 0xa1, 0x73, 0x2f, 0xc9, 0xbe, 0xbd}, - {0x27, 0x59, 0xc7, 0x35, 0x60, 0x71, 0xa6, 0xf1, - 0x79, 0xa5, 0xfd, 0x79, 0x16, 0xf3, 0x41, 0xf0, - 0x57, 0xb4, 0x02, 0x97, 0x32, 0xe7, 0xde, 0x59, - 0xe2, 0x2d, 0x9b, 0x11, 0xea, 0x2c, 0x35, 0x92}}, - {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, - 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40}, - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}}, - {{0x1c, 0xc4, 0xf7, 0xda, 0x0f, 0x65, 0xca, 0x39, - 0x70, 0x52, 0x92, 0x8e, 0xc3, 0xc8, 0x15, 0xea, - 0x7f, 0x10, 0x9e, 0x77, 0x4b, 0x6e, 0x2d, 0xdf, - 0xe8, 0x30, 0x9d, 0xda, 0xe8, 0x9a, 0x65, 0xae}, - {0x02, 0xb0, 0x16, 0xb1, 0x1d, 0xc8, 0x57, 0x7b, - 0xa2, 0x3a, 0xa2, 0xa3, 0x38, 0x5c, 0x8f, 0xeb, - 0x66, 0x37, 0x91, 0xa8, 0x5f, 0xef, 0x04, 0xf6, - 0x59, 0x75, 0xe1, 0xee, 0x92, 0xf6, 0x0e, 0x30}}, - {{0x8d, 0x76, 0x14, 0xa4, 0x14, 0x06, 0x9f, 0x9a, - 0xdf, 0x4a, 0x85, 0xa7, 0x6b, 0xbf, 0x29, 0x6f, - 0xbc, 0x34, 0x87, 0x5d, 0xeb, 0xbb, 0x2e, 0xa9, - 0xc9, 0x1f, 0x58, 0xd6, 0x9a, 
0x82, 0xa0, 0x56}, - {0xd4, 0xb9, 0xdb, 0x88, 0x1d, 0x04, 0xe9, 0x93, - 0x8d, 0x3f, 0x20, 0xd5, 0x86, 0xa8, 0x83, 0x07, - 0xdb, 0x09, 0xd8, 0x22, 0x1f, 0x7f, 0xf1, 0x71, - 0xc8, 0xe7, 0x5d, 0x47, 0xaf, 0x8b, 0x72, 0xe9}}, - {{0x83, 0xb9, 0x39, 0xb2, 0xa4, 0xdf, 0x46, 0x87, - 0xc2, 0xb8, 0xf1, 0xe6, 0x4c, 0xd1, 0xe2, 0xa9, - 0xe4, 0x70, 0x30, 0x34, 0xbc, 0x52, 0x7c, 0x55, - 0xa6, 0xec, 0x80, 0xa4, 0xe5, 0xd2, 0xdc, 0x73}, - {0x08, 0xf1, 0x03, 0xcf, 0x16, 0x73, 0xe8, 0x7d, - 0xb6, 0x7e, 0x9b, 0xc0, 0xb4, 0xc2, 0xa5, 0x86, - 0x02, 0x77, 0xd5, 0x27, 0x86, 0xa5, 0x15, 0xfb, - 0xae, 0x9b, 0x8c, 0xa9, 0xf9, 0xf8, 0xa8, 0x4a}}, - {{0x8b, 0x00, 0x49, 0xdb, 0xfa, 0xf0, 0x1b, 0xa2, - 0xed, 0x8a, 0x9a, 0x7a, 0x36, 0x78, 0x4a, 0xc7, - 0xf7, 0xad, 0x39, 0xd0, 0x6c, 0x65, 0x7a, 0x41, - 0xce, 0xd6, 0xd6, 0x4c, 0x20, 0x21, 0x6b, 0xc7}, - {0xc6, 0xca, 0x78, 0x1d, 0x32, 0x6c, 0x6c, 0x06, - 0x91, 0xf2, 0x1a, 0xe8, 0x43, 0x16, 0xea, 0x04, - 0x3c, 0x1f, 0x07, 0x85, 0xf7, 0x09, 0x22, 0x08, - 0xba, 0x13, 0xfd, 0x78, 0x1e, 0x3f, 0x6f, 0x62}}, - {{0x25, 0x9b, 0x7c, 0xb0, 0xac, 0x72, 0x6f, 0xb2, - 0xe3, 0x53, 0x84, 0x7a, 0x1a, 0x9a, 0x98, 0x9b, - 0x44, 0xd3, 0x59, 0xd0, 0x8e, 0x57, 0x41, 0x40, - 0x78, 0xa7, 0x30, 0x2f, 0x4c, 0x9c, 0xb9, 0x68}, - {0xb7, 0x75, 0x03, 0x63, 0x61, 0xc2, 0x48, 0x6e, - 0x12, 0x3d, 0xbf, 0x4b, 0x27, 0xdf, 0xb1, 0x7a, - 0xff, 0x4e, 0x31, 0x07, 0x83, 0xf4, 0x62, 0x5b, - 0x19, 0xa5, 0xac, 0xa0, 0x32, 0x58, 0x0d, 0xa7}}, - {{0x43, 0x4f, 0x10, 0xa4, 0xca, 0xdb, 0x38, 0x67, - 0xfa, 0xae, 0x96, 0xb5, 0x6d, 0x97, 0xff, 0x1f, - 0xb6, 0x83, 0x43, 0xd3, 0xa0, 0x2d, 0x70, 0x7a, - 0x64, 0x05, 0x4c, 0xa7, 0xc1, 0xa5, 0x21, 0x51}, - {0xe4, 0xf1, 0x23, 0x84, 0xe1, 0xb5, 0x9d, 0xf2, - 0xb8, 0x73, 0x8b, 0x45, 0x2b, 0x35, 0x46, 0x38, - 0x10, 0x2b, 0x50, 0xf8, 0x8b, 0x35, 0xcd, 0x34, - 0xc8, 0x0e, 0xf6, 0xdb, 0x09, 0x35, 0xf0, 0xda}}, - {{0xdb, 0x21, 0x5c, 0x8d, 0x83, 0x1d, 0xb3, 0x34, - 0xc7, 0x0e, 0x43, 0xa1, 0x58, 0x79, 0x67, 0x13, - 0x1e, 0x86, 0x5d, 0x89, 0x63, 0xe6, 0x0a, 0x46, - 0x5c, 0x02, 0x97, 0x1b, 0x62, 0x43, 0x86, 0xf5}, - {0xdb, 0x21, 0x5c, 0x8d, 0x83, 0x1d, 0xb3, 0x34, - 0xc7, 0x0e, 0x43, 0xa1, 0x58, 0x79, 0x67, 0x13, - 0x1e, 0x86, 0x5d, 0x89, 0x63, 0xe6, 0x0a, 0x46, - 0x5c, 0x02, 0x97, 0x1b, 0x62, 0x43, 0x86, 0xf5}} - }; - secp256k1_scalar_set_int(&one, 1); - for (i = 0; i < 33; i++) { - secp256k1_scalar_set_b32(&x, chal[i][0], &overflow); - CHECK(!overflow); - secp256k1_scalar_set_b32(&y, chal[i][1], &overflow); - CHECK(!overflow); - secp256k1_scalar_set_b32(&r1, res[i][0], &overflow); - CHECK(!overflow); - secp256k1_scalar_set_b32(&r2, res[i][1], &overflow); - CHECK(!overflow); - secp256k1_scalar_mul(&z, &x, &y); - CHECK(!secp256k1_scalar_check_overflow(&z)); - CHECK(secp256k1_scalar_eq(&r1, &z)); - if (!secp256k1_scalar_is_zero(&y)) { - secp256k1_scalar_inverse(&zz, &y); - CHECK(!secp256k1_scalar_check_overflow(&zz)); -#if defined(USE_SCALAR_INV_NUM) - secp256k1_scalar_inverse_var(&zzv, &y); - CHECK(secp256k1_scalar_eq(&zzv, &zz)); -#endif - secp256k1_scalar_mul(&z, &z, &zz); - CHECK(!secp256k1_scalar_check_overflow(&z)); - CHECK(secp256k1_scalar_eq(&x, &z)); - secp256k1_scalar_mul(&zz, &zz, &y); - CHECK(!secp256k1_scalar_check_overflow(&zz)); - CHECK(secp256k1_scalar_eq(&one, &zz)); - } - secp256k1_scalar_mul(&z, &x, &x); - CHECK(!secp256k1_scalar_check_overflow(&z)); - secp256k1_scalar_sqr(&zz, &x); - CHECK(!secp256k1_scalar_check_overflow(&zz)); - CHECK(secp256k1_scalar_eq(&zz, &z)); - CHECK(secp256k1_scalar_eq(&r2, &zz)); - } - } -} - -/***** FIELD TESTS *****/ - -void 
random_fe(secp256k1_fe *x) { - unsigned char bin[32]; - do { - secp256k1_rand256(bin); - if (secp256k1_fe_set_b32(x, bin)) { - return; - } - } while(1); -} - -void random_fe_test(secp256k1_fe *x) { - unsigned char bin[32]; - do { - secp256k1_rand256_test(bin); - if (secp256k1_fe_set_b32(x, bin)) { - return; - } - } while(1); -} - -void random_fe_non_zero(secp256k1_fe *nz) { - int tries = 10; - while (--tries >= 0) { - random_fe(nz); - secp256k1_fe_normalize(nz); - if (!secp256k1_fe_is_zero(nz)) { - break; - } - } - /* Infinitesimal probability of spurious failure here */ - CHECK(tries >= 0); -} - -void random_fe_non_square(secp256k1_fe *ns) { - secp256k1_fe r; - random_fe_non_zero(ns); - if (secp256k1_fe_sqrt(&r, ns)) { - secp256k1_fe_negate(ns, ns, 1); - } -} - -int check_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b) { - secp256k1_fe an = *a; - secp256k1_fe bn = *b; - secp256k1_fe_normalize_weak(&an); - secp256k1_fe_normalize_var(&bn); - return secp256k1_fe_equal_var(&an, &bn); -} - -int check_fe_inverse(const secp256k1_fe *a, const secp256k1_fe *ai) { - secp256k1_fe x; - secp256k1_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); - secp256k1_fe_mul(&x, a, ai); - return check_fe_equal(&x, &one); -} - -void run_field_convert(void) { - static const unsigned char b32[32] = { - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, - 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40 - }; - static const secp256k1_fe_storage fes = SECP256K1_FE_STORAGE_CONST( - 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL, - 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL - ); - static const secp256k1_fe fe = SECP256K1_FE_CONST( - 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL, - 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL - ); - secp256k1_fe fe2; - unsigned char b322[32]; - secp256k1_fe_storage fes2; - /* Check conversions to fe. */ - CHECK(secp256k1_fe_set_b32(&fe2, b32)); - CHECK(secp256k1_fe_equal_var(&fe, &fe2)); - secp256k1_fe_from_storage(&fe2, &fes); - CHECK(secp256k1_fe_equal_var(&fe, &fe2)); - /* Check conversion from fe. */ - secp256k1_fe_get_b32(b322, &fe); - CHECK(memcmp(b322, b32, 32) == 0); - secp256k1_fe_to_storage(&fes2, &fe); - CHECK(memcmp(&fes2, &fes, sizeof(fes)) == 0); -} - -int fe_memcmp(const secp256k1_fe *a, const secp256k1_fe *b) { - secp256k1_fe t = *b; -#ifdef VERIFY - t.magnitude = a->magnitude; - t.normalized = a->normalized; -#endif - return memcmp(a, &t, sizeof(secp256k1_fe)); -} - -void run_field_misc(void) { - secp256k1_fe x; - secp256k1_fe y; - secp256k1_fe z; - secp256k1_fe q; - secp256k1_fe fe5 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 5); - int i, j; - for (i = 0; i < 5*count; i++) { - secp256k1_fe_storage xs, ys, zs; - random_fe(&x); - random_fe_non_zero(&y); - /* Test the fe equality and comparison operations. */ - CHECK(secp256k1_fe_cmp_var(&x, &x) == 0); - CHECK(secp256k1_fe_equal_var(&x, &x)); - z = x; - secp256k1_fe_add(&z,&y); - /* Test fe conditional move; z is not normalized here. 
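- * A rough mental model, not a spec: secp256k1_fe_cmov(r, a, flag) should act like a constant-time 'if (flag) { *r = *a; }', and under VERIFY its magnitude/normalized bookkeeping is expected to take the more pessimistic of the two operands, which is what the VERIFY_CHECKs below assert.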
*/ - q = x; - secp256k1_fe_cmov(&x, &z, 0); - VERIFY_CHECK(!x.normalized && x.magnitude == z.magnitude); - secp256k1_fe_cmov(&x, &x, 1); - CHECK(fe_memcmp(&x, &z) != 0); - CHECK(fe_memcmp(&x, &q) == 0); - secp256k1_fe_cmov(&q, &z, 1); - VERIFY_CHECK(!q.normalized && q.magnitude == z.magnitude); - CHECK(fe_memcmp(&q, &z) == 0); - secp256k1_fe_normalize_var(&x); - secp256k1_fe_normalize_var(&z); - CHECK(!secp256k1_fe_equal_var(&x, &z)); - secp256k1_fe_normalize_var(&q); - secp256k1_fe_cmov(&q, &z, (i&1)); - VERIFY_CHECK(q.normalized && q.magnitude == 1); - for (j = 0; j < 6; j++) { - secp256k1_fe_negate(&z, &z, j+1); - secp256k1_fe_normalize_var(&q); - secp256k1_fe_cmov(&q, &z, (j&1)); - VERIFY_CHECK(!q.normalized && q.magnitude == (j+2)); - } - secp256k1_fe_normalize_var(&z); - /* Test storage conversion and conditional moves. */ - secp256k1_fe_to_storage(&xs, &x); - secp256k1_fe_to_storage(&ys, &y); - secp256k1_fe_to_storage(&zs, &z); - secp256k1_fe_storage_cmov(&zs, &xs, 0); - secp256k1_fe_storage_cmov(&zs, &zs, 1); - CHECK(memcmp(&xs, &zs, sizeof(xs)) != 0); - secp256k1_fe_storage_cmov(&ys, &xs, 1); - CHECK(memcmp(&xs, &ys, sizeof(xs)) == 0); - secp256k1_fe_from_storage(&x, &xs); - secp256k1_fe_from_storage(&y, &ys); - secp256k1_fe_from_storage(&z, &zs); - /* Test that mul_int, mul, and add agree. */ - secp256k1_fe_add(&y, &x); - secp256k1_fe_add(&y, &x); - z = x; - secp256k1_fe_mul_int(&z, 3); - CHECK(check_fe_equal(&y, &z)); - secp256k1_fe_add(&y, &x); - secp256k1_fe_add(&z, &x); - CHECK(check_fe_equal(&z, &y)); - z = x; - secp256k1_fe_mul_int(&z, 5); - secp256k1_fe_mul(&q, &x, &fe5); - CHECK(check_fe_equal(&z, &q)); - secp256k1_fe_negate(&x, &x, 1); - secp256k1_fe_add(&z, &x); - secp256k1_fe_add(&q, &x); - CHECK(check_fe_equal(&y, &z)); - CHECK(check_fe_equal(&q, &y)); - } -} - -void run_field_inv(void) { - secp256k1_fe x, xi, xii; - int i; - for (i = 0; i < 10*count; i++) { - random_fe_non_zero(&x); - secp256k1_fe_inv(&xi, &x); - CHECK(check_fe_inverse(&x, &xi)); - secp256k1_fe_inv(&xii, &xi); - CHECK(check_fe_equal(&x, &xii)); - } -} - -void run_field_inv_var(void) { - secp256k1_fe x, xi, xii; - int i; - for (i = 0; i < 10*count; i++) { - random_fe_non_zero(&x); - secp256k1_fe_inv_var(&xi, &x); - CHECK(check_fe_inverse(&x, &xi)); - secp256k1_fe_inv_var(&xii, &xi); - CHECK(check_fe_equal(&x, &xii)); - } -} - -void run_field_inv_all_var(void) { - secp256k1_fe x[16], xi[16], xii[16]; - int i; - /* Check it's safe to call for 0 elements */ - secp256k1_fe_inv_all_var(xi, x, 0); - for (i = 0; i < count; i++) { - size_t j; - size_t len = secp256k1_rand_int(15) + 1; - for (j = 0; j < len; j++) { - random_fe_non_zero(&x[j]); - } - secp256k1_fe_inv_all_var(xi, x, len); - for (j = 0; j < len; j++) { - CHECK(check_fe_inverse(&x[j], &xi[j])); - } - secp256k1_fe_inv_all_var(xii, xi, len); - for (j = 0; j < len; j++) { - CHECK(check_fe_equal(&x[j], &xii[j])); - } - } -} - -void run_sqr(void) { - secp256k1_fe x, s; - - { - int i; - secp256k1_fe_set_int(&x, 1); - secp256k1_fe_negate(&x, &x, 1); - - for (i = 1; i <= 512; ++i) { - secp256k1_fe_mul_int(&x, 2); - secp256k1_fe_normalize(&x); - secp256k1_fe_sqr(&s, &x); - } - } -} - -void test_sqrt(const secp256k1_fe *a, const secp256k1_fe *k) { - secp256k1_fe r1, r2; - int v = secp256k1_fe_sqrt(&r1, a); - CHECK((v == 0) == (k == NULL)); - - if (k != NULL) { - /* Check that the returned root is +/- the given known answer */ - secp256k1_fe_negate(&r2, &r1, 1); - secp256k1_fe_add(&r1, k); secp256k1_fe_add(&r2, k); - secp256k1_fe_normalize(&r1); 
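- /* Here r1 = k + sqrt(a) and r2 = k - sqrt(a) for whichever root secp256k1_fe_sqrt returned, so exactly one of them normalizes to zero precisely when the computed root equals the known answer up to sign. */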
secp256k1_fe_normalize(&r2); - CHECK(secp256k1_fe_is_zero(&r1) || secp256k1_fe_is_zero(&r2)); - } -} - -void run_sqrt(void) { - secp256k1_fe ns, x, s, t; - int i; - - /* Check sqrt(0) is 0 */ - secp256k1_fe_set_int(&x, 0); - secp256k1_fe_sqr(&s, &x); - test_sqrt(&s, &x); - - /* Check sqrt of small squares (and their negatives) */ - for (i = 1; i <= 100; i++) { - secp256k1_fe_set_int(&x, i); - secp256k1_fe_sqr(&s, &x); - test_sqrt(&s, &x); - secp256k1_fe_negate(&t, &s, 1); - test_sqrt(&t, NULL); - } - - /* Consistency checks for large random values */ - for (i = 0; i < 10; i++) { - int j; - random_fe_non_square(&ns); - for (j = 0; j < count; j++) { - random_fe(&x); - secp256k1_fe_sqr(&s, &x); - test_sqrt(&s, &x); - secp256k1_fe_negate(&t, &s, 1); - test_sqrt(&t, NULL); - secp256k1_fe_mul(&t, &s, &ns); - test_sqrt(&t, NULL); - } - } -} - -/***** GROUP TESTS *****/ - -void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) { - CHECK(a->infinity == b->infinity); - if (a->infinity) { - return; - } - CHECK(secp256k1_fe_equal_var(&a->x, &b->x)); - CHECK(secp256k1_fe_equal_var(&a->y, &b->y)); -} - -/* This compares jacobian points including their Z, not just their geometric meaning. */ -int gej_xyz_equals_gej(const secp256k1_gej *a, const secp256k1_gej *b) { - secp256k1_gej a2; - secp256k1_gej b2; - int ret = 1; - ret &= a->infinity == b->infinity; - if (ret && !a->infinity) { - a2 = *a; - b2 = *b; - secp256k1_fe_normalize(&a2.x); - secp256k1_fe_normalize(&a2.y); - secp256k1_fe_normalize(&a2.z); - secp256k1_fe_normalize(&b2.x); - secp256k1_fe_normalize(&b2.y); - secp256k1_fe_normalize(&b2.z); - ret &= secp256k1_fe_cmp_var(&a2.x, &b2.x) == 0; - ret &= secp256k1_fe_cmp_var(&a2.y, &b2.y) == 0; - ret &= secp256k1_fe_cmp_var(&a2.z, &b2.z) == 0; - } - return ret; -} - -void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) { - secp256k1_fe z2s; - secp256k1_fe u1, u2, s1, s2; - CHECK(a->infinity == b->infinity); - if (a->infinity) { - return; - } - /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */ - secp256k1_fe_sqr(&z2s, &b->z); - secp256k1_fe_mul(&u1, &a->x, &z2s); - u2 = b->x; secp256k1_fe_normalize_weak(&u2); - secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z); - s2 = b->y; secp256k1_fe_normalize_weak(&s2); - CHECK(secp256k1_fe_equal_var(&u1, &u2)); - CHECK(secp256k1_fe_equal_var(&s1, &s2)); -} - -void test_ge(void) { - int i, i1; -#ifdef USE_ENDOMORPHISM - int runs = 6; -#else - int runs = 4; -#endif - /* Points: (infinity, p1, p1, -p1, -p1, p2, p2, -p2, -p2, p3, p3, -p3, -p3, p4, p4, -p4, -p4). - * The second in each pair of identical points uses a random Z coordinate in the Jacobian form. - * All magnitudes are randomized. - * All 17*17 combinations of points are added to each other, using all applicable methods. - * - * When the endomorphism code is compiled in, p5 = lambda*p1 and p6 = lambda^2*p1 are added as well. 
- */ - secp256k1_ge *ge = (secp256k1_ge *)malloc(sizeof(secp256k1_ge) * (1 + 4 * runs)); - secp256k1_gej *gej = (secp256k1_gej *)malloc(sizeof(secp256k1_gej) * (1 + 4 * runs)); - secp256k1_fe *zinv = (secp256k1_fe *)malloc(sizeof(secp256k1_fe) * (1 + 4 * runs)); - secp256k1_fe zf; - secp256k1_fe zfi2, zfi3; - - secp256k1_gej_set_infinity(&gej[0]); - secp256k1_ge_clear(&ge[0]); - secp256k1_ge_set_gej_var(&ge[0], &gej[0]); - for (i = 0; i < runs; i++) { - int j; - secp256k1_ge g; - random_group_element_test(&g); -#ifdef USE_ENDOMORPHISM - if (i >= runs - 2) { - secp256k1_ge_mul_lambda(&g, &ge[1]); - } - if (i >= runs - 1) { - secp256k1_ge_mul_lambda(&g, &g); - } -#endif - ge[1 + 4 * i] = g; - ge[2 + 4 * i] = g; - secp256k1_ge_neg(&ge[3 + 4 * i], &g); - secp256k1_ge_neg(&ge[4 + 4 * i], &g); - secp256k1_gej_set_ge(&gej[1 + 4 * i], &ge[1 + 4 * i]); - random_group_element_jacobian_test(&gej[2 + 4 * i], &ge[2 + 4 * i]); - secp256k1_gej_set_ge(&gej[3 + 4 * i], &ge[3 + 4 * i]); - random_group_element_jacobian_test(&gej[4 + 4 * i], &ge[4 + 4 * i]); - for (j = 0; j < 4; j++) { - random_field_element_magnitude(&ge[1 + j + 4 * i].x); - random_field_element_magnitude(&ge[1 + j + 4 * i].y); - random_field_element_magnitude(&gej[1 + j + 4 * i].x); - random_field_element_magnitude(&gej[1 + j + 4 * i].y); - random_field_element_magnitude(&gej[1 + j + 4 * i].z); - } - } - - /* Compute z inverses. */ - { - secp256k1_fe *zs = malloc(sizeof(secp256k1_fe) * (1 + 4 * runs)); - for (i = 0; i < 4 * runs + 1; i++) { - if (i == 0) { - /* The point at infinity does not have a meaningful z inverse. Any should do. */ - do { - random_field_element_test(&zs[i]); - } while(secp256k1_fe_is_zero(&zs[i])); - } else { - zs[i] = gej[i].z; - } - } - secp256k1_fe_inv_all_var(zinv, zs, 4 * runs + 1); - free(zs); - } - - /* Generate random zf, and zfi2 = 1/zf^2, zfi3 = 1/zf^3 */ - do { - random_field_element_test(&zf); - } while(secp256k1_fe_is_zero(&zf)); - random_field_element_magnitude(&zf); - secp256k1_fe_inv_var(&zfi3, &zf); - secp256k1_fe_sqr(&zfi2, &zfi3); - secp256k1_fe_mul(&zfi3, &zfi3, &zfi2); - - for (i1 = 0; i1 < 1 + 4 * runs; i1++) { - int i2; - for (i2 = 0; i2 < 1 + 4 * runs; i2++) { - /* Compute reference result using gej + gej (var). */ - secp256k1_gej refj, resj; - secp256k1_ge ref; - secp256k1_fe zr; - secp256k1_gej_add_var(&refj, &gej[i1], &gej[i2], secp256k1_gej_is_infinity(&gej[i1]) ? NULL : &zr); - /* Check Z ratio. */ - if (!secp256k1_gej_is_infinity(&gej[i1]) && !secp256k1_gej_is_infinity(&refj)) { - secp256k1_fe zrz; secp256k1_fe_mul(&zrz, &zr, &gej[i1].z); - CHECK(secp256k1_fe_equal_var(&zrz, &refj.z)); - } - secp256k1_ge_set_gej_var(&ref, &refj); - - /* Test gej + ge with Z ratio result (var). */ - secp256k1_gej_add_ge_var(&resj, &gej[i1], &ge[i2], secp256k1_gej_is_infinity(&gej[i1]) ? NULL : &zr); - ge_equals_gej(&ref, &resj); - if (!secp256k1_gej_is_infinity(&gej[i1]) && !secp256k1_gej_is_infinity(&resj)) { - secp256k1_fe zrz; secp256k1_fe_mul(&zrz, &zr, &gej[i1].z); - CHECK(secp256k1_fe_equal_var(&zrz, &resj.z)); - } - - /* Test gej + ge (var, with additional Z factor). */ - { - secp256k1_ge ge2_zfi = ge[i2]; /* the second term with x and y rescaled for z = 1/zf */ - secp256k1_fe_mul(&ge2_zfi.x, &ge2_zfi.x, &zfi2); - secp256k1_fe_mul(&ge2_zfi.y, &ge2_zfi.y, &zfi3); - random_field_element_magnitude(&ge2_zfi.x); - random_field_element_magnitude(&ge2_zfi.y); - secp256k1_gej_add_zinv_var(&resj, &gej[i1], &ge2_zfi, &zf); - ge_equals_gej(&ref, &resj); - } - - /* Test gej + ge (const). 
*/ - if (i2 != 0) { - /* secp256k1_gej_add_ge does not support its second argument being infinity. */ - secp256k1_gej_add_ge(&resj, &gej[i1], &ge[i2]); - ge_equals_gej(&ref, &resj); - } - - /* Test doubling (var). */ - if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 == ((i2 + 3)%4)/2)) { - secp256k1_fe zr2; - /* Normal doubling with Z ratio result. */ - secp256k1_gej_double_var(&resj, &gej[i1], &zr2); - ge_equals_gej(&ref, &resj); - /* Check Z ratio. */ - secp256k1_fe_mul(&zr2, &zr2, &gej[i1].z); - CHECK(secp256k1_fe_equal_var(&zr2, &resj.z)); - /* Normal doubling. */ - secp256k1_gej_double_var(&resj, &gej[i2], NULL); - ge_equals_gej(&ref, &resj); - } - - /* Test adding opposites. */ - if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 != ((i2 + 3)%4)/2)) { - CHECK(secp256k1_ge_is_infinity(&ref)); - } - - /* Test adding infinity. */ - if (i1 == 0) { - CHECK(secp256k1_ge_is_infinity(&ge[i1])); - CHECK(secp256k1_gej_is_infinity(&gej[i1])); - ge_equals_gej(&ref, &gej[i2]); - } - if (i2 == 0) { - CHECK(secp256k1_ge_is_infinity(&ge[i2])); - CHECK(secp256k1_gej_is_infinity(&gej[i2])); - ge_equals_gej(&ref, &gej[i1]); - } - } - } - - /* Test adding all points together in random order equals infinity. */ - { - secp256k1_gej sum = SECP256K1_GEJ_CONST_INFINITY; - secp256k1_gej *gej_shuffled = (secp256k1_gej *)malloc((4 * runs + 1) * sizeof(secp256k1_gej)); - for (i = 0; i < 4 * runs + 1; i++) { - gej_shuffled[i] = gej[i]; - } - for (i = 0; i < 4 * runs + 1; i++) { - int swap = i + secp256k1_rand_int(4 * runs + 1 - i); - if (swap != i) { - secp256k1_gej t = gej_shuffled[i]; - gej_shuffled[i] = gej_shuffled[swap]; - gej_shuffled[swap] = t; - } - } - for (i = 0; i < 4 * runs + 1; i++) { - secp256k1_gej_add_var(&sum, &sum, &gej_shuffled[i], NULL); - } - CHECK(secp256k1_gej_is_infinity(&sum)); - free(gej_shuffled); - } - - /* Test batch gej -> ge conversion with and without known z ratios. */ - { - secp256k1_fe *zr = (secp256k1_fe *)malloc((4 * runs + 1) * sizeof(secp256k1_fe)); - secp256k1_ge *ge_set_table = (secp256k1_ge *)malloc((4 * runs + 1) * sizeof(secp256k1_ge)); - secp256k1_ge *ge_set_all = (secp256k1_ge *)malloc((4 * runs + 1) * sizeof(secp256k1_ge)); - for (i = 0; i < 4 * runs + 1; i++) { - /* Compute gej[i + 1].z / gej[i].z (with gej[n].z taken to be 1). */ - if (i < 4 * runs) { - secp256k1_fe_mul(&zr[i + 1], &zinv[i], &gej[i + 1].z); - } - } - secp256k1_ge_set_table_gej_var(ge_set_table, gej, zr, 4 * runs + 1); - secp256k1_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1, &ctx->error_callback); - for (i = 0; i < 4 * runs + 1; i++) { - secp256k1_fe s; - random_fe_non_zero(&s); - secp256k1_gej_rescale(&gej[i], &s); - ge_equals_gej(&ge_set_table[i], &gej[i]); - ge_equals_gej(&ge_set_all[i], &gej[i]); - } - free(ge_set_table); - free(ge_set_all); - free(zr); - } - - free(ge); - free(gej); - free(zinv); -} - -void test_add_neg_y_diff_x(void) { - /* The point of this test is to check that we can add two points - * whose y-coordinates are negatives of each other but whose x - * coordinates differ. If the x-coordinates were the same, these - * points would be negatives of each other and their sum is - * infinity. This is cool because it "covers up" any degeneracy - * in the addition algorithm that would cause the xy coordinates - * of the sum to be wrong (since infinity has no xy coordinates). - * HOWEVER, if the x-coordinates are different, infinity is the - * wrong answer, and such degeneracies are exposed.
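- * (Concretely: an addition formula that returned infinity whenever y1 == -y2, without also requiring x1 == x2, would pass the doubling and negation tests above and still be wrong; this case exposes it.)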
This is the - * root of https://github.com/bitcoin-core/secp256k1/issues/257 - * which this test is a regression test for. - * - * These points were generated in sage as - * # secp256k1 params - * F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F) - * C = EllipticCurve ([F (0), F (7)]) - * G = C.lift_x(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798) - * N = FiniteField(G.order()) - * - * # endomorphism values (lambda is 1^{1/3} in N, beta is 1^{1/3} in F) - * x = polygen(N) - * lam = (1 - x^3).roots()[1][0] - * - * # random "bad pair" - * P = C.random_element() - * Q = -int(lam) * P - * print " P: %x %x" % P.xy() - * print " Q: %x %x" % Q.xy() - * print "P + Q: %x %x" % (P + Q).xy() - */ - secp256k1_gej aj = SECP256K1_GEJ_CONST( - 0x8d24cd95, 0x0a355af1, 0x3c543505, 0x44238d30, - 0x0643d79f, 0x05a59614, 0x2f8ec030, 0xd58977cb, - 0x001e337a, 0x38093dcd, 0x6c0f386d, 0x0b1293a8, - 0x4d72c879, 0xd7681924, 0x44e6d2f3, 0x9190117d - ); - secp256k1_gej bj = SECP256K1_GEJ_CONST( - 0xc7b74206, 0x1f788cd9, 0xabd0937d, 0x164a0d86, - 0x95f6ff75, 0xf19a4ce9, 0xd013bd7b, 0xbf92d2a7, - 0xffe1cc85, 0xc7f6c232, 0x93f0c792, 0xf4ed6c57, - 0xb28d3786, 0x2897e6db, 0xbb192d0b, 0x6e6feab2 - ); - secp256k1_gej sumj = SECP256K1_GEJ_CONST( - 0x671a63c0, 0x3efdad4c, 0x389a7798, 0x24356027, - 0xb3d69010, 0x278625c3, 0x5c86d390, 0x184a8f7a, - 0x5f6409c2, 0x2ce01f2b, 0x511fd375, 0x25071d08, - 0xda651801, 0x70e95caf, 0x8f0d893c, 0xbed8fbbe - ); - secp256k1_ge b; - secp256k1_gej resj; - secp256k1_ge res; - secp256k1_ge_set_gej(&b, &bj); - - secp256k1_gej_add_var(&resj, &aj, &bj, NULL); - secp256k1_ge_set_gej(&res, &resj); - ge_equals_gej(&res, &sumj); - - secp256k1_gej_add_ge(&resj, &aj, &b); - secp256k1_ge_set_gej(&res, &resj); - ge_equals_gej(&res, &sumj); - - secp256k1_gej_add_ge_var(&resj, &aj, &b, NULL); - secp256k1_ge_set_gej(&res, &resj); - ge_equals_gej(&res, &sumj); -} - -void run_ge(void) { - int i; - for (i = 0; i < count * 32; i++) { - test_ge(); - } - test_add_neg_y_diff_x(); -} - -void test_ec_combine(void) { - secp256k1_scalar sum = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - secp256k1_pubkey data[6]; - const secp256k1_pubkey* d[6]; - secp256k1_pubkey sd; - secp256k1_pubkey sd2; - secp256k1_gej Qj; - secp256k1_ge Q; - int i; - for (i = 1; i <= 6; i++) { - secp256k1_scalar s; - random_scalar_order_test(&s); - secp256k1_scalar_add(&sum, &sum, &s); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &s); - secp256k1_ge_set_gej(&Q, &Qj); - secp256k1_pubkey_save(&data[i - 1], &Q); - d[i - 1] = &data[i - 1]; - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sum); - secp256k1_ge_set_gej(&Q, &Qj); - secp256k1_pubkey_save(&sd, &Q); - CHECK(secp256k1_ec_pubkey_combine(ctx, &sd2, d, i) == 1); - CHECK(memcmp(&sd, &sd2, sizeof(sd)) == 0); - } -} - -void run_ec_combine(void) { - int i; - for (i = 0; i < count * 8; i++) { - test_ec_combine(); - } -} - -void test_group_decompress(const secp256k1_fe* x) { - /* The input itself, normalized. */ - secp256k1_fe fex = *x; - secp256k1_fe fez; - /* Results of set_xquad_var, set_xo_var(..., 0), set_xo_var(..., 1). */ - secp256k1_ge ge_quad, ge_even, ge_odd; - secp256k1_gej gej_quad; - /* Return values of the above calls. 
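- * All three variants must agree on whether any y exists for this x; set_xquad is expected to pick the y that is itself a quadratic residue, while set_xo_var(..., odd) picks y by its parity, which is what the checks below verify.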
*/ - int res_quad, res_even, res_odd; - - secp256k1_fe_normalize_var(&fex); - - res_quad = secp256k1_ge_set_xquad(&ge_quad, &fex); - res_even = secp256k1_ge_set_xo_var(&ge_even, &fex, 0); - res_odd = secp256k1_ge_set_xo_var(&ge_odd, &fex, 1); - - CHECK(res_quad == res_even); - CHECK(res_quad == res_odd); - - if (res_quad) { - secp256k1_fe_normalize_var(&ge_quad.x); - secp256k1_fe_normalize_var(&ge_odd.x); - secp256k1_fe_normalize_var(&ge_even.x); - secp256k1_fe_normalize_var(&ge_quad.y); - secp256k1_fe_normalize_var(&ge_odd.y); - secp256k1_fe_normalize_var(&ge_even.y); - - /* No infinity allowed. */ - CHECK(!ge_quad.infinity); - CHECK(!ge_even.infinity); - CHECK(!ge_odd.infinity); - - /* Check that the x coordinates check out. */ - CHECK(secp256k1_fe_equal_var(&ge_quad.x, x)); - CHECK(secp256k1_fe_equal_var(&ge_even.x, x)); - CHECK(secp256k1_fe_equal_var(&ge_odd.x, x)); - - /* Check that the Y coordinate result in ge_quad is a square. */ - CHECK(secp256k1_fe_is_quad_var(&ge_quad.y)); - - /* Check odd/even Y in ge_odd, ge_even. */ - CHECK(secp256k1_fe_is_odd(&ge_odd.y)); - CHECK(!secp256k1_fe_is_odd(&ge_even.y)); - - /* Check secp256k1_gej_has_quad_y_var. */ - secp256k1_gej_set_ge(&gej_quad, &ge_quad); - CHECK(secp256k1_gej_has_quad_y_var(&gej_quad)); - do { - random_fe_test(&fez); - } while (secp256k1_fe_is_zero(&fez)); - secp256k1_gej_rescale(&gej_quad, &fez); - CHECK(secp256k1_gej_has_quad_y_var(&gej_quad)); - secp256k1_gej_neg(&gej_quad, &gej_quad); - CHECK(!secp256k1_gej_has_quad_y_var(&gej_quad)); - do { - random_fe_test(&fez); - } while (secp256k1_fe_is_zero(&fez)); - secp256k1_gej_rescale(&gej_quad, &fez); - CHECK(!secp256k1_gej_has_quad_y_var(&gej_quad)); - secp256k1_gej_neg(&gej_quad, &gej_quad); - CHECK(secp256k1_gej_has_quad_y_var(&gej_quad)); - } -} - -void run_group_decompress(void) { - int i; - for (i = 0; i < count * 4; i++) { - secp256k1_fe fe; - random_fe_test(&fe); - test_group_decompress(&fe); - } -} - -/***** ECMULT TESTS *****/ - -void run_ecmult_chain(void) { - /* random starting point A (on the curve) */ - secp256k1_gej a = SECP256K1_GEJ_CONST( - 0x8b30bbe9, 0xae2a9906, 0x96b22f67, 0x0709dff3, - 0x727fd8bc, 0x04d3362c, 0x6c7bf458, 0xe2846004, - 0xa357ae91, 0x5c4a6528, 0x1309edf2, 0x0504740f, - 0x0eb33439, 0x90216b4f, 0x81063cb6, 0x5f2f7e0f - ); - /* two random initial factors xn and gn */ - secp256k1_scalar xn = SECP256K1_SCALAR_CONST( - 0x84cc5452, 0xf7fde1ed, 0xb4d38a8c, 0xe9b1b84c, - 0xcef31f14, 0x6e569be9, 0x705d357a, 0x42985407 - ); - secp256k1_scalar gn = SECP256K1_SCALAR_CONST( - 0xa1e58d22, 0x553dcd42, 0xb2398062, 0x5d4c57a9, - 0x6e9323d4, 0x2b3152e5, 0xca2c3990, 0xedc7c9de - ); - /* two small multipliers to be applied to xn and gn in every iteration: */ - static const secp256k1_scalar xf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x1337); - static const secp256k1_scalar gf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x7113); - /* accumulators with the resulting coefficients to A and G */ - secp256k1_scalar ae = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); - secp256k1_scalar ge = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - /* actual points */ - secp256k1_gej x; - secp256k1_gej x2; - int i; - - /* the point being computed */ - x = a; - for (i = 0; i < 200*count; i++) { - /* in each iteration, compute X = xn*X + gn*G; */ - secp256k1_ecmult(&ctx->ecmult_ctx, &x, &x, &xn, &gn); - /* also compute ae and ge: the actual accumulated factors for A and G */ - /* if X was (ae*A+ge*G), xn*X + gn*G results in (xn*ae*A + (xn*ge+gn)*G) */ - 
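- /* Equivalently, the accumulators follow the recurrence ae' = ae*xn and ge' = ge*xn + gn, which the two multiplications and the addition just below implement. */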
secp256k1_scalar_mul(&ae, &ae, &xn); - secp256k1_scalar_mul(&ge, &ge, &xn); - secp256k1_scalar_add(&ge, &ge, &gn); - /* modify xn and gn */ - secp256k1_scalar_mul(&xn, &xn, &xf); - secp256k1_scalar_mul(&gn, &gn, &gf); - - /* verify */ - if (i == 19999) { - /* expected result after 19999 iterations */ - secp256k1_gej rp = SECP256K1_GEJ_CONST( - 0xD6E96687, 0xF9B10D09, 0x2A6F3543, 0x9D86CEBE, - 0xA4535D0D, 0x409F5358, 0x6440BD74, 0xB933E830, - 0xB95CBCA2, 0xC77DA786, 0x539BE8FD, 0x53354D2D, - 0x3B4F566A, 0xE6580454, 0x07ED6015, 0xEE1B2A88 - ); - - secp256k1_gej_neg(&rp, &rp); - secp256k1_gej_add_var(&rp, &rp, &x, NULL); - CHECK(secp256k1_gej_is_infinity(&rp)); - } - } - /* redo the computation, but directly with the resulting ae and ge coefficients: */ - secp256k1_ecmult(&ctx->ecmult_ctx, &x2, &a, &ae, &ge); - secp256k1_gej_neg(&x2, &x2); - secp256k1_gej_add_var(&x2, &x2, &x, NULL); - CHECK(secp256k1_gej_is_infinity(&x2)); -} - -void test_point_times_order(const secp256k1_gej *point) { - /* X * (point + G) + (order-X) * (point + G) = 0 */ - secp256k1_scalar x; - secp256k1_scalar nx; - secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - secp256k1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); - secp256k1_gej res1, res2; - secp256k1_ge res3; - unsigned char pub[65]; - size_t psize = 65; - random_scalar_order_test(&x); - secp256k1_scalar_negate(&nx, &x); - secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &x, &x); /* calc res1 = x * point + x * G; */ - secp256k1_ecmult(&ctx->ecmult_ctx, &res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */ - secp256k1_gej_add_var(&res1, &res1, &res2, NULL); - CHECK(secp256k1_gej_is_infinity(&res1)); - CHECK(secp256k1_gej_is_valid_var(&res1) == 0); - secp256k1_ge_set_gej(&res3, &res1); - CHECK(secp256k1_ge_is_infinity(&res3)); - CHECK(secp256k1_ge_is_valid_var(&res3) == 0); - CHECK(secp256k1_eckey_pubkey_serialize(&res3, pub, &psize, 0) == 0); - psize = 65; - CHECK(secp256k1_eckey_pubkey_serialize(&res3, pub, &psize, 1) == 0); - /* check zero/one edge cases */ - secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &zero); - secp256k1_ge_set_gej(&res3, &res1); - CHECK(secp256k1_ge_is_infinity(&res3)); - secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &one, &zero); - secp256k1_ge_set_gej(&res3, &res1); - ge_equals_gej(&res3, point); - secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &one); - secp256k1_ge_set_gej(&res3, &res1); - ge_equals_ge(&res3, &secp256k1_ge_const_g); -} - -void run_point_times_order(void) { - int i; - secp256k1_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2); - static const secp256k1_fe xr = SECP256K1_FE_CONST( - 0x7603CB59, 0xB0EF6C63, 0xFE608479, 0x2A0C378C, - 0xDB3233A8, 0x0F8A9A09, 0xA877DEAD, 0x31B38C45 - ); - for (i = 0; i < 500; i++) { - secp256k1_ge p; - if (secp256k1_ge_set_xo_var(&p, &x, 1)) { - secp256k1_gej j; - CHECK(secp256k1_ge_is_valid_var(&p)); - secp256k1_gej_set_ge(&j, &p); - CHECK(secp256k1_gej_is_valid_var(&j)); - test_point_times_order(&j); - } - secp256k1_fe_sqr(&x, &x); - } - secp256k1_fe_normalize_var(&x); - CHECK(secp256k1_fe_equal_var(&x, &xr)); -} - -void ecmult_const_random_mult(void) { - /* random starting point A (on the curve) */ - secp256k1_ge a = SECP256K1_GE_CONST( - 0x6d986544, 0x57ff52b8, 0xcf1b8126, 0x5b802a5b, - 0xa97f9263, 0xb1e88044, 0x93351325, 0x91bc450a, - 0x535c59f7, 0x325e5d2b, 0xc391fbe8, 0x3c12787c, - 0x337e4a98, 0xe82a9011, 0x0123ba37, 0xdd769c7d - ); - /* random initial factor xn */ - secp256k1_scalar xn =
SECP256K1_SCALAR_CONST( - 0x649d4f77, 0xc4242df7, 0x7f2079c9, 0x14530327, - 0xa31b876a, 0xd2d8ce2a, 0x2236d5c6, 0xd7b2029b - ); - /* expected xn * A (from sage) */ - secp256k1_ge expected_b = SECP256K1_GE_CONST( - 0x23773684, 0x4d209dc7, 0x098a786f, 0x20d06fcd, - 0x070a38bf, 0xc11ac651, 0x03004319, 0x1e2a8786, - 0xed8c3b8e, 0xc06dd57b, 0xd06ea66e, 0x45492b0f, - 0xb84e4e1b, 0xfb77e21f, 0x96baae2a, 0x63dec956 - ); - secp256k1_gej b; - secp256k1_ecmult_const(&b, &a, &xn); - - CHECK(secp256k1_ge_is_valid_var(&a)); - ge_equals_gej(&expected_b, &b); -} - -void ecmult_const_commutativity(void) { - secp256k1_scalar a; - secp256k1_scalar b; - secp256k1_gej res1; - secp256k1_gej res2; - secp256k1_ge mid1; - secp256k1_ge mid2; - random_scalar_order_test(&a); - random_scalar_order_test(&b); - - secp256k1_ecmult_const(&res1, &secp256k1_ge_const_g, &a); - secp256k1_ecmult_const(&res2, &secp256k1_ge_const_g, &b); - secp256k1_ge_set_gej(&mid1, &res1); - secp256k1_ge_set_gej(&mid2, &res2); - secp256k1_ecmult_const(&res1, &mid1, &b); - secp256k1_ecmult_const(&res2, &mid2, &a); - secp256k1_ge_set_gej(&mid1, &res1); - secp256k1_ge_set_gej(&mid2, &res2); - ge_equals_ge(&mid1, &mid2); -} - -void ecmult_const_mult_zero_one(void) { - secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); - secp256k1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); - secp256k1_scalar negone; - secp256k1_gej res1; - secp256k1_ge res2; - secp256k1_ge point; - secp256k1_scalar_negate(&negone, &one); - - random_group_element_test(&point); - secp256k1_ecmult_const(&res1, &point, &zero); - secp256k1_ge_set_gej(&res2, &res1); - CHECK(secp256k1_ge_is_infinity(&res2)); - secp256k1_ecmult_const(&res1, &point, &one); - secp256k1_ge_set_gej(&res2, &res1); - ge_equals_ge(&res2, &point); - secp256k1_ecmult_const(&res1, &point, &negone); - secp256k1_gej_neg(&res1, &res1); - secp256k1_ge_set_gej(&res2, &res1); - ge_equals_ge(&res2, &point); -} - -void ecmult_const_chain_multiply(void) { - /* Check known result (randomly generated test problem from sage) */ - const secp256k1_scalar scalar = SECP256K1_SCALAR_CONST( - 0x4968d524, 0x2abf9b7a, 0x466abbcf, 0x34b11b6d, - 0xcd83d307, 0x827bed62, 0x05fad0ce, 0x18fae63b - ); - const secp256k1_gej expected_point = SECP256K1_GEJ_CONST( - 0x5494c15d, 0x32099706, 0xc2395f94, 0x348745fd, - 0x757ce30e, 0x4e8c90fb, 0xa2bad184, 0xf883c69f, - 0x5d195d20, 0xe191bf7f, 0x1be3e55f, 0x56a80196, - 0x6071ad01, 0xf1462f66, 0xc997fa94, 0xdb858435 - ); - secp256k1_gej point; - secp256k1_ge res; - int i; - - secp256k1_gej_set_ge(&point, &secp256k1_ge_const_g); - for (i = 0; i < 100; ++i) { - secp256k1_ge tmp; - secp256k1_ge_set_gej(&tmp, &point); - secp256k1_ecmult_const(&point, &tmp, &scalar); - } - secp256k1_ge_set_gej(&res, &point); - ge_equals_gej(&res, &expected_point); -} - -void run_ecmult_const_tests(void) { - ecmult_const_mult_zero_one(); - ecmult_const_random_mult(); - ecmult_const_commutativity(); - ecmult_const_chain_multiply(); -} - -void test_wnaf(const secp256k1_scalar *number, int w) { - secp256k1_scalar x, two, t; - int wnaf[256]; - int zeroes = -1; - int i; - int bits; - secp256k1_scalar_set_int(&x, 0); - secp256k1_scalar_set_int(&two, 2); - bits = secp256k1_ecmult_wnaf(wnaf, 256, number, w); - CHECK(bits <= 256); - for (i = bits-1; i >= 0; i--) { - int v = wnaf[i]; - secp256k1_scalar_mul(&x, &x, &two); - if (v) { - CHECK(zeroes == -1 || zeroes >= w-1); /* check that distance between non-zero elements is at least w-1 */ - zeroes=0; - CHECK((v & 1) == 1); /* check non-zero 
elements are odd */ - CHECK(v <= (1 << (w-1)) - 1); /* check upper bound of range */ - CHECK(v >= -(1 << (w-1)) - 1); /* check lower bound of range */ - } else { - CHECK(zeroes != -1); /* check that no unnecessary zero padding exists */ - zeroes++; - } - if (v >= 0) { - secp256k1_scalar_set_int(&t, v); - } else { - secp256k1_scalar_set_int(&t, -v); - secp256k1_scalar_negate(&t, &t); - } - secp256k1_scalar_add(&x, &x, &t); - } - CHECK(secp256k1_scalar_eq(&x, number)); /* check that wnaf represents number */ -} - -void test_constant_wnaf_negate(const secp256k1_scalar *number) { - secp256k1_scalar neg1 = *number; - secp256k1_scalar neg2 = *number; - int sign1 = 1; - int sign2 = 1; - - if (!secp256k1_scalar_get_bits(&neg1, 0, 1)) { - secp256k1_scalar_negate(&neg1, &neg1); - sign1 = -1; - } - sign2 = secp256k1_scalar_cond_negate(&neg2, secp256k1_scalar_is_even(&neg2)); - CHECK(sign1 == sign2); - CHECK(secp256k1_scalar_eq(&neg1, &neg2)); -} - -void test_constant_wnaf(const secp256k1_scalar *number, int w) { - secp256k1_scalar x, shift; - int wnaf[256] = {0}; - int i; - int skew; - secp256k1_scalar num = *number; - - secp256k1_scalar_set_int(&x, 0); - secp256k1_scalar_set_int(&shift, 1 << w); - /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */ -#ifdef USE_ENDOMORPHISM - for (i = 0; i < 16; ++i) { - secp256k1_scalar_shr_int(&num, 8); - } -#endif - skew = secp256k1_wnaf_const(wnaf, num, w); - - for (i = WNAF_SIZE(w); i >= 0; --i) { - secp256k1_scalar t; - int v = wnaf[i]; - CHECK(v != 0); /* check nonzero */ - CHECK(v & 1); /* check parity */ - CHECK(v > -(1 << w)); /* check lower bound of range */ - CHECK(v < (1 << w)); /* check upper bound of range */ - - secp256k1_scalar_mul(&x, &x, &shift); - if (v >= 0) { - secp256k1_scalar_set_int(&t, v); - } else { - secp256k1_scalar_set_int(&t, -v); - secp256k1_scalar_negate(&t, &t); - } - secp256k1_scalar_add(&x, &x, &t); - } - /* Skew num because when encoding numbers as odd we use an offset */ - secp256k1_scalar_cadd_bit(&num, skew == 2, 1); - CHECK(secp256k1_scalar_eq(&x, &num)); -} - -void run_wnaf(void) { - int i; - secp256k1_scalar n = {{0}}; - - /* Sanity check: 1 and 2 are the smallest odd and even numbers and should - * have easier-to-diagnose failure modes */ - n.d[0] = 1; - test_constant_wnaf(&n, 4); - n.d[0] = 2; - test_constant_wnaf(&n, 4); - /* Random tests */ - for (i = 0; i < count; i++) { - random_scalar_order(&n); - test_wnaf(&n, 4+(i%10)); - test_constant_wnaf_negate(&n); - test_constant_wnaf(&n, 4 + (i % 10)); - } - secp256k1_scalar_set_int(&n, 0); - CHECK(secp256k1_scalar_cond_negate(&n, 1) == -1); - CHECK(secp256k1_scalar_is_zero(&n)); - CHECK(secp256k1_scalar_cond_negate(&n, 0) == 1); - CHECK(secp256k1_scalar_is_zero(&n)); -} - -void test_ecmult_constants(void) { - /* Test ecmult_gen() for [0..36) and [order-36..order).
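- * For each i, the first loop below subtracts G from i*G one step at a time, checking that the value equals G exactly at the last step and infinity at the end; the second loop does the same for -i*G, adding G instead.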
*/ - secp256k1_scalar x; - secp256k1_gej r; - secp256k1_ge ng; - int i; - int j; - secp256k1_ge_neg(&ng, &secp256k1_ge_const_g); - for (i = 0; i < 36; i++ ) { - secp256k1_scalar_set_int(&x, i); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x); - for (j = 0; j < i; j++) { - if (j == i - 1) { - ge_equals_gej(&secp256k1_ge_const_g, &r); - } - secp256k1_gej_add_ge(&r, &r, &ng); - } - CHECK(secp256k1_gej_is_infinity(&r)); - } - for (i = 1; i <= 36; i++ ) { - secp256k1_scalar_set_int(&x, i); - secp256k1_scalar_negate(&x, &x); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x); - for (j = 0; j < i; j++) { - if (j == i - 1) { - ge_equals_gej(&ng, &r); - } - secp256k1_gej_add_ge(&r, &r, &secp256k1_ge_const_g); - } - CHECK(secp256k1_gej_is_infinity(&r)); - } -} - -void run_ecmult_constants(void) { - test_ecmult_constants(); -} - -void test_ecmult_gen_blind(void) { - /* Test ecmult_gen() blinding and confirm that the blinding changes, the affine points match, and the z's don't match. */ - secp256k1_scalar key; - secp256k1_scalar b; - unsigned char seed32[32]; - secp256k1_gej pgej; - secp256k1_gej pgej2; - secp256k1_gej i; - secp256k1_ge pge; - random_scalar_order_test(&key); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej, &key); - secp256k1_rand256(seed32); - b = ctx->ecmult_gen_ctx.blind; - i = ctx->ecmult_gen_ctx.initial; - secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); - CHECK(!secp256k1_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej2, &key); - CHECK(!gej_xyz_equals_gej(&pgej, &pgej2)); - CHECK(!gej_xyz_equals_gej(&i, &ctx->ecmult_gen_ctx.initial)); - secp256k1_ge_set_gej(&pge, &pgej); - ge_equals_gej(&pge, &pgej2); -} - -void test_ecmult_gen_blind_reset(void) { - /* Test ecmult_gen() blinding reset and confirm that the blinding is consistent. */ - secp256k1_scalar b; - secp256k1_gej initial; - secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); - b = ctx->ecmult_gen_ctx.blind; - initial = ctx->ecmult_gen_ctx.initial; - secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, 0); - CHECK(secp256k1_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind)); - CHECK(gej_xyz_equals_gej(&initial, &ctx->ecmult_gen_ctx.initial)); -} - -void run_ecmult_gen_blind(void) { - int i; - test_ecmult_gen_blind_reset(); - for (i = 0; i < 10; i++) { - test_ecmult_gen_blind(); - } -} - -#ifdef USE_ENDOMORPHISM -/***** ENDOMORPHISM TESTS *****/ -void test_scalar_split(void) { - secp256k1_scalar full; - secp256k1_scalar s1, slam; - const unsigned char zero[32] = {0}; - unsigned char tmp[32]; - - random_scalar_order_test(&full); - secp256k1_scalar_split_lambda(&s1, &slam, &full); - - /* check that both are <= 128 bits in size */ - if (secp256k1_scalar_is_high(&s1)) { - secp256k1_scalar_negate(&s1, &s1); - } - if (secp256k1_scalar_is_high(&slam)) { - secp256k1_scalar_negate(&slam, &slam); - } - - secp256k1_scalar_get_b32(tmp, &s1); - CHECK(memcmp(zero, tmp, 16) == 0); - secp256k1_scalar_get_b32(tmp, &slam); - CHECK(memcmp(zero, tmp, 16) == 0); -} - -void run_endomorphism_tests(void) { - test_scalar_split(); -} -#endif - -void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvalid) { - unsigned char pubkeyc[65]; - secp256k1_pubkey pubkey; - secp256k1_ge ge; - size_t pubkeyclen; - int32_t ecount; - ecount = 0; - secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); - for (pubkeyclen = 3; pubkeyclen <= 65; pubkeyclen++) { - /* Smaller sizes are tested exhaustively elsewhere.
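- * For every claimed length from 3 to 65 bytes and every possible leading type byte, the expected outcome (xpass/ypass) is computed below and the parser must match it exactly, on success and on failure alike.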
*/ - int32_t i; - memcpy(&pubkeyc[1], input, 64); - VG_UNDEF(&pubkeyc[pubkeyclen], 65 - pubkeyclen); - for (i = 0; i < 256; i++) { - /* Try all type bytes. */ - int xpass; - int ypass; - int ysign; - pubkeyc[0] = i; - /* What sign does this point have? */ - ysign = (input[63] & 1) + 2; - /* For the current type (i) do we expect parsing to work? Handled all of compressed/uncompressed/hybrid. */ - xpass = xvalid && (pubkeyclen == 33) && ((i & 254) == 2); - /* Do we expect a parse and re-serialize as uncompressed to give a matching y? */ - ypass = xvalid && yvalid && ((i & 4) == ((pubkeyclen == 65) << 2)) && - ((i == 4) || ((i & 251) == ysign)) && ((pubkeyclen == 33) || (pubkeyclen == 65)); - if (xpass || ypass) { - /* These cases must parse. */ - unsigned char pubkeyo[65]; - size_t outl; - memset(&pubkey, 0, sizeof(pubkey)); - VG_UNDEF(&pubkey, sizeof(pubkey)); - ecount = 0; - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); - VG_CHECK(&pubkey, sizeof(pubkey)); - outl = 65; - VG_UNDEF(pubkeyo, 65); - CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_COMPRESSED) == 1); - VG_CHECK(pubkeyo, outl); - CHECK(outl == 33); - CHECK(memcmp(&pubkeyo[1], &pubkeyc[1], 32) == 0); - CHECK((pubkeyclen != 33) || (pubkeyo[0] == pubkeyc[0])); - if (ypass) { - /* This test isn't always done because we decode with alternative signs, so the y won't match. */ - CHECK(pubkeyo[0] == ysign); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 1); - memset(&pubkey, 0, sizeof(pubkey)); - VG_UNDEF(&pubkey, sizeof(pubkey)); - secp256k1_pubkey_save(&pubkey, &ge); - VG_CHECK(&pubkey, sizeof(pubkey)); - outl = 65; - VG_UNDEF(pubkeyo, 65); - CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); - VG_CHECK(pubkeyo, outl); - CHECK(outl == 65); - CHECK(pubkeyo[0] == 4); - CHECK(memcmp(&pubkeyo[1], input, 64) == 0); - } - CHECK(ecount == 0); - } else { - /* These cases must fail to parse. */ - memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; - VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 0); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); - CHECK(ecount == 1); - } - } - } - secp256k1_context_set_illegal_callback(ctx, NULL, NULL); -} - -void run_ec_pubkey_parse_test(void) { -#define SECP256K1_EC_PARSE_TEST_NVALID (12) - const unsigned char valid[SECP256K1_EC_PARSE_TEST_NVALID][64] = { - { - /* Point with leading and trailing zeros in x and y serialization. */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x52, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x64, 0xef, 0xa1, 0x7b, 0x77, 0x61, 0xe1, 0xe4, 0x27, 0x06, 0x98, 0x9f, 0xb4, 0x83, - 0xb8, 0xd2, 0xd4, 0x9b, 0xf7, 0x8f, 0xae, 0x98, 0x03, 0xf0, 0x99, 0xb8, 0x34, 0xed, 0xeb, 0x00 - }, - { - /* Point with x equal to a 3rd root of unity.*/ - 0x7a, 0xe9, 0x6a, 0x2b, 0x65, 0x7c, 0x07, 0x10, 0x6e, 0x64, 0x47, 0x9e, 0xac, 0x34, 0x34, 0xe9, - 0x9c, 0xf0, 0x49, 0x75, 0x12, 0xf5, 0x89, 0x95, 0xc1, 0x39, 0x6c, 0x28, 0x71, 0x95, 0x01, 0xee, - 0x42, 0x18, 0xf2, 0x0a, 0xe6, 0xc6, 0x46, 0xb3, 0x63, 0xdb, 0x68, 0x60, 0x58, 0x22, 0xfb, 0x14, - 0x26, 0x4c, 0xa8, 0xd2, 0x58, 0x7f, 0xdd, 0x6f, 0xbc, 0x75, 0x0d, 0x58, 0x7e, 0x76, 0xa7, 0xee, - }, - { - /* Point with largest x. 
(1/2) */ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2c, - 0x0e, 0x99, 0x4b, 0x14, 0xea, 0x72, 0xf8, 0xc3, 0xeb, 0x95, 0xc7, 0x1e, 0xf6, 0x92, 0x57, 0x5e, - 0x77, 0x50, 0x58, 0x33, 0x2d, 0x7e, 0x52, 0xd0, 0x99, 0x5c, 0xf8, 0x03, 0x88, 0x71, 0xb6, 0x7d, - }, - { - /* Point with largest x. (2/2) */ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2c, - 0xf1, 0x66, 0xb4, 0xeb, 0x15, 0x8d, 0x07, 0x3c, 0x14, 0x6a, 0x38, 0xe1, 0x09, 0x6d, 0xa8, 0xa1, - 0x88, 0xaf, 0xa7, 0xcc, 0xd2, 0x81, 0xad, 0x2f, 0x66, 0xa3, 0x07, 0xfb, 0x77, 0x8e, 0x45, 0xb2, - }, - { - /* Point with smallest x. (1/2) */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x42, 0x18, 0xf2, 0x0a, 0xe6, 0xc6, 0x46, 0xb3, 0x63, 0xdb, 0x68, 0x60, 0x58, 0x22, 0xfb, 0x14, - 0x26, 0x4c, 0xa8, 0xd2, 0x58, 0x7f, 0xdd, 0x6f, 0xbc, 0x75, 0x0d, 0x58, 0x7e, 0x76, 0xa7, 0xee, - }, - { - /* Point with smallest x. (2/2) */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0xbd, 0xe7, 0x0d, 0xf5, 0x19, 0x39, 0xb9, 0x4c, 0x9c, 0x24, 0x97, 0x9f, 0xa7, 0xdd, 0x04, 0xeb, - 0xd9, 0xb3, 0x57, 0x2d, 0xa7, 0x80, 0x22, 0x90, 0x43, 0x8a, 0xf2, 0xa6, 0x81, 0x89, 0x54, 0x41, - }, - { - /* Point with largest y. (1/3) */ - 0x1f, 0xe1, 0xe5, 0xef, 0x3f, 0xce, 0xb5, 0xc1, 0x35, 0xab, 0x77, 0x41, 0x33, 0x3c, 0xe5, 0xa6, - 0xe8, 0x0d, 0x68, 0x16, 0x76, 0x53, 0xf6, 0xb2, 0xb2, 0x4b, 0xcb, 0xcf, 0xaa, 0xaf, 0xf5, 0x07, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e, - }, - { - /* Point with largest y. (2/3) */ - 0xcb, 0xb0, 0xde, 0xab, 0x12, 0x57, 0x54, 0xf1, 0xfd, 0xb2, 0x03, 0x8b, 0x04, 0x34, 0xed, 0x9c, - 0xb3, 0xfb, 0x53, 0xab, 0x73, 0x53, 0x91, 0x12, 0x99, 0x94, 0xa5, 0x35, 0xd9, 0x25, 0xf6, 0x73, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e, - }, - { - /* Point with largest y. (3/3) */ - 0x14, 0x6d, 0x3b, 0x65, 0xad, 0xd9, 0xf5, 0x4c, 0xcc, 0xa2, 0x85, 0x33, 0xc8, 0x8e, 0x2c, 0xbc, - 0x63, 0xf7, 0x44, 0x3e, 0x16, 0x58, 0x78, 0x3a, 0xb4, 0x1f, 0x8e, 0xf9, 0x7c, 0x2a, 0x10, 0xb5, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e, - }, - { - /* Point with smallest y. (1/3) */ - 0x1f, 0xe1, 0xe5, 0xef, 0x3f, 0xce, 0xb5, 0xc1, 0x35, 0xab, 0x77, 0x41, 0x33, 0x3c, 0xe5, 0xa6, - 0xe8, 0x0d, 0x68, 0x16, 0x76, 0x53, 0xf6, 0xb2, 0xb2, 0x4b, 0xcb, 0xcf, 0xaa, 0xaf, 0xf5, 0x07, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - }, - { - /* Point with smallest y. 
(2/3) */ - 0xcb, 0xb0, 0xde, 0xab, 0x12, 0x57, 0x54, 0xf1, 0xfd, 0xb2, 0x03, 0x8b, 0x04, 0x34, 0xed, 0x9c, - 0xb3, 0xfb, 0x53, 0xab, 0x73, 0x53, 0x91, 0x12, 0x99, 0x94, 0xa5, 0x35, 0xd9, 0x25, 0xf6, 0x73, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - }, - { - /* Point with smallest y. (3/3) */ - 0x14, 0x6d, 0x3b, 0x65, 0xad, 0xd9, 0xf5, 0x4c, 0xcc, 0xa2, 0x85, 0x33, 0xc8, 0x8e, 0x2c, 0xbc, - 0x63, 0xf7, 0x44, 0x3e, 0x16, 0x58, 0x78, 0x3a, 0xb4, 0x1f, 0x8e, 0xf9, 0x7c, 0x2a, 0x10, 0xb5, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 - } - }; -#define SECP256K1_EC_PARSE_TEST_NXVALID (4) - const unsigned char onlyxvalid[SECP256K1_EC_PARSE_TEST_NXVALID][64] = { - { - /* Valid if y overflow ignored (y = 1 mod p). (1/3) */ - 0x1f, 0xe1, 0xe5, 0xef, 0x3f, 0xce, 0xb5, 0xc1, 0x35, 0xab, 0x77, 0x41, 0x33, 0x3c, 0xe5, 0xa6, - 0xe8, 0x0d, 0x68, 0x16, 0x76, 0x53, 0xf6, 0xb2, 0xb2, 0x4b, 0xcb, 0xcf, 0xaa, 0xaf, 0xf5, 0x07, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30, - }, - { - /* Valid if y overflow ignored (y = 1 mod p). (2/3) */ - 0xcb, 0xb0, 0xde, 0xab, 0x12, 0x57, 0x54, 0xf1, 0xfd, 0xb2, 0x03, 0x8b, 0x04, 0x34, 0xed, 0x9c, - 0xb3, 0xfb, 0x53, 0xab, 0x73, 0x53, 0x91, 0x12, 0x99, 0x94, 0xa5, 0x35, 0xd9, 0x25, 0xf6, 0x73, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30, - }, - { - /* Valid if y overflow ignored (y = 1 mod p). (3/3)*/ - 0x14, 0x6d, 0x3b, 0x65, 0xad, 0xd9, 0xf5, 0x4c, 0xcc, 0xa2, 0x85, 0x33, 0xc8, 0x8e, 0x2c, 0xbc, - 0x63, 0xf7, 0x44, 0x3e, 0x16, 0x58, 0x78, 0x3a, 0xb4, 0x1f, 0x8e, 0xf9, 0x7c, 0x2a, 0x10, 0xb5, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30, - }, - { - /* x on curve, y is from y^2 = x^3 + 8. */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03 - } - }; -#define SECP256K1_EC_PARSE_TEST_NINVALID (7) - const unsigned char invalid[SECP256K1_EC_PARSE_TEST_NINVALID][64] = { - { - /* x is third root of -8, y is -1 * (x^3+7); also on the curve for y^2 = x^3 + 9. */ - 0x0a, 0x2d, 0x2b, 0xa9, 0x35, 0x07, 0xf1, 0xdf, 0x23, 0x37, 0x70, 0xc2, 0xa7, 0x97, 0x96, 0x2c, - 0xc6, 0x1f, 0x6d, 0x15, 0xda, 0x14, 0xec, 0xd4, 0x7d, 0x8d, 0x27, 0xae, 0x1c, 0xd5, 0xf8, 0x53, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - }, - { - /* Valid if x overflow ignored (x = 1 mod p). 
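- * That is, the encoded x is >= p; a parser that silently reduced x modulo p would find a point on the curve here, so these encodings must still be rejected.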
*/ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30, - 0x42, 0x18, 0xf2, 0x0a, 0xe6, 0xc6, 0x46, 0xb3, 0x63, 0xdb, 0x68, 0x60, 0x58, 0x22, 0xfb, 0x14, - 0x26, 0x4c, 0xa8, 0xd2, 0x58, 0x7f, 0xdd, 0x6f, 0xbc, 0x75, 0x0d, 0x58, 0x7e, 0x76, 0xa7, 0xee, - }, - { - /* Valid if x overflow ignored (x = 1 mod p). */ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30, - 0xbd, 0xe7, 0x0d, 0xf5, 0x19, 0x39, 0xb9, 0x4c, 0x9c, 0x24, 0x97, 0x9f, 0xa7, 0xdd, 0x04, 0xeb, - 0xd9, 0xb3, 0x57, 0x2d, 0xa7, 0x80, 0x22, 0x90, 0x43, 0x8a, 0xf2, 0xa6, 0x81, 0x89, 0x54, 0x41, - }, - { - /* x is -1, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 5. */ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e, - 0xf4, 0x84, 0x14, 0x5c, 0xb0, 0x14, 0x9b, 0x82, 0x5d, 0xff, 0x41, 0x2f, 0xa0, 0x52, 0xa8, 0x3f, - 0xcb, 0x72, 0xdb, 0x61, 0xd5, 0x6f, 0x37, 0x70, 0xce, 0x06, 0x6b, 0x73, 0x49, 0xa2, 0xaa, 0x28, - }, - { - /* x is -1, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 5. */ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e, - 0x0b, 0x7b, 0xeb, 0xa3, 0x4f, 0xeb, 0x64, 0x7d, 0xa2, 0x00, 0xbe, 0xd0, 0x5f, 0xad, 0x57, 0xc0, - 0x34, 0x8d, 0x24, 0x9e, 0x2a, 0x90, 0xc8, 0x8f, 0x31, 0xf9, 0x94, 0x8b, 0xb6, 0x5d, 0x52, 0x07, - }, - { - /* x is zero, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 7. */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x8f, 0x53, 0x7e, 0xef, 0xdf, 0xc1, 0x60, 0x6a, 0x07, 0x27, 0xcd, 0x69, 0xb4, 0xa7, 0x33, 0x3d, - 0x38, 0xed, 0x44, 0xe3, 0x93, 0x2a, 0x71, 0x79, 0xee, 0xcb, 0x4b, 0x6f, 0xba, 0x93, 0x60, 0xdc, - }, - { - /* x is zero, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 7. */ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x70, 0xac, 0x81, 0x10, 0x20, 0x3e, 0x9f, 0x95, 0xf8, 0xd8, 0x32, 0x96, 0x4b, 0x58, 0xcc, 0xc2, - 0xc7, 0x12, 0xbb, 0x1c, 0x6c, 0xd5, 0x8e, 0x86, 0x11, 0x34, 0xb4, 0x8f, 0x45, 0x6c, 0x9b, 0x53 - } - }; - const unsigned char pubkeyc[66] = { - /* Serialization of G. */ - 0x04, 0x79, 0xBE, 0x66, 0x7E, 0xF9, 0xDC, 0xBB, 0xAC, 0x55, 0xA0, 0x62, 0x95, 0xCE, 0x87, 0x0B, - 0x07, 0x02, 0x9B, 0xFC, 0xDB, 0x2D, 0xCE, 0x28, 0xD9, 0x59, 0xF2, 0x81, 0x5B, 0x16, 0xF8, 0x17, - 0x98, 0x48, 0x3A, 0xDA, 0x77, 0x26, 0xA3, 0xC4, 0x65, 0x5D, 0xA4, 0xFB, 0xFC, 0x0E, 0x11, 0x08, - 0xA8, 0xFD, 0x17, 0xB4, 0x48, 0xA6, 0x85, 0x54, 0x19, 0x9C, 0x47, 0xD0, 0x8F, 0xFB, 0x10, 0xD4, - 0xB8, 0x00 - }; - unsigned char sout[65]; - unsigned char shortkey[2]; - secp256k1_ge ge; - secp256k1_pubkey pubkey; - size_t len; - int32_t i; - int32_t ecount; - int32_t ecount2; - ecount = 0; - /* Nothing should be reading this far into pubkeyc. 
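- * VG_UNDEF marks a region as undefined so that valgrind flags any computation depending on it; VG_CHECK later asserts a region is fully defined, i.e. that the call under test actually wrote it.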
*/ - VG_UNDEF(&pubkeyc[65], 1); - secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); - /* Zero length claimed, fail, zeroize, no illegal arg error. */ - memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; - VG_UNDEF(shortkey, 2); - VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, shortkey, 0) == 0); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); - CHECK(ecount == 1); - /* Length one claimed, fail, zeroize, no illegal arg error. */ - for (i = 0; i < 256 ; i++) { - memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; - shortkey[0] = i; - VG_UNDEF(&shortkey[1], 1); - VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, shortkey, 1) == 0); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); - CHECK(ecount == 1); - } - /* Length two claimed, fail, zeroize, no illegal arg error. */ - for (i = 0; i < 65536 ; i++) { - memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; - shortkey[0] = i & 255; - shortkey[1] = i >> 8; - VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, shortkey, 2) == 0); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); - CHECK(ecount == 1); - } - memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; - VG_UNDEF(&pubkey, sizeof(pubkey)); - /* 33 bytes claimed on otherwise valid input starting with 0x04, fail, zeroize output, no illegal arg error. */ - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 33) == 0); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); - CHECK(ecount == 1); - /* NULL pubkey, illegal arg error. Pubkey isn't rewritten before this step, since it's NULL into the parser. */ - CHECK(secp256k1_ec_pubkey_parse(ctx, NULL, pubkeyc, 65) == 0); - CHECK(ecount == 2); - /* NULL input string. Illegal arg and zeroize output. */ - memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; - VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, NULL, 65) == 0); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 1); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); - CHECK(ecount == 2); - /* 64 bytes claimed on input starting with 0x04, fail, zeroize output, no illegal arg error. */ - memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; - VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 64) == 0); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); - CHECK(ecount == 1); - /* 66 bytes claimed, fail, zeroize output, no illegal arg error. */ - memset(&pubkey, 0xfe, sizeof(pubkey)); - ecount = 0; - VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 66) == 0); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0); - CHECK(ecount == 1); - /* Valid parse. 
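- * Round trip: parsing the 65-byte uncompressed serialization of G and loading the result must reproduce the generator exactly, with every output field defined.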
*/ - memset(&pubkey, 0, sizeof(pubkey)); - ecount = 0; - VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 65) == 1); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(ecount == 0); - VG_UNDEF(&ge, sizeof(ge)); - CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 1); - VG_CHECK(&ge.x, sizeof(ge.x)); - VG_CHECK(&ge.y, sizeof(ge.y)); - VG_CHECK(&ge.infinity, sizeof(ge.infinity)); - ge_equals_ge(&secp256k1_ge_const_g, &ge); - CHECK(ecount == 0); - /* secp256k1_ec_pubkey_serialize illegal args. */ - ecount = 0; - len = 65; - CHECK(secp256k1_ec_pubkey_serialize(ctx, NULL, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); - CHECK(ecount == 1); - CHECK(len == 0); - CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, NULL, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0); - CHECK(ecount == 2); - len = 65; - VG_UNDEF(sout, 65); - CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, &len, NULL, SECP256K1_EC_UNCOMPRESSED) == 0); - VG_CHECK(sout, 65); - CHECK(ecount == 3); - CHECK(len == 0); - len = 65; - CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, &len, &pubkey, ~0) == 0); - CHECK(ecount == 4); - CHECK(len == 0); - len = 65; - VG_UNDEF(sout, 65); - CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1); - VG_CHECK(sout, 65); - CHECK(ecount == 4); - CHECK(len == 65); - /* Multiple illegal args. Should still set arg error only once. */ - ecount = 0; - ecount2 = 11; - CHECK(secp256k1_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); - CHECK(ecount == 1); - /* Does the illegal arg callback actually change the behavior? */ - secp256k1_context_set_illegal_callback(ctx, uncounting_illegal_callback_fn, &ecount2); - CHECK(secp256k1_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0); - CHECK(ecount == 1); - CHECK(ecount2 == 10); - secp256k1_context_set_illegal_callback(ctx, NULL, NULL); - /* Try a bunch of prefabbed points with all possible encodings. */ - for (i = 0; i < SECP256K1_EC_PARSE_TEST_NVALID; i++) { - ec_pubkey_parse_pointtest(valid[i], 1, 1); - } - for (i = 0; i < SECP256K1_EC_PARSE_TEST_NXVALID; i++) { - ec_pubkey_parse_pointtest(onlyxvalid[i], 1, 0); - } - for (i = 0; i < SECP256K1_EC_PARSE_TEST_NINVALID; i++) { - ec_pubkey_parse_pointtest(invalid[i], 0, 0); - } -} - -void run_eckey_edge_case_test(void) { - const unsigned char orderc[32] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, - 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41 - }; - const unsigned char zeros[sizeof(secp256k1_pubkey)] = {0x00}; - unsigned char ctmp[33]; - unsigned char ctmp2[33]; - secp256k1_pubkey pubkey; - secp256k1_pubkey pubkey2; - secp256k1_pubkey pubkey_one; - secp256k1_pubkey pubkey_negone; - const secp256k1_pubkey *pubkeys[3]; - size_t len; - int32_t ecount; - /* Group order is too large, reject. */ - CHECK(secp256k1_ec_seckey_verify(ctx, orderc) == 0); - VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, orderc) == 0); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); - /* Maximum value is too large, reject. */ - memset(ctmp, 255, 32); - CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0); - memset(&pubkey, 1, sizeof(pubkey)); - VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); - /* Zero is too small, reject. 
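- * Valid secret keys are exactly the scalars in [1, order-1]; both endpoints of that range are probed in the checks that follow.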
*/ - memset(ctmp, 0, 32); - CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0); - memset(&pubkey, 1, sizeof(pubkey)); - VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); - /* One must be accepted. */ - ctmp[31] = 0x01; - CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1); - memset(&pubkey, 0, sizeof(pubkey)); - VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); - pubkey_one = pubkey; - /* Group order + 1 is too large, reject. */ - memcpy(ctmp, orderc, 32); - ctmp[31] = 0x42; - CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0); - memset(&pubkey, 1, sizeof(pubkey)); - VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); - /* -1 must be accepted. */ - ctmp[31] = 0x40; - CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1); - memset(&pubkey, 0, sizeof(pubkey)); - VG_UNDEF(&pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1); - VG_CHECK(&pubkey, sizeof(pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); - pubkey_negone = pubkey; - /* Tweak of zero leaves the value unchanged. */ - memset(ctmp2, 0, 32); - CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp, ctmp2) == 1); - CHECK(memcmp(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40); - memcpy(&pubkey2, &pubkey, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); - CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0); - /* Multiply tweak of zero zeroizes the output. */ - CHECK(secp256k1_ec_privkey_tweak_mul(ctx, ctmp, ctmp2) == 0); - CHECK(memcmp(zeros, ctmp, 32) == 0); - CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, ctmp2) == 0); - CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0); - memcpy(&pubkey, &pubkey2, sizeof(pubkey)); - /* Overflowing key tweak zeroizes. */ - memcpy(ctmp, orderc, 32); - ctmp[31] = 0x40; - CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp, orderc) == 0); - CHECK(memcmp(zeros, ctmp, 32) == 0); - memcpy(ctmp, orderc, 32); - ctmp[31] = 0x40; - CHECK(secp256k1_ec_privkey_tweak_mul(ctx, ctmp, orderc) == 0); - CHECK(memcmp(zeros, ctmp, 32) == 0); - memcpy(ctmp, orderc, 32); - ctmp[31] = 0x40; - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, orderc) == 0); - CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0); - memcpy(&pubkey, &pubkey2, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, orderc) == 0); - CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0); - memcpy(&pubkey, &pubkey2, sizeof(pubkey)); - /* Private key tweak that results in a key of zero is rejected. */ - ctmp2[31] = 1; - CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp2, ctmp) == 0); - CHECK(memcmp(zeros, ctmp2, 32) == 0); - ctmp2[31] = 1; - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); - CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0); - memcpy(&pubkey, &pubkey2, sizeof(pubkey)); - /* Tweak computation wraps and results in a key of 1. 
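ctmp still holds -1 (the group order minus one), so adding a tweak of 2 wraps modulo the order: (n - 1) + 2 = 1 (mod n).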
*/ - ctmp2[31] = 2; - CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp2, ctmp) == 1); - CHECK(memcmp(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1); - ctmp2[31] = 2; - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); - ctmp2[31] = 1; - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, ctmp2) == 1); - CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0); - /* Tweak mul * 2 = 1+1. */ - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1); - ctmp2[31] = 2; - CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 1); - CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0); - /* Test argument errors. */ - ecount = 0; - secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); - CHECK(ecount == 0); - /* Zeroize pubkey on parse error. */ - memset(&pubkey, 0, 32); - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0); - CHECK(ecount == 1); - CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0); - memcpy(&pubkey, &pubkey2, sizeof(pubkey)); - memset(&pubkey2, 0, 32); - CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 0); - CHECK(ecount == 2); - CHECK(memcmp(&pubkey2, zeros, sizeof(pubkey2)) == 0); - /* Plain argument errors. */ - ecount = 0; - CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1); - CHECK(ecount == 0); - CHECK(secp256k1_ec_seckey_verify(ctx, NULL) == 0); - CHECK(ecount == 1); - ecount = 0; - memset(ctmp2, 0, 32); - ctmp2[31] = 4; - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, NULL, ctmp2) == 0); - CHECK(ecount == 1); - CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, NULL) == 0); - CHECK(ecount == 2); - ecount = 0; - memset(ctmp2, 0, 32); - ctmp2[31] = 4; - CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, NULL, ctmp2) == 0); - CHECK(ecount == 1); - CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, NULL) == 0); - CHECK(ecount == 2); - ecount = 0; - memset(ctmp2, 0, 32); - CHECK(secp256k1_ec_privkey_tweak_add(ctx, NULL, ctmp2) == 0); - CHECK(ecount == 1); - CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp, NULL) == 0); - CHECK(ecount == 2); - ecount = 0; - memset(ctmp2, 0, 32); - ctmp2[31] = 1; - CHECK(secp256k1_ec_privkey_tweak_mul(ctx, NULL, ctmp2) == 0); - CHECK(ecount == 1); - CHECK(secp256k1_ec_privkey_tweak_mul(ctx, ctmp, NULL) == 0); - CHECK(ecount == 2); - ecount = 0; - CHECK(secp256k1_ec_pubkey_create(ctx, NULL, ctmp) == 0); - CHECK(ecount == 1); - memset(&pubkey, 1, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, NULL) == 0); - CHECK(ecount == 2); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); - /* secp256k1_ec_pubkey_combine tests. 
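Combine sums its inputs as group elements; an empty list, NULL arguments, and a sum equal to the point at infinity must all fail and zeroize the output.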
*/ - ecount = 0; - pubkeys[0] = &pubkey_one; - VG_UNDEF(&pubkeys[0], sizeof(secp256k1_pubkey *)); - VG_UNDEF(&pubkeys[1], sizeof(secp256k1_pubkey *)); - VG_UNDEF(&pubkeys[2], sizeof(secp256k1_pubkey *)); - memset(&pubkey, 255, sizeof(secp256k1_pubkey)); - VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 0) == 0); - VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); - CHECK(ecount == 1); - CHECK(secp256k1_ec_pubkey_combine(ctx, NULL, pubkeys, 1) == 0); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); - CHECK(ecount == 2); - memset(&pubkey, 255, sizeof(secp256k1_pubkey)); - VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, NULL, 1) == 0); - VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); - CHECK(ecount == 3); - pubkeys[0] = &pubkey_negone; - memset(&pubkey, 255, sizeof(secp256k1_pubkey)); - VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 1) == 1); - VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); - CHECK(ecount == 3); - len = 33; - CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); - CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_negone, SECP256K1_EC_COMPRESSED) == 1); - CHECK(memcmp(ctmp, ctmp2, 33) == 0); - /* Result is infinity. */ - pubkeys[0] = &pubkey_one; - pubkeys[1] = &pubkey_negone; - memset(&pubkey, 255, sizeof(secp256k1_pubkey)); - VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 0); - VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0); - CHECK(ecount == 3); - /* Passes through infinity but comes out one. */ - pubkeys[2] = &pubkey_one; - memset(&pubkey, 255, sizeof(secp256k1_pubkey)); - VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 3) == 1); - VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); - CHECK(ecount == 3); - len = 33; - CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1); - CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_one, SECP256K1_EC_COMPRESSED) == 1); - CHECK(memcmp(ctmp, ctmp2, 33) == 0); - /* Adds to two. 
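Combining 1*G with another copy of 1*G gives 2*G, so the call must succeed and produce a nonzero pubkey.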
*/ - pubkeys[1] = &pubkey_one; - memset(&pubkey, 255, sizeof(secp256k1_pubkey)); - VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 1); - VG_CHECK(&pubkey, sizeof(secp256k1_pubkey)); - CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0); - CHECK(ecount == 3); - secp256k1_context_set_illegal_callback(ctx, NULL, NULL); -} - -void random_sign(secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *key, const secp256k1_scalar *msg, int *recid) { - secp256k1_scalar nonce; - do { - random_scalar_order_test(&nonce); - } while(!secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, sigr, sigs, key, msg, &nonce, recid)); -} - -void test_ecdsa_sign_verify(void) { - secp256k1_gej pubj; - secp256k1_ge pub; - secp256k1_scalar one; - secp256k1_scalar msg, key; - secp256k1_scalar sigr, sigs; - int recid; - int getrec; - random_scalar_order_test(&msg); - random_scalar_order_test(&key); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key); - secp256k1_ge_set_gej(&pub, &pubj); - getrec = secp256k1_rand_bits(1); - random_sign(&sigr, &sigs, &key, &msg, getrec?&recid:NULL); - if (getrec) { - CHECK(recid >= 0 && recid < 4); - } - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg)); - secp256k1_scalar_set_int(&one, 1); - secp256k1_scalar_add(&msg, &msg, &one); - CHECK(!secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg)); -} - -void run_ecdsa_sign_verify(void) { - int i; - for (i = 0; i < 10*count; i++) { - test_ecdsa_sign_verify(); - } -} - -/** Dummy nonce generation function that just uses a precomputed nonce, and fails if it is not accepted. Use only for testing. */ -static int precomputed_nonce_function(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) { - (void)msg32; - (void)key32; - (void)algo16; - memcpy(nonce32, data, 32); - return (counter == 0); -} - -static int nonce_function_test_fail(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) { - /* Dummy nonce generator that has a fatal error on the first counter value. */ - if (counter == 0) { - return 0; - } - return nonce_function_rfc6979(nonce32, msg32, key32, algo16, data, counter - 1); -} - -static int nonce_function_test_retry(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) { - /* Dummy nonce generator that produces unacceptable nonces for the first several counter values. */ - if (counter < 3) { - memset(nonce32, counter==0 ? 0 : 255, 32); - if (counter == 2) { - nonce32[31]--; - } - return 1; - } - if (counter < 5) { - static const unsigned char order[] = { - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE, - 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B, - 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41 - }; - memcpy(nonce32, order, 32); - if (counter == 4) { - nonce32[31]++; - } - return 1; - } - /* Retry rate of RFC 6979 is negligible esp. as we only call this in deterministic tests. */ - /* If someone does find a case where it retries for secp256k1, we'd like to know. 
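Counters above 5 therefore fail hard here instead of falling through to further RFC 6979 retries.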
*/ - if (counter > 5) { - return 0; - } - return nonce_function_rfc6979(nonce32, msg32, key32, algo16, data, counter - 5); -} - -int is_empty_signature(const secp256k1_ecdsa_signature *sig) { - static const unsigned char res[sizeof(secp256k1_ecdsa_signature)] = {0}; - return memcmp(sig, res, sizeof(secp256k1_ecdsa_signature)) == 0; -} - -void test_ecdsa_end_to_end(void) { - unsigned char extra[32] = {0x00}; - unsigned char privkey[32]; - unsigned char message[32]; - unsigned char privkey2[32]; - secp256k1_ecdsa_signature signature[6]; - secp256k1_scalar r, s; - unsigned char sig[74]; - size_t siglen = 74; - unsigned char pubkeyc[65]; - size_t pubkeyclen = 65; - secp256k1_pubkey pubkey; - unsigned char seckey[300]; - size_t seckeylen = 300; - - /* Generate a random key and message. */ - { - secp256k1_scalar msg, key; - random_scalar_order_test(&msg); - random_scalar_order_test(&key); - secp256k1_scalar_get_b32(privkey, &key); - secp256k1_scalar_get_b32(message, &msg); - } - - /* Construct and verify corresponding public key. */ - CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1); - - /* Verify exporting and importing public key. */ - CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, secp256k1_rand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED)); - memset(&pubkey, 0, sizeof(pubkey)); - CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1); - - /* Verify private key import and export. */ - CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, secp256k1_rand_bits(1) == 1)); - CHECK(ec_privkey_import_der(ctx, privkey2, seckey, seckeylen) == 1); - CHECK(memcmp(privkey, privkey2, 32) == 0); - - /* Optionally tweak the keys using addition. */ - if (secp256k1_rand_int(3) == 0) { - int ret1; - int ret2; - unsigned char rnd[32]; - secp256k1_pubkey pubkey2; - secp256k1_rand256_test(rnd); - ret1 = secp256k1_ec_privkey_tweak_add(ctx, privkey, rnd); - ret2 = secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, rnd); - CHECK(ret1 == ret2); - if (ret1 == 0) { - return; - } - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); - CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0); - } - - /* Optionally tweak the keys using multiplication. */ - if (secp256k1_rand_int(3) == 0) { - int ret1; - int ret2; - unsigned char rnd[32]; - secp256k1_pubkey pubkey2; - secp256k1_rand256_test(rnd); - ret1 = secp256k1_ec_privkey_tweak_mul(ctx, privkey, rnd); - ret2 = secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, rnd); - CHECK(ret1 == ret2); - if (ret1 == 0) { - return; - } - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1); - CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0); - } - - /* Sign. 
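Repeated signing with identical inputs must produce identical signatures, while different values of the optional extra entropy must produce distinct ones.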
*/ - CHECK(secp256k1_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1); - CHECK(secp256k1_ecdsa_sign(ctx, &signature[4], message, privkey, NULL, NULL) == 1); - CHECK(secp256k1_ecdsa_sign(ctx, &signature[1], message, privkey, NULL, extra) == 1); - extra[31] = 1; - CHECK(secp256k1_ecdsa_sign(ctx, &signature[2], message, privkey, NULL, extra) == 1); - extra[31] = 0; - extra[0] = 1; - CHECK(secp256k1_ecdsa_sign(ctx, &signature[3], message, privkey, NULL, extra) == 1); - CHECK(memcmp(&signature[0], &signature[4], sizeof(signature[0])) == 0); - CHECK(memcmp(&signature[0], &signature[1], sizeof(signature[0])) != 0); - CHECK(memcmp(&signature[0], &signature[2], sizeof(signature[0])) != 0); - CHECK(memcmp(&signature[0], &signature[3], sizeof(signature[0])) != 0); - CHECK(memcmp(&signature[1], &signature[2], sizeof(signature[0])) != 0); - CHECK(memcmp(&signature[1], &signature[3], sizeof(signature[0])) != 0); - CHECK(memcmp(&signature[2], &signature[3], sizeof(signature[0])) != 0); - /* Verify. */ - CHECK(secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[1], message, &pubkey) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[2], message, &pubkey) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[3], message, &pubkey) == 1); - /* Test lower-S form, malleate, verify and fail, test again, malleate again */ - CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[0])); - secp256k1_ecdsa_signature_load(ctx, &r, &s, &signature[0]); - secp256k1_scalar_negate(&s, &s); - secp256k1_ecdsa_signature_save(&signature[5], &r, &s); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 0); - CHECK(secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5])); - CHECK(secp256k1_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); - CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5])); - CHECK(!secp256k1_ecdsa_signature_normalize(ctx, &signature[5], &signature[5])); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); - secp256k1_scalar_negate(&s, &s); - secp256k1_ecdsa_signature_save(&signature[5], &r, &s); - CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5])); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1); - CHECK(memcmp(&signature[5], &signature[0], 64) == 0); - - /* Serialize/parse DER and verify again */ - CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); - memset(&signature[0], 0, sizeof(signature[0])); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1); - /* Serialize/destroy/parse DER and verify again. */ - siglen = 74; - CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1); - sig[secp256k1_rand_int(siglen)] += 1 + secp256k1_rand_int(255); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 0 || - secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 0); -} - -void test_random_pubkeys(void) { - secp256k1_ge elem; - secp256k1_ge elem2; - unsigned char in[65]; - /* Generate some randomly sized pubkeys. */ - size_t len = secp256k1_rand_bits(2) == 0 ? 65 : 33; - if (secp256k1_rand_bits(2) == 0) { - len = secp256k1_rand_bits(6); - } - if (len == 65) { - in[0] = secp256k1_rand_bits(1) ? 4 : (secp256k1_rand_bits(1) ? 6 : 7); - } else { - in[0] = secp256k1_rand_bits(1) ? 
2 : 3; - } - if (secp256k1_rand_bits(3) == 0) { - in[0] = secp256k1_rand_bits(8); - } - if (len > 1) { - secp256k1_rand256(&in[1]); - } - if (len > 33) { - secp256k1_rand256(&in[33]); - } - if (secp256k1_eckey_pubkey_parse(&elem, in, len)) { - unsigned char out[65]; - unsigned char firstb; - int res; - size_t size = len; - firstb = in[0]; - /* If the pubkey can be parsed, it should round-trip... */ - CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, len == 33)); - CHECK(size == len); - CHECK(memcmp(&in[1], &out[1], len-1) == 0); - /* ... except for the type of hybrid inputs. */ - if ((in[0] != 6) && (in[0] != 7)) { - CHECK(in[0] == out[0]); - } - size = 65; - CHECK(secp256k1_eckey_pubkey_serialize(&elem, in, &size, 0)); - CHECK(size == 65); - CHECK(secp256k1_eckey_pubkey_parse(&elem2, in, size)); - ge_equals_ge(&elem,&elem2); - /* Check that the X9.62 hybrid type is checked. */ - in[0] = secp256k1_rand_bits(1) ? 6 : 7; - res = secp256k1_eckey_pubkey_parse(&elem2, in, size); - if (firstb == 2 || firstb == 3) { - if (in[0] == firstb + 4) { - CHECK(res); - } else { - CHECK(!res); - } - } - if (res) { - ge_equals_ge(&elem,&elem2); - CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, 0)); - CHECK(memcmp(&in[1], &out[1], 64) == 0); - } - } -} - -void run_random_pubkeys(void) { - int i; - for (i = 0; i < 10*count; i++) { - test_random_pubkeys(); - } -} - -void run_ecdsa_end_to_end(void) { - int i; - for (i = 0; i < 64*count; i++) { - test_ecdsa_end_to_end(); - } -} - -int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_der, int certainly_not_der) { - static const unsigned char zeroes[32] = {0}; -#ifdef ENABLE_OPENSSL_TESTS - static const unsigned char max_scalar[32] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, - 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40 - }; -#endif - - int ret = 0; - - secp256k1_ecdsa_signature sig_der; - unsigned char roundtrip_der[2048]; - unsigned char compact_der[64]; - size_t len_der = 2048; - int parsed_der = 0, valid_der = 0, roundtrips_der = 0; - - secp256k1_ecdsa_signature sig_der_lax; - unsigned char roundtrip_der_lax[2048]; - unsigned char compact_der_lax[64]; - size_t len_der_lax = 2048; - int parsed_der_lax = 0, valid_der_lax = 0, roundtrips_der_lax = 0; - -#ifdef ENABLE_OPENSSL_TESTS - ECDSA_SIG *sig_openssl; - const unsigned char *sigptr; - unsigned char roundtrip_openssl[2048]; - int len_openssl = 2048; - int parsed_openssl, valid_openssl = 0, roundtrips_openssl = 0; -#endif - - parsed_der = secp256k1_ecdsa_signature_parse_der(ctx, &sig_der, sig, siglen); - if (parsed_der) { - ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der, &sig_der)) << 0; - valid_der = (memcmp(compact_der, zeroes, 32) != 0) && (memcmp(compact_der + 32, zeroes, 32) != 0); - } - if (valid_der) { - ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der, &len_der, &sig_der)) << 1; - roundtrips_der = (len_der == siglen) && memcmp(roundtrip_der, sig, siglen) == 0; - } - - parsed_der_lax = ecdsa_signature_parse_der_lax(ctx, &sig_der_lax, sig, siglen); - if (parsed_der_lax) { - ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der_lax, &sig_der_lax)) << 10; - valid_der_lax = (memcmp(compact_der_lax, zeroes, 32) != 0) && (memcmp(compact_der_lax + 32, zeroes, 32) != 0); - } - if (valid_der_lax) { - ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der_lax, &len_der_lax, &sig_der_lax)) 
<< 11; - roundtrips_der_lax = (len_der_lax == siglen) && memcmp(roundtrip_der_lax, sig, siglen) == 0; - } - - if (certainly_der) { - ret |= (!parsed_der) << 2; - } - if (certainly_not_der) { - ret |= (parsed_der) << 17; - } - if (valid_der) { - ret |= (!roundtrips_der) << 3; - } - - if (valid_der) { - ret |= (!roundtrips_der_lax) << 12; - ret |= (len_der != len_der_lax) << 13; - ret |= (memcmp(roundtrip_der_lax, roundtrip_der, len_der) != 0) << 14; - } - ret |= (roundtrips_der != roundtrips_der_lax) << 15; - if (parsed_der) { - ret |= (!parsed_der_lax) << 16; - } - -#ifdef ENABLE_OPENSSL_TESTS - sig_openssl = ECDSA_SIG_new(); - sigptr = sig; - parsed_openssl = (d2i_ECDSA_SIG(&sig_openssl, &sigptr, siglen) != NULL); - if (parsed_openssl) { - valid_openssl = !BN_is_negative(sig_openssl->r) && !BN_is_negative(sig_openssl->s) && BN_num_bits(sig_openssl->r) > 0 && BN_num_bits(sig_openssl->r) <= 256 && BN_num_bits(sig_openssl->s) > 0 && BN_num_bits(sig_openssl->s) <= 256; - if (valid_openssl) { - unsigned char tmp[32] = {0}; - BN_bn2bin(sig_openssl->r, tmp + 32 - BN_num_bytes(sig_openssl->r)); - valid_openssl = memcmp(tmp, max_scalar, 32) < 0; - } - if (valid_openssl) { - unsigned char tmp[32] = {0}; - BN_bn2bin(sig_openssl->s, tmp + 32 - BN_num_bytes(sig_openssl->s)); - valid_openssl = memcmp(tmp, max_scalar, 32) < 0; - } - } - len_openssl = i2d_ECDSA_SIG(sig_openssl, NULL); - if (len_openssl <= 2048) { - unsigned char *ptr = roundtrip_openssl; - CHECK(i2d_ECDSA_SIG(sig_openssl, &ptr) == len_openssl); - roundtrips_openssl = valid_openssl && ((size_t)len_openssl == siglen) && (memcmp(roundtrip_openssl, sig, siglen) == 0); - } else { - len_openssl = 0; - } - ECDSA_SIG_free(sig_openssl); - - ret |= (parsed_der && !parsed_openssl) << 4; - ret |= (valid_der && !valid_openssl) << 5; - ret |= (roundtrips_openssl && !parsed_der) << 6; - ret |= (roundtrips_der != roundtrips_openssl) << 7; - if (roundtrips_openssl) { - ret |= (len_der != (size_t)len_openssl) << 8; - ret |= (memcmp(roundtrip_der, roundtrip_openssl, len_der) != 0) << 9; - } -#endif - return ret; -} - -static void assign_big_endian(unsigned char *ptr, size_t ptrlen, uint32_t val) { - size_t i; - for (i = 0; i < ptrlen; i++) { - int shift = ptrlen - 1 - i; - if (shift >= 4) { - ptr[i] = 0; - } else { - ptr[i] = (val >> (shift * 8)) & 0xFF; - } - } -} - -static void damage_array(unsigned char *sig, size_t *len) { - int pos; - int action = secp256k1_rand_bits(3); - if (action < 1 && *len > 3) { - /* Delete a byte. */ - pos = secp256k1_rand_int(*len); - memmove(sig + pos, sig + pos + 1, *len - pos - 1); - (*len)--; - return; - } else if (action < 2 && *len < 2048) { - /* Insert a byte. */ - pos = secp256k1_rand_int(1 + *len); - memmove(sig + pos + 1, sig + pos, *len - pos); - sig[pos] = secp256k1_rand_bits(8); - (*len)++; - return; - } else if (action < 4) { - /* Modify a byte. */ - sig[secp256k1_rand_int(*len)] += 1 + secp256k1_rand_int(255); - return; - } else { /* action < 8 */ - /* Modify a bit. */ - sig[secp256k1_rand_int(*len)] ^= 1 << secp256k1_rand_bits(3); - return; - } -} - -static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly_der, int* certainly_not_der) { - int der; - int nlow[2], nlen[2], nlenlen[2], nhbit[2], nhbyte[2], nzlen[2]; - size_t tlen, elen, glen; - int indet; - int n; - - *len = 0; - der = secp256k1_rand_bits(2) == 0; - *certainly_der = der; - *certainly_not_der = 0; - indet = der ? 
0 : secp256k1_rand_int(10) == 0; - - for (n = 0; n < 2; n++) { - /* We generate two classes of numbers: nlow==1 "low" ones (up to 32 bytes), nlow==0 "high" ones (32 bytes with 129 top bits set, or larger than 32 bytes) */ - nlow[n] = der ? 1 : (secp256k1_rand_bits(3) != 0); - /* The length of the number in bytes (the first byte of which will always be nonzero) */ - nlen[n] = nlow[n] ? secp256k1_rand_int(33) : 32 + secp256k1_rand_int(200) * secp256k1_rand_int(8) / 8; - CHECK(nlen[n] <= 232); - /* The top bit of the number. */ - nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : secp256k1_rand_bits(1)); - /* The top byte of the number (after the potential hardcoded 16 0xFF characters for "high" 32 bytes numbers) */ - nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + secp256k1_rand_bits(7) : 1 + secp256k1_rand_int(127)); - /* The number of zero bytes in front of the number (which is 0 or 1 in case of DER, otherwise we extend up to 300 bytes) */ - nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? secp256k1_rand_int(3) : secp256k1_rand_int(300 - nlen[n]) * secp256k1_rand_int(8) / 8); - if (nzlen[n] > ((nlen[n] == 0 || nhbit[n]) ? 1 : 0)) { - *certainly_not_der = 1; - } - CHECK(nlen[n] + nzlen[n] <= 300); - /* The length of the length descriptor for the number. 0 means short encoding, anything else is long encoding. */ - nlenlen[n] = nlen[n] + nzlen[n] < 128 ? 0 : (nlen[n] + nzlen[n] < 256 ? 1 : 2); - if (!der) { - /* nlenlen[n] max 127 bytes */ - int add = secp256k1_rand_int(127 - nlenlen[n]) * secp256k1_rand_int(16) * secp256k1_rand_int(16) / 256; - nlenlen[n] += add; - if (add != 0) { - *certainly_not_der = 1; - } - } - CHECK(nlen[n] + nzlen[n] + nlenlen[n] <= 427); - } - - /* The total length of the data to go, so far */ - tlen = 2 + nlenlen[0] + nlen[0] + nzlen[0] + 2 + nlenlen[1] + nlen[1] + nzlen[1]; - CHECK(tlen <= 856); - - /* The length of the garbage inside the tuple. */ - elen = (der || indet) ? 0 : secp256k1_rand_int(980 - tlen) * secp256k1_rand_int(8) / 8; - if (elen != 0) { - *certainly_not_der = 1; - } - tlen += elen; - CHECK(tlen <= 980); - - /* The length of the garbage after the end of the tuple. */ - glen = der ? 0 : secp256k1_rand_int(990 - tlen) * secp256k1_rand_int(8) / 8; - if (glen != 0) { - *certainly_not_der = 1; - } - CHECK(tlen + glen <= 990); - - /* Write the tuple header. */ - sig[(*len)++] = 0x30; - if (indet) { - /* Indeterminate length */ - sig[(*len)++] = 0x80; - *certainly_not_der = 1; - } else { - int tlenlen = tlen < 128 ? 0 : (tlen < 256 ? 1 : 2); - if (!der) { - int add = secp256k1_rand_int(127 - tlenlen) * secp256k1_rand_int(16) * secp256k1_rand_int(16) / 256; - tlenlen += add; - if (add != 0) { - *certainly_not_der = 1; - } - } - if (tlenlen == 0) { - /* Short length notation */ - sig[(*len)++] = tlen; - } else { - /* Long length notation */ - sig[(*len)++] = 128 + tlenlen; - assign_big_endian(sig + *len, tlenlen, tlen); - *len += tlenlen; - } - tlen += tlenlen; - } - tlen += 2; - CHECK(tlen + glen <= 1119); - - for (n = 0; n < 2; n++) { - /* Write the integer header. */ - sig[(*len)++] = 0x02; - if (nlenlen[n] == 0) { - /* Short length notation */ - sig[(*len)++] = nlen[n] + nzlen[n]; - } else { - /* Long length notation. 
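A leading byte of 0x80 | lenlen announces that the next lenlen bytes carry the big-endian payload length.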
*/ - sig[(*len)++] = 128 + nlenlen[n]; - assign_big_endian(sig + *len, nlenlen[n], nlen[n] + nzlen[n]); - *len += nlenlen[n]; - } - /* Write zero padding */ - while (nzlen[n] > 0) { - sig[(*len)++] = 0x00; - nzlen[n]--; - } - if (nlen[n] == 32 && !nlow[n]) { - /* Special extra 16 0xFF bytes in "high" 32-byte numbers */ - int i; - for (i = 0; i < 16; i++) { - sig[(*len)++] = 0xFF; - } - nlen[n] -= 16; - } - /* Write first byte of number */ - if (nlen[n] > 0) { - sig[(*len)++] = nhbyte[n]; - nlen[n]--; - } - /* Generate remaining random bytes of number */ - secp256k1_rand_bytes_test(sig + *len, nlen[n]); - *len += nlen[n]; - nlen[n] = 0; - } - - /* Generate random garbage inside tuple. */ - secp256k1_rand_bytes_test(sig + *len, elen); - *len += elen; - - /* Generate end-of-contents bytes. */ - if (indet) { - sig[(*len)++] = 0; - sig[(*len)++] = 0; - tlen += 2; - } - CHECK(tlen + glen <= 1121); - - /* Generate random garbage outside tuple. */ - secp256k1_rand_bytes_test(sig + *len, glen); - *len += glen; - tlen += glen; - CHECK(tlen <= 1121); - CHECK(tlen == *len); -} - -void run_ecdsa_der_parse(void) { - int i,j; - for (i = 0; i < 200 * count; i++) { - unsigned char buffer[2048]; - size_t buflen = 0; - int certainly_der = 0; - int certainly_not_der = 0; - random_ber_signature(buffer, &buflen, &certainly_der, &certainly_not_der); - CHECK(buflen <= 2048); - for (j = 0; j < 16; j++) { - int ret = 0; - if (j > 0) { - damage_array(buffer, &buflen); - /* We don't know anything anymore about the DERness of the result */ - certainly_der = 0; - certainly_not_der = 0; - } - ret = test_ecdsa_der_parse(buffer, buflen, certainly_der, certainly_not_der); - if (ret != 0) { - size_t k; - fprintf(stderr, "Failure %x on ", ret); - for (k = 0; k < buflen; k++) { - fprintf(stderr, "%02x ", buffer[k]); - } - fprintf(stderr, "\n"); - } - CHECK(ret == 0); - } - } -} - -/* Tests several edge cases. */ -void test_ecdsa_edge_cases(void) { - int t; - secp256k1_ecdsa_signature sig; - - /* Test the case where ECDSA recomputes a point that is infinity. */ - { - secp256k1_gej keyj; - secp256k1_ge key; - secp256k1_scalar msg; - secp256k1_scalar sr, ss; - secp256k1_scalar_set_int(&ss, 1); - secp256k1_scalar_negate(&ss, &ss); - secp256k1_scalar_inverse(&ss, &ss); - secp256k1_scalar_set_int(&sr, 1); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &keyj, &sr); - secp256k1_ge_set_gej(&key, &keyj); - msg = ss; - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); - } - - /* Verify signature with r of zero fails. */ - { - const unsigned char pubkey_mods_zero[33] = { - 0x02, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, - 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, - 0x41 - }; - secp256k1_ge key; - secp256k1_scalar msg; - secp256k1_scalar sr, ss; - secp256k1_scalar_set_int(&ss, 1); - secp256k1_scalar_set_int(&msg, 0); - secp256k1_scalar_set_int(&sr, 0); - CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey_mods_zero, 33)); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); - } - - /* Verify signature with s of zero fails. 
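ECDSA requires both signature scalars to lie in [1, n-1], so s = 0 must be rejected before the verification equation is evaluated.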
*/ - { - const unsigned char pubkey[33] = { - 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01 - }; - secp256k1_ge key; - secp256k1_scalar msg; - secp256k1_scalar sr, ss; - secp256k1_scalar_set_int(&ss, 0); - secp256k1_scalar_set_int(&msg, 0); - secp256k1_scalar_set_int(&sr, 1); - CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); - } - - /* Verify signature with message 0 passes. */ - { - const unsigned char pubkey[33] = { - 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02 - }; - const unsigned char pubkey2[33] = { - 0x02, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, - 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, - 0x43 - }; - secp256k1_ge key; - secp256k1_ge key2; - secp256k1_scalar msg; - secp256k1_scalar sr, ss; - secp256k1_scalar_set_int(&ss, 2); - secp256k1_scalar_set_int(&msg, 0); - secp256k1_scalar_set_int(&sr, 2); - CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(secp256k1_eckey_pubkey_parse(&key2, pubkey2, 33)); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); - secp256k1_scalar_negate(&ss, &ss); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); - secp256k1_scalar_set_int(&ss, 1); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0); - } - - /* Verify signature with message 1 passes. 
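Both s and -s must verify here: the inner secp256k1_ecdsa_sig_verify does not apply the low-s rule, which is enforced one layer up in secp256k1_ecdsa_verify.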
*/ - { - const unsigned char pubkey[33] = { - 0x02, 0x14, 0x4e, 0x5a, 0x58, 0xef, 0x5b, 0x22, - 0x6f, 0xd2, 0xe2, 0x07, 0x6a, 0x77, 0xcf, 0x05, - 0xb4, 0x1d, 0xe7, 0x4a, 0x30, 0x98, 0x27, 0x8c, - 0x93, 0xe6, 0xe6, 0x3c, 0x0b, 0xc4, 0x73, 0x76, - 0x25 - }; - const unsigned char pubkey2[33] = { - 0x02, 0x8a, 0xd5, 0x37, 0xed, 0x73, 0xd9, 0x40, - 0x1d, 0xa0, 0x33, 0xd2, 0xdc, 0xf0, 0xaf, 0xae, - 0x34, 0xcf, 0x5f, 0x96, 0x4c, 0x73, 0x28, 0x0f, - 0x92, 0xc0, 0xf6, 0x9d, 0xd9, 0xb2, 0x09, 0x10, - 0x62 - }; - const unsigned char csr[32] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4, - 0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xeb - }; - secp256k1_ge key; - secp256k1_ge key2; - secp256k1_scalar msg; - secp256k1_scalar sr, ss; - secp256k1_scalar_set_int(&ss, 1); - secp256k1_scalar_set_int(&msg, 1); - secp256k1_scalar_set_b32(&sr, csr, NULL); - CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(secp256k1_eckey_pubkey_parse(&key2, pubkey2, 33)); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); - secp256k1_scalar_negate(&ss, &ss); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1); - secp256k1_scalar_set_int(&ss, 2); - secp256k1_scalar_inverse_var(&ss, &ss); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0); - } - - /* Verify signature with message -1 passes. */ - { - const unsigned char pubkey[33] = { - 0x03, 0xaf, 0x97, 0xff, 0x7d, 0x3a, 0xf6, 0xa0, - 0x02, 0x94, 0xbd, 0x9f, 0x4b, 0x2e, 0xd7, 0x52, - 0x28, 0xdb, 0x49, 0x2a, 0x65, 0xcb, 0x1e, 0x27, - 0x57, 0x9c, 0xba, 0x74, 0x20, 0xd5, 0x1d, 0x20, - 0xf1 - }; - const unsigned char csr[32] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4, - 0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xee - }; - secp256k1_ge key; - secp256k1_scalar msg; - secp256k1_scalar sr, ss; - secp256k1_scalar_set_int(&ss, 1); - secp256k1_scalar_set_int(&msg, 1); - secp256k1_scalar_negate(&msg, &msg); - secp256k1_scalar_set_b32(&sr, csr, NULL); - CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33)); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - secp256k1_scalar_negate(&ss, &ss); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1); - secp256k1_scalar_set_int(&ss, 3); - secp256k1_scalar_inverse_var(&ss, &ss); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0); - } - - /* Signature where s would be zero. 
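The message below is chosen so that both fixed nonces would make s = k^-1 * (m + r*x) come out zero, which the signer must reject; perturbing one message byte then lets signing succeed.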
*/ - { - secp256k1_pubkey pubkey; - size_t siglen; - int32_t ecount; - unsigned char signature[72]; - static const unsigned char nonce[32] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - }; - static const unsigned char nonce2[32] = { - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE, - 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B, - 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x40 - }; - const unsigned char key[32] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - }; - unsigned char msg[32] = { - 0x86, 0x41, 0x99, 0x81, 0x06, 0x23, 0x44, 0x53, - 0xaa, 0x5f, 0x9d, 0x6a, 0x31, 0x78, 0xf4, 0xf7, - 0xb8, 0x12, 0xe0, 0x0b, 0x81, 0x7a, 0x77, 0x62, - 0x65, 0xdf, 0xdd, 0x31, 0xb9, 0x3e, 0x29, 0xa9, - }; - ecount = 0; - secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 0); - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 0); - msg[31] = 0xaa; - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 1); - CHECK(ecount == 0); - CHECK(secp256k1_ecdsa_sign(ctx, NULL, msg, key, precomputed_nonce_function, nonce2) == 0); - CHECK(ecount == 1); - CHECK(secp256k1_ecdsa_sign(ctx, &sig, NULL, key, precomputed_nonce_function, nonce2) == 0); - CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, NULL, precomputed_nonce_function, nonce2) == 0); - CHECK(ecount == 3); - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 1); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, key) == 1); - CHECK(secp256k1_ecdsa_verify(ctx, NULL, msg, &pubkey) == 0); - CHECK(ecount == 4); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, NULL, &pubkey) == 0); - CHECK(ecount == 5); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg, NULL) == 0); - CHECK(ecount == 6); - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg, &pubkey) == 1); - CHECK(ecount == 6); - CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, NULL) == 0); - CHECK(ecount == 7); - /* That pubkeyload fails via an ARGCHECK is a little odd but makes sense because pubkeys are an opaque data type. */ - CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg, &pubkey) == 0); - CHECK(ecount == 8); - siglen = 72; - CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, NULL, &siglen, &sig) == 0); - CHECK(ecount == 9); - CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, NULL, &sig) == 0); - CHECK(ecount == 10); - CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, NULL) == 0); - CHECK(ecount == 11); - CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 1); - CHECK(ecount == 11); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, NULL, signature, siglen) == 0); - CHECK(ecount == 12); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, NULL, siglen) == 0); - CHECK(ecount == 13); - CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, signature, siglen) == 1); - CHECK(ecount == 13); - siglen = 10; - /* Too little room for a signature does not fail via ARGCHECK. 
*/ - CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 0); - CHECK(ecount == 13); - ecount = 0; - CHECK(secp256k1_ecdsa_signature_normalize(ctx, NULL, NULL) == 0); - CHECK(ecount == 1); - CHECK(secp256k1_ecdsa_signature_serialize_compact(ctx, NULL, &sig) == 0); - CHECK(ecount == 2); - CHECK(secp256k1_ecdsa_signature_serialize_compact(ctx, signature, NULL) == 0); - CHECK(ecount == 3); - CHECK(secp256k1_ecdsa_signature_serialize_compact(ctx, signature, &sig) == 1); - CHECK(ecount == 3); - CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, NULL, signature) == 0); - CHECK(ecount == 4); - CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, &sig, NULL) == 0); - CHECK(ecount == 5); - CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, &sig, signature) == 1); - CHECK(ecount == 5); - memset(signature, 255, 64); - CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, &sig, signature) == 0); - CHECK(ecount == 5); - secp256k1_context_set_illegal_callback(ctx, NULL, NULL); - } - - /* Nonce function corner cases. */ - for (t = 0; t < 2; t++) { - static const unsigned char zero[32] = {0x00}; - int i; - unsigned char key[32]; - unsigned char msg[32]; - secp256k1_ecdsa_signature sig2; - secp256k1_scalar sr[512], ss; - const unsigned char *extra; - extra = t == 0 ? NULL : zero; - memset(msg, 0, 32); - msg[31] = 1; - /* High key results in signature failure. */ - memset(key, 0xFF, 32); - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); - CHECK(is_empty_signature(&sig)); - /* Zero key results in signature failure. */ - memset(key, 0, 32); - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0); - CHECK(is_empty_signature(&sig)); - /* Nonce function failure results in signature failure. */ - key[31] = 1; - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_fail, extra) == 0); - CHECK(is_empty_signature(&sig)); - /* The retry loop successfully makes its way to the first good value. */ - CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_retry, extra) == 1); - CHECK(!is_empty_signature(&sig)); - CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, nonce_function_rfc6979, extra) == 1); - CHECK(!is_empty_signature(&sig2)); - CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0); - /* The default nonce function is deterministic. */ - CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); - CHECK(!is_empty_signature(&sig2)); - CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0); - /* The default nonce function changes output with different messages. */ - for(i = 0; i < 256; i++) { - int j; - msg[0] = i; - CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); - CHECK(!is_empty_signature(&sig2)); - secp256k1_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); - for (j = 0; j < i; j++) { - CHECK(!secp256k1_scalar_eq(&sr[i], &sr[j])); - } - } - msg[0] = 0; - msg[31] = 2; - /* The default nonce function changes output with different keys. */ - for(i = 256; i < 512; i++) { - int j; - key[0] = i - 256; - CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1); - CHECK(!is_empty_signature(&sig2)); - secp256k1_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2); - for (j = 0; j < i; j++) { - CHECK(!secp256k1_scalar_eq(&sr[i], &sr[j])); - } - } - key[0] = 0; - } - - { - /* Check that optional nonce arguments do not have equivalent effect. 
*/ - const unsigned char zeros[32] = {0}; - unsigned char nonce[32]; - unsigned char nonce2[32]; - unsigned char nonce3[32]; - unsigned char nonce4[32]; - VG_UNDEF(nonce,32); - VG_UNDEF(nonce2,32); - VG_UNDEF(nonce3,32); - VG_UNDEF(nonce4,32); - CHECK(nonce_function_rfc6979(nonce, zeros, zeros, NULL, NULL, 0) == 1); - VG_CHECK(nonce,32); - CHECK(nonce_function_rfc6979(nonce2, zeros, zeros, zeros, NULL, 0) == 1); - VG_CHECK(nonce2,32); - CHECK(nonce_function_rfc6979(nonce3, zeros, zeros, NULL, (void *)zeros, 0) == 1); - VG_CHECK(nonce3,32); - CHECK(nonce_function_rfc6979(nonce4, zeros, zeros, zeros, (void *)zeros, 0) == 1); - VG_CHECK(nonce4,32); - CHECK(memcmp(nonce, nonce2, 32) != 0); - CHECK(memcmp(nonce, nonce3, 32) != 0); - CHECK(memcmp(nonce, nonce4, 32) != 0); - CHECK(memcmp(nonce2, nonce3, 32) != 0); - CHECK(memcmp(nonce2, nonce4, 32) != 0); - CHECK(memcmp(nonce3, nonce4, 32) != 0); - } - - - /* Privkey export where pubkey is the point at infinity. */ - { - unsigned char privkey[300]; - unsigned char seckey[32] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, - 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41, - }; - size_t outlen = 300; - CHECK(!ec_privkey_export_der(ctx, privkey, &outlen, seckey, 0)); - outlen = 300; - CHECK(!ec_privkey_export_der(ctx, privkey, &outlen, seckey, 1)); - } -} - -void run_ecdsa_edge_cases(void) { - test_ecdsa_edge_cases(); -} - -#ifdef ENABLE_OPENSSL_TESTS -EC_KEY *get_openssl_key(const unsigned char *key32) { - unsigned char privkey[300]; - size_t privkeylen; - const unsigned char* pbegin = privkey; - int compr = secp256k1_rand_bits(1); - EC_KEY *ec_key = EC_KEY_new_by_curve_name(NID_secp256k1); - CHECK(ec_privkey_export_der(ctx, privkey, &privkeylen, key32, compr)); - CHECK(d2i_ECPrivateKey(&ec_key, &pbegin, privkeylen)); - CHECK(EC_KEY_check_key(ec_key)); - return ec_key; -} - -void test_ecdsa_openssl(void) { - secp256k1_gej qj; - secp256k1_ge q; - secp256k1_scalar sigr, sigs; - secp256k1_scalar one; - secp256k1_scalar msg2; - secp256k1_scalar key, msg; - EC_KEY *ec_key; - unsigned int sigsize = 80; - size_t secp_sigsize = 80; - unsigned char message[32]; - unsigned char signature[80]; - unsigned char key32[32]; - secp256k1_rand256_test(message); - secp256k1_scalar_set_b32(&msg, message, NULL); - random_scalar_order_test(&key); - secp256k1_scalar_get_b32(key32, &key); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &qj, &key); - secp256k1_ge_set_gej(&q, &qj); - ec_key = get_openssl_key(key32); - CHECK(ec_key != NULL); - CHECK(ECDSA_sign(0, message, sizeof(message), signature, &sigsize, ec_key)); - CHECK(secp256k1_ecdsa_sig_parse(&sigr, &sigs, signature, sigsize)); - CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg)); - secp256k1_scalar_set_int(&one, 1); - secp256k1_scalar_add(&msg2, &msg, &one); - CHECK(!secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg2)); - - random_sign(&sigr, &sigs, &key, &msg, NULL); - CHECK(secp256k1_ecdsa_sig_serialize(signature, &secp_sigsize, &sigr, &sigs)); - CHECK(ECDSA_verify(0, message, sizeof(message), signature, secp_sigsize, ec_key) == 1); - - EC_KEY_free(ec_key); -} - -void run_ecdsa_openssl(void) { - int i; - for (i = 0; i < 10*count; i++) { - test_ecdsa_openssl(); - } -} -#endif - -#ifdef ENABLE_MODULE_ECDH -# include "modules/ecdh/tests_impl.h" -#endif - -#ifdef ENABLE_MODULE_SCHNORR -# include "modules/schnorr/tests_impl.h" -#endif - -#ifdef ENABLE_MODULE_RECOVERY -# 
include "modules/recovery/tests_impl.h" -#endif - -int main(int argc, char **argv) { - unsigned char seed16[16] = {0}; - unsigned char run32[32] = {0}; - /* find iteration count */ - if (argc > 1) { - count = strtol(argv[1], NULL, 0); - } - - /* find random seed */ - if (argc > 2) { - int pos = 0; - const char* ch = argv[2]; - while (pos < 16 && ch[0] != 0 && ch[1] != 0) { - unsigned short sh; - if (sscanf(ch, "%2hx", &sh)) { - seed16[pos] = sh; - } else { - break; - } - ch += 2; - pos++; - } - } else { - FILE *frand = fopen("/dev/urandom", "r"); - if ((frand == NULL) || !fread(&seed16, sizeof(seed16), 1, frand)) { - uint64_t t = time(NULL) * (uint64_t)1337; - seed16[0] ^= t; - seed16[1] ^= t >> 8; - seed16[2] ^= t >> 16; - seed16[3] ^= t >> 24; - seed16[4] ^= t >> 32; - seed16[5] ^= t >> 40; - seed16[6] ^= t >> 48; - seed16[7] ^= t >> 56; - } - fclose(frand); - } - secp256k1_rand_seed(seed16); - - printf("test count = %i\n", count); - printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]); - - /* initialize */ - run_context_tests(); - ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - if (secp256k1_rand_bits(1)) { - secp256k1_rand256(run32); - CHECK(secp256k1_context_randomize(ctx, secp256k1_rand_bits(1) ? run32 : NULL)); - } - - run_rand_bits(); - run_rand_int(); - - run_sha256_tests(); - run_hmac_sha256_tests(); - run_rfc6979_hmac_sha256_tests(); - -#ifndef USE_NUM_NONE - /* num tests */ - run_num_smalltests(); -#endif - - /* scalar tests */ - run_scalar_tests(); - - /* field tests */ - run_field_inv(); - run_field_inv_var(); - run_field_inv_all_var(); - run_field_misc(); - run_field_convert(); - run_sqr(); - run_sqrt(); - - /* group tests */ - run_ge(); - run_group_decompress(); - - /* ecmult tests */ - run_wnaf(); - run_point_times_order(); - run_ecmult_chain(); - run_ecmult_constants(); - run_ecmult_gen_blind(); - run_ecmult_const_tests(); - run_ec_combine(); - - /* endomorphism tests */ -#ifdef USE_ENDOMORPHISM - run_endomorphism_tests(); -#endif - - /* EC point parser test */ - run_ec_pubkey_parse_test(); - - /* EC key edge cases */ - run_eckey_edge_case_test(); - -#ifdef ENABLE_MODULE_ECDH - /* ecdh tests */ - run_ecdh_tests(); -#endif - - /* ecdsa tests */ - run_random_pubkeys(); - run_ecdsa_der_parse(); - run_ecdsa_sign_verify(); - run_ecdsa_end_to_end(); - run_ecdsa_edge_cases(); -#ifdef ENABLE_OPENSSL_TESTS - run_ecdsa_openssl(); -#endif - -#ifdef ENABLE_MODULE_SCHNORR - /* Schnorr tests */ - run_schnorr_tests(); -#endif - -#ifdef ENABLE_MODULE_RECOVERY - /* ECDSA pubkey recovery tests */ - run_recovery_tests(); -#endif - - secp256k1_rand256(run32); - printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]); - - /* shutdown */ - secp256k1_context_destroy(ctx); - - printf("no problems found\n"); - return 0; -} diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/tests_exhaustive.c b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/tests_exhaustive.c deleted file mode 100644 index b040bb073..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/tests_exhaustive.c +++ /dev/null @@ -1,470 +0,0 @@ 
-/*********************************************************************** - * Copyright (c) 2016 Andrew Poelstra * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#if defined HAVE_CONFIG_H -#include "libsecp256k1-config.h" -#endif - -#include <stdio.h> -#include <string.h> - -#include <time.h> - -#undef USE_ECMULT_STATIC_PRECOMPUTATION - -#ifndef EXHAUSTIVE_TEST_ORDER -/* see group_impl.h for allowable values */ -#define EXHAUSTIVE_TEST_ORDER 13 -#define EXHAUSTIVE_TEST_LAMBDA 9 /* cube root of 1 mod 13 */ -#endif - -#include "include/secp256k1.h" -#include "group.h" -#include "secp256k1.c" -#include "testrand_impl.h" - -#ifdef ENABLE_MODULE_RECOVERY -#include "src/modules/recovery/main_impl.h" -#include "include/secp256k1_recovery.h" -#endif - -/** stolen from tests.c */ -void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) { - CHECK(a->infinity == b->infinity); - if (a->infinity) { - return; - } - CHECK(secp256k1_fe_equal_var(&a->x, &b->x)); - CHECK(secp256k1_fe_equal_var(&a->y, &b->y)); -} - -void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) { - secp256k1_fe z2s; - secp256k1_fe u1, u2, s1, s2; - CHECK(a->infinity == b->infinity); - if (a->infinity) { - return; - } - /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */ - secp256k1_fe_sqr(&z2s, &b->z); - secp256k1_fe_mul(&u1, &a->x, &z2s); - u2 = b->x; secp256k1_fe_normalize_weak(&u2); - secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z); - s2 = b->y; secp256k1_fe_normalize_weak(&s2); - CHECK(secp256k1_fe_equal_var(&u1, &u2)); - CHECK(secp256k1_fe_equal_var(&s1, &s2)); -} - -void random_fe(secp256k1_fe *x) { - unsigned char bin[32]; - do { - secp256k1_rand256(bin); - if (secp256k1_fe_set_b32(x, bin)) { - return; - } - } while(1); -} -/** END stolen from tests.c */ - -int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, - const unsigned char *key32, const unsigned char *algo16, - void *data, unsigned int attempt) { - secp256k1_scalar s; - int *idata = data; - (void)msg32; - (void)key32; - (void)algo16; - /* Some nonces cannot be used because they'd cause s and/or r to be zero. - * The signing function has retry logic here that just re-calls the nonce - * function with an increased `attempt`. So if attempt > 0 this means we - * need to change the nonce to avoid an infinite loop. 
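Stepping the nonce by one modulo the group order guarantees that every candidate value is eventually tried.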
*/ - if (attempt > 0) { - *idata = (*idata + 1) % EXHAUSTIVE_TEST_ORDER; - } - secp256k1_scalar_set_int(&s, *idata); - secp256k1_scalar_get_b32(nonce32, &s); - return 1; -} - -#ifdef USE_ENDOMORPHISM -void test_exhaustive_endomorphism(const secp256k1_ge *group, int order) { - int i; - for (i = 0; i < order; i++) { - secp256k1_ge res; - secp256k1_ge_mul_lambda(&res, &group[i]); - ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res); - } -} -#endif - -void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj, int order) { - int i, j; - - /* Sanity-check (and check infinity functions) */ - CHECK(secp256k1_ge_is_infinity(&group[0])); - CHECK(secp256k1_gej_is_infinity(&groupj[0])); - for (i = 1; i < order; i++) { - CHECK(!secp256k1_ge_is_infinity(&group[i])); - CHECK(!secp256k1_gej_is_infinity(&groupj[i])); - } - - /* Check all addition formulae */ - for (j = 0; j < order; j++) { - secp256k1_fe fe_inv; - secp256k1_fe_inv(&fe_inv, &groupj[j].z); - for (i = 0; i < order; i++) { - secp256k1_ge zless_gej; - secp256k1_gej tmp; - /* add_var */ - secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL); - ge_equals_gej(&group[(i + j) % order], &tmp); - /* add_ge */ - if (j > 0) { - secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]); - ge_equals_gej(&group[(i + j) % order], &tmp); - } - /* add_ge_var */ - secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL); - ge_equals_gej(&group[(i + j) % order], &tmp); - /* add_zinv_var */ - zless_gej.infinity = groupj[j].infinity; - zless_gej.x = groupj[j].x; - zless_gej.y = groupj[j].y; - secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv); - ge_equals_gej(&group[(i + j) % order], &tmp); - } - } - - /* Check doubling */ - for (i = 0; i < order; i++) { - secp256k1_gej tmp; - if (i > 0) { - secp256k1_gej_double_nonzero(&tmp, &groupj[i], NULL); - ge_equals_gej(&group[(2 * i) % order], &tmp); - } - secp256k1_gej_double_var(&tmp, &groupj[i], NULL); - ge_equals_gej(&group[(2 * i) % order], &tmp); - } - - /* Check negation */ - for (i = 1; i < order; i++) { - secp256k1_ge tmp; - secp256k1_gej tmpj; - secp256k1_ge_neg(&tmp, &group[i]); - ge_equals_ge(&group[order - i], &tmp); - secp256k1_gej_neg(&tmpj, &groupj[i]); - ge_equals_gej(&group[order - i], &tmpj); - } -} - -void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj, int order) { - int i, j, r_log; - for (r_log = 1; r_log < order; r_log++) { - for (j = 0; j < order; j++) { - for (i = 0; i < order; i++) { - secp256k1_gej tmp; - secp256k1_scalar na, ng; - secp256k1_scalar_set_int(&na, i); - secp256k1_scalar_set_int(&ng, j); - - secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng); - ge_equals_gej(&group[(i * r_log + j) % order], &tmp); - - if (i > 0) { - secp256k1_ecmult_const(&tmp, &group[i], &ng); - ge_equals_gej(&group[(i * j) % order], &tmp); - } - } - } - } -} - -void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) { - secp256k1_fe x; - unsigned char x_bin[32]; - k %= EXHAUSTIVE_TEST_ORDER; - x = group[k].x; - secp256k1_fe_normalize(&x); - secp256k1_fe_get_b32(x_bin, &x); - secp256k1_scalar_set_b32(r, x_bin, NULL); -} - -void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { - int s, r, msg, key; - for (s = 1; s < order; s++) { - for (r = 1; r < order; r++) { - for (msg = 1; msg < order; msg++) { - for (key = 1; key < order; key++) { - secp256k1_ge nonconst_ge; - secp256k1_ecdsa_signature sig; - secp256k1_pubkey 
pk; - secp256k1_scalar sk_s, msg_s, r_s, s_s; - secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s; - int k, should_verify; - unsigned char msg32[32]; - - secp256k1_scalar_set_int(&s_s, s); - secp256k1_scalar_set_int(&r_s, r); - secp256k1_scalar_set_int(&msg_s, msg); - secp256k1_scalar_set_int(&sk_s, key); - - /* Verify by hand */ - /* Run through every k value that gives us this r and check that *one* works. - * Note there could be none, there could be multiple, ECDSA is weird. */ - should_verify = 0; - for (k = 0; k < order; k++) { - secp256k1_scalar check_x_s; - r_from_k(&check_x_s, group, k); - if (r_s == check_x_s) { - secp256k1_scalar_set_int(&s_times_k_s, k); - secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); - secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); - secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); - should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); - } - } - /* nb we have a "high s" rule */ - should_verify &= !secp256k1_scalar_is_high(&s_s); - - /* Verify by calling verify */ - secp256k1_ecdsa_signature_save(&sig, &r_s, &s_s); - memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge)); - secp256k1_pubkey_save(&pk, &nonconst_ge); - secp256k1_scalar_get_b32(msg32, &msg_s); - CHECK(should_verify == - secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk)); - } - } - } - } -} - -void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { - int i, j, k; - - /* Loop */ - for (i = 1; i < order; i++) { /* message */ - for (j = 1; j < order; j++) { /* key */ - for (k = 1; k < order; k++) { /* nonce */ - const int starting_k = k; - secp256k1_ecdsa_signature sig; - secp256k1_scalar sk, msg, r, s, expected_r; - unsigned char sk32[32], msg32[32]; - secp256k1_scalar_set_int(&msg, i); - secp256k1_scalar_set_int(&sk, j); - secp256k1_scalar_get_b32(sk32, &sk); - secp256k1_scalar_get_b32(msg32, &msg); - - secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k); - - secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig); - /* Note that we compute expected_r *after* signing -- this is important - * because our nonce-computing function might change k during - * signing. */ - r_from_k(&expected_r, group, k); - CHECK(r == expected_r); - CHECK((k * s) % order == (i + r * j) % order || - (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order); - - /* Overflow means we've tried every possible nonce */ - if (k < starting_k) { - break; - } - } - } - } - - /* We would like to verify zero-knowledge here by counting how often every - * possible (s, r) tuple appears, but because the group order is larger - * than the field order, when coercing the x-values to scalar values, some - * appear more often than others, so we are actually not zero-knowledge. - * (This effect also appears in the real code, but the difference is on the - * order of 1/2^128th the field order, so the deviation is not useful to a - * computationally bounded attacker.) 
- */ -} - -#ifdef ENABLE_MODULE_RECOVERY -void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { - int i, j, k; - - /* Loop */ - for (i = 1; i < order; i++) { /* message */ - for (j = 1; j < order; j++) { /* key */ - for (k = 1; k < order; k++) { /* nonce */ - const int starting_k = k; - secp256k1_fe r_dot_y_normalized; - secp256k1_ecdsa_recoverable_signature rsig; - secp256k1_ecdsa_signature sig; - secp256k1_scalar sk, msg, r, s, expected_r; - unsigned char sk32[32], msg32[32]; - int expected_recid; - int recid; - secp256k1_scalar_set_int(&msg, i); - secp256k1_scalar_set_int(&sk, j); - secp256k1_scalar_get_b32(sk32, &sk); - secp256k1_scalar_get_b32(msg32, &msg); - - secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, &k); - - /* Check directly */ - secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig); - r_from_k(&expected_r, group, k); - CHECK(r == expected_r); - CHECK((k * s) % order == (i + r * j) % order || - (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order); - /* In computing the recid, there is an overflow condition that is disabled in - * scalar_low_impl.h `secp256k1_scalar_set_b32` because almost every r.y value - * will exceed the group order, and our signing code always holds out for r - * values that don't overflow, so with a proper overflow check the tests would - * loop indefinitely. */ - r_dot_y_normalized = group[k].y; - secp256k1_fe_normalize(&r_dot_y_normalized); - /* Also the recovery id is flipped depending if we hit the low-s branch */ - if ((k * s) % order == (i + r * j) % order) { - expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 1 : 0; - } else { - expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 0 : 1; - } - CHECK(recid == expected_recid); - - /* Convert to a standard sig then check */ - secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); - secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig); - /* Note that we compute expected_r *after* signing -- this is important - * because our nonce-computing function function might change k during - * signing. */ - r_from_k(&expected_r, group, k); - CHECK(r == expected_r); - CHECK((k * s) % order == (i + r * j) % order || - (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order); - - /* Overflow means we've tried every possible nonce */ - if (k < starting_k) { - break; - } - } - } - } -} - -void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) { - /* This is essentially a copy of test_exhaustive_verify, with recovery added */ - int s, r, msg, key; - for (s = 1; s < order; s++) { - for (r = 1; r < order; r++) { - for (msg = 1; msg < order; msg++) { - for (key = 1; key < order; key++) { - secp256k1_ge nonconst_ge; - secp256k1_ecdsa_recoverable_signature rsig; - secp256k1_ecdsa_signature sig; - secp256k1_pubkey pk; - secp256k1_scalar sk_s, msg_s, r_s, s_s; - secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s; - int recid = 0; - int k, should_verify; - unsigned char msg32[32]; - - secp256k1_scalar_set_int(&s_s, s); - secp256k1_scalar_set_int(&r_s, r); - secp256k1_scalar_set_int(&msg_s, msg); - secp256k1_scalar_set_int(&sk_s, key); - secp256k1_scalar_get_b32(msg32, &msg_s); - - /* Verify by hand */ - /* Run through every k value that gives us this r and check that *one* works. - * Note there could be none, there could be multiple, ECDSA is weird. 
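- * (Concretely: should_verify is set when s*k == msg + r*sk (mod n) for some k with r == x(k*G), which is exactly what the scalar arithmetic below checks.)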
*/ - should_verify = 0; - for (k = 0; k < order; k++) { - secp256k1_scalar check_x_s; - r_from_k(&check_x_s, group, k); - if (r_s == check_x_s) { - secp256k1_scalar_set_int(&s_times_k_s, k); - secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s); - secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s); - secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s); - should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s); - } - } - /* nb we have a "high s" rule */ - should_verify &= !secp256k1_scalar_is_high(&s_s); - - /* We would like to try recovering the pubkey and checking that it matches, - * but pubkey recovery is impossible in the exhaustive tests (the reason - * being that there are 12 nonzero r values, 12 nonzero points, and no - * overlap between the sets, so there are no valid signatures). */ - - /* Verify by converting to a standard signature and calling verify */ - secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid); - secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig); - memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge)); - secp256k1_pubkey_save(&pk, &nonconst_ge); - CHECK(should_verify == - secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk)); - } - } - } - } -} -#endif - -int main(void) { - int i; - secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER]; - secp256k1_ge group[EXHAUSTIVE_TEST_ORDER]; - - /* Build context */ - secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - - /* TODO set z = 1, then do num_tests runs with random z values */ - - /* Generate the entire group */ - secp256k1_gej_set_infinity(&groupj[0]); - secp256k1_ge_set_gej(&group[0], &groupj[0]); - for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { - /* Set a different random z-value for each Jacobian point */ - secp256k1_fe z; - random_fe(&z); - - secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g); - secp256k1_ge_set_gej(&group[i], &groupj[i]); - secp256k1_gej_rescale(&groupj[i], &z); - - /* Verify against ecmult_gen */ - { - secp256k1_scalar scalar_i; - secp256k1_gej generatedj; - secp256k1_ge generated; - - secp256k1_scalar_set_int(&scalar_i, i); - secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i); - secp256k1_ge_set_gej(&generated, &generatedj); - - CHECK(group[i].infinity == 0); - CHECK(generated.infinity == 0); - CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x)); - CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y)); - } - } - - /* Run the tests */ -#ifdef USE_ENDOMORPHISM - test_exhaustive_endomorphism(group, EXHAUSTIVE_TEST_ORDER); -#endif - test_exhaustive_addition(group, groupj, EXHAUSTIVE_TEST_ORDER); - test_exhaustive_ecmult(ctx, group, groupj, EXHAUSTIVE_TEST_ORDER); - test_exhaustive_sign(ctx, group, EXHAUSTIVE_TEST_ORDER); - test_exhaustive_verify(ctx, group, EXHAUSTIVE_TEST_ORDER); - -#ifdef ENABLE_MODULE_RECOVERY - test_exhaustive_recovery_sign(ctx, group, EXHAUSTIVE_TEST_ORDER); - test_exhaustive_recovery_verify(ctx, group, EXHAUSTIVE_TEST_ORDER); -#endif - - secp256k1_context_destroy(ctx); - return 0; -} - diff --git a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/util.h b/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/util.h deleted file mode 100644 index 4092a86c9..000000000 --- a/crypto/secp256k1/internal/secp256k1/libsecp256k1/src/util.h +++ /dev/null @@ -1,113 +0,0 @@ -/********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT 
software license, see the accompanying * - * file COPYING or http://www.opensource.org/licenses/mit-license.php.* - **********************************************************************/ - -#ifndef _SECP256K1_UTIL_H_ -#define _SECP256K1_UTIL_H_ - -#if defined HAVE_CONFIG_H -#include "libsecp256k1-config.h" -#endif - -#include <stdlib.h> -#include <stdint.h> -#include <stdio.h> - -typedef struct { - void (*fn)(const char *text, void* data); - const void* data; -} secp256k1_callback; - -static SECP256K1_INLINE void secp256k1_callback_call(const secp256k1_callback * const cb, const char * const text) { - cb->fn(text, (void*)cb->data); -} - -#ifdef DETERMINISTIC -#define TEST_FAILURE(msg) do { \ - fprintf(stderr, "%s\n", msg); \ - abort(); \ -} while(0); -#else -#define TEST_FAILURE(msg) do { \ - fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, msg); \ - abort(); \ -} while(0) -#endif - -#ifdef HAVE_BUILTIN_EXPECT -#define EXPECT(x,c) __builtin_expect((x),(c)) -#else -#define EXPECT(x,c) (x) -#endif - -#ifdef DETERMINISTIC -#define CHECK(cond) do { \ - if (EXPECT(!(cond), 0)) { \ - TEST_FAILURE("test condition failed"); \ - } \ -} while(0) -#else -#define CHECK(cond) do { \ - if (EXPECT(!(cond), 0)) { \ - TEST_FAILURE("test condition failed: " #cond); \ - } \ -} while(0) -#endif - -/* Like assert(), but when VERIFY is defined, and side-effect safe. */ -#if defined(COVERAGE) -#define VERIFY_CHECK(check) -#define VERIFY_SETUP(stmt) -#elif defined(VERIFY) -#define VERIFY_CHECK CHECK -#define VERIFY_SETUP(stmt) do { stmt; } while(0) -#else -#define VERIFY_CHECK(cond) do { (void)(cond); } while(0) -#define VERIFY_SETUP(stmt) -#endif - -static SECP256K1_INLINE void *checked_malloc(const secp256k1_callback* cb, size_t size) { - void *ret = malloc(size); - if (ret == NULL) { - secp256k1_callback_call(cb, "Out of memory"); - } - return ret; -} - -/* Macro for restrict, when available and not in a VERIFY build. */ -#if defined(SECP256K1_BUILD) && defined(VERIFY) -# define SECP256K1_RESTRICT -#else -# if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) ) -# if SECP256K1_GNUC_PREREQ(3,0) -# define SECP256K1_RESTRICT __restrict__ -# elif (defined(_MSC_VER) && _MSC_VER >= 1400) -# define SECP256K1_RESTRICT __restrict -# else -# define SECP256K1_RESTRICT -# endif -# else -# define SECP256K1_RESTRICT restrict -# endif -#endif - -#if defined(_WIN32) -# define I64FORMAT "I64d" -# define I64uFORMAT "I64u" -#else -# define I64FORMAT "lld" -# define I64uFORMAT "llu" -#endif - -#if defined(HAVE___INT128) -# if defined(__GNUC__) -# define SECP256K1_GNUC_EXT __extension__ -# else -# define SECP256K1_GNUC_EXT -# endif -SECP256K1_GNUC_EXT typedef unsigned __int128 uint128_t; -#endif - -#endif diff --git a/crypto/secp256k1/internal/secp256k1/panic_cb.go b/crypto/secp256k1/internal/secp256k1/panic_cb.go deleted file mode 100644 index 6d59a1d24..000000000 --- a/crypto/secp256k1/internal/secp256k1/panic_cb.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be found in -// the LICENSE file. - -package secp256k1 - -import "C" -import "unsafe" - -// Callbacks for converting libsecp256k1 internal faults into -// recoverable Go panics.
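-// They are installed on the library's C context in secp256.go's init via -// secp256k1_context_set_illegal_callback and secp256k1_context_set_error_callback.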
- -//export secp256k1GoPanicIllegal -func secp256k1GoPanicIllegal(msg *C.char, data unsafe.Pointer) { - panic("illegal argument: " + C.GoString(msg)) -} - -//export secp256k1GoPanicError -func secp256k1GoPanicError(msg *C.char, data unsafe.Pointer) { - panic("internal error: " + C.GoString(msg)) -} diff --git a/crypto/secp256k1/internal/secp256k1/secp256.go b/crypto/secp256k1/internal/secp256k1/secp256.go deleted file mode 100644 index 35d0eef34..000000000 --- a/crypto/secp256k1/internal/secp256k1/secp256.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be found in -// the LICENSE file. - -// Package secp256k1 wraps the bitcoin secp256k1 C library. -package secp256k1 - -/* -#cgo CFLAGS: -I./libsecp256k1 -#cgo CFLAGS: -I./libsecp256k1/src/ -#define USE_NUM_NONE -#define USE_FIELD_10X26 -#define USE_FIELD_INV_BUILTIN -#define USE_SCALAR_8X32 -#define USE_SCALAR_INV_BUILTIN -#define NDEBUG -#include "./libsecp256k1/src/secp256k1.c" -#include "./libsecp256k1/src/modules/recovery/main_impl.h" -#include "ext.h" - -typedef void (*callbackFunc) (const char* msg, void* data); -extern void secp256k1GoPanicIllegal(const char* msg, void* data); -extern void secp256k1GoPanicError(const char* msg, void* data); -*/ -import "C" - -import ( - "errors" - "math/big" - "unsafe" -) - -var context *C.secp256k1_context - -func init() { - // around 20 ms on a modern CPU. - context = C.secp256k1_context_create_sign_verify() - C.secp256k1_context_set_illegal_callback(context, C.callbackFunc(C.secp256k1GoPanicIllegal), nil) - C.secp256k1_context_set_error_callback(context, C.callbackFunc(C.secp256k1GoPanicError), nil) -} - -var ( - ErrInvalidMsgLen = errors.New("invalid message length, need 32 bytes") - ErrInvalidSignatureLen = errors.New("invalid signature length") - ErrInvalidRecoveryID = errors.New("invalid signature recovery id") - ErrInvalidKey = errors.New("invalid private key") - ErrInvalidPubkey = errors.New("invalid public key") - ErrSignFailed = errors.New("signing failed") - ErrRecoverFailed = errors.New("recovery failed") -) - -// Sign creates a recoverable ECDSA signature. -// The produced signature is in the 65-byte [R || S || V] format where V is 0 or 1. -// -// The caller is responsible for ensuring that msg cannot be chosen -// directly by an attacker. It is usually preferable to use a cryptographic -// hash function on any input before handing it to this function. -func Sign(msg []byte, seckey []byte) ([]byte, error) { - if len(msg) != 32 { - return nil, ErrInvalidMsgLen - } - if len(seckey) != 32 { - return nil, ErrInvalidKey - } - seckeydata := (*C.uchar)(unsafe.Pointer(&seckey[0])) - if C.secp256k1_ec_seckey_verify(context, seckeydata) != 1 { - return nil, ErrInvalidKey - } - - var ( - msgdata = (*C.uchar)(unsafe.Pointer(&msg[0])) - noncefunc = C.secp256k1_nonce_function_rfc6979 - sigstruct C.secp256k1_ecdsa_recoverable_signature - ) - if C.secp256k1_ecdsa_sign_recoverable(context, &sigstruct, msgdata, seckeydata, noncefunc, nil) == 0 { - return nil, ErrSignFailed - } - - var ( - sig = make([]byte, 65) - sigdata = (*C.uchar)(unsafe.Pointer(&sig[0])) - recid C.int - ) - C.secp256k1_ecdsa_recoverable_signature_serialize_compact(context, sigdata, &recid, &sigstruct) - sig[64] = byte(recid) // add back recid to get 65 bytes sig - return sig, nil -} - -// RecoverPubkey returns the public key of the signer. 
-// msg must be the 32-byte hash of the message to be signed. -// sig must be a 65-byte compact ECDSA signature containing the -// recovery id as the last element. -func RecoverPubkey(msg []byte, sig []byte) ([]byte, error) { - if len(msg) != 32 { - return nil, ErrInvalidMsgLen - } - if err := checkSignature(sig); err != nil { - return nil, err - } - - var ( - pubkey = make([]byte, 65) - sigdata = (*C.uchar)(unsafe.Pointer(&sig[0])) - msgdata = (*C.uchar)(unsafe.Pointer(&msg[0])) - ) - if C.secp256k1_ext_ecdsa_recover(context, (*C.uchar)(unsafe.Pointer(&pubkey[0])), sigdata, msgdata) == 0 { - return nil, ErrRecoverFailed - } - return pubkey, nil -} - -// VerifySignature checks that the given pubkey created signature over message. -// The signature should be in [R || S] format. -func VerifySignature(pubkey, msg, signature []byte) bool { - if len(msg) != 32 || len(signature) != 64 || len(pubkey) == 0 { - return false - } - sigdata := (*C.uchar)(unsafe.Pointer(&signature[0])) - msgdata := (*C.uchar)(unsafe.Pointer(&msg[0])) - keydata := (*C.uchar)(unsafe.Pointer(&pubkey[0])) - return C.secp256k1_ext_ecdsa_verify(context, sigdata, msgdata, keydata, C.size_t(len(pubkey))) != 0 -} - -// DecompressPubkey parses a public key in the 33-byte compressed format. -// It returns non-nil coordinates if the public key is valid. -func DecompressPubkey(pubkey []byte) (x, y *big.Int) { - if len(pubkey) != 33 { - return nil, nil - } - var ( - pubkeydata = (*C.uchar)(unsafe.Pointer(&pubkey[0])) - pubkeylen = C.size_t(len(pubkey)) - out = make([]byte, 65) - outdata = (*C.uchar)(unsafe.Pointer(&out[0])) - outlen = C.size_t(len(out)) - ) - if C.secp256k1_ext_reencode_pubkey(context, outdata, outlen, pubkeydata, pubkeylen) == 0 { - return nil, nil - } - return new(big.Int).SetBytes(out[1:33]), new(big.Int).SetBytes(out[33:]) -} - -// CompressPubkey encodes a public key to 33-byte compressed format. 
-func CompressPubkey(x, y *big.Int) []byte { - var ( - pubkey = S256().Marshal(x, y) - pubkeydata = (*C.uchar)(unsafe.Pointer(&pubkey[0])) - pubkeylen = C.size_t(len(pubkey)) - out = make([]byte, 33) - outdata = (*C.uchar)(unsafe.Pointer(&out[0])) - outlen = C.size_t(len(out)) - ) - if C.secp256k1_ext_reencode_pubkey(context, outdata, outlen, pubkeydata, pubkeylen) == 0 { - panic("libsecp256k1 error") - } - return out -} - -func checkSignature(sig []byte) error { - if len(sig) != 65 { - return ErrInvalidSignatureLen - } - if sig[64] >= 4 { - return ErrInvalidRecoveryID - } - return nil -} diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index 5338d10a5..fe5296900 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -11,71 +11,70 @@ import ( secp256k1 "github.com/btcsuite/btcd/btcec" "golang.org/x/crypto/ripemd160" // nolint: staticcheck // necessary for Bitcoin address format - amino "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/crypto" + tmjson "github.com/tendermint/tendermint/libs/json" ) //------------------------------------- const ( - PrivKeyAminoName = "tendermint/PrivKeySecp256k1" - PubKeyAminoName = "tendermint/PubKeySecp256k1" -) + PrivKeyName = "tendermint/PrivKeySecp256k1" + PubKeyName = "tendermint/PubKeySecp256k1" -var cdc = amino.NewCodec() + KeyType = "secp256k1" + PrivKeySize = 32 +) func init() { - cdc.RegisterInterface((*crypto.PubKey)(nil), nil) - cdc.RegisterConcrete(PubKeySecp256k1{}, - PubKeyAminoName, nil) - - cdc.RegisterInterface((*crypto.PrivKey)(nil), nil) - cdc.RegisterConcrete(PrivKeySecp256k1{}, - PrivKeyAminoName, nil) + tmjson.RegisterType(PubKey{}, PubKeyName) + tmjson.RegisterType(PrivKey{}, PrivKeyName) } -//------------------------------------- - -var _ crypto.PrivKey = PrivKeySecp256k1{} +var _ crypto.PrivKey = PrivKey{} -// PrivKeySecp256k1 implements PrivKey. -type PrivKeySecp256k1 [32]byte +// PrivKey implements PrivKey. +type PrivKey []byte // Bytes marshalls the private key using amino encoding. -func (privKey PrivKeySecp256k1) Bytes() []byte { - return cdc.MustMarshalBinaryBare(privKey) +func (privKey PrivKey) Bytes() []byte { + return []byte(privKey) } // PubKey performs the point-scalar multiplication from the privKey on the // generator point to get the pubkey. -func (privKey PrivKeySecp256k1) PubKey() crypto.PubKey { - _, pubkeyObject := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:]) - var pubkeyBytes PubKeySecp256k1 - copy(pubkeyBytes[:], pubkeyObject.SerializeCompressed()) - return pubkeyBytes +func (privKey PrivKey) PubKey() crypto.PubKey { + _, pubkeyObject := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey) + + pk := pubkeyObject.SerializeCompressed() + + return PubKey(pk) } // Equals - you probably don't need to use this. // Runs in constant time based on length of the keys. -func (privKey PrivKeySecp256k1) Equals(other crypto.PrivKey) bool { - if otherSecp, ok := other.(PrivKeySecp256k1); ok { +func (privKey PrivKey) Equals(other crypto.PrivKey) bool { + if otherSecp, ok := other.(PrivKey); ok { return subtle.ConstantTimeCompare(privKey[:], otherSecp[:]) == 1 } return false } +func (privKey PrivKey) Type() string { + return KeyType +} + // GenPrivKey generates a new ECDSA private key on curve secp256k1 private key. // It uses OS randomness to generate the private key. 
-func GenPrivKey() PrivKeySecp256k1 { +func GenPrivKey() PrivKey { return genPrivKey(crypto.CReader()) } // genPrivKey generates a new secp256k1 private key using the provided reader. -func genPrivKey(rand io.Reader) PrivKeySecp256k1 { - var privKeyBytes [32]byte +func genPrivKey(rand io.Reader) PrivKey { + var privKeyBytes [PrivKeySize]byte d := new(big.Int) + for { - privKeyBytes = [32]byte{} + privKeyBytes = [PrivKeySize]byte{} _, err := io.ReadFull(rand, privKeyBytes[:]) if err != nil { panic(err) @@ -89,7 +88,7 @@ func genPrivKey(rand io.Reader) PrivKeySecp256k1 { } } - return PrivKeySecp256k1(privKeyBytes) + return PrivKey(privKeyBytes[:]) } var one = new(big.Int).SetInt64(1) @@ -104,7 +103,7 @@ var one = new(big.Int).SetInt64(1) // // NOTE: secret should be the output of a KDF like bcrypt, // if it's derived from user input. -func GenPrivKeySecp256k1(secret []byte) PrivKeySecp256k1 { +func GenPrivKeySecp256k1(secret []byte) PrivKey { secHash := sha256.Sum256(secret) // to guarantee that we have a valid field element, we use the approach of: // "Suite B Implementer’s Guide to FIPS 186-3", A.2.1 @@ -116,55 +115,59 @@ func GenPrivKeySecp256k1(secret []byte) PrivKeySecp256k1 { fe.Add(fe, one) feB := fe.Bytes() - var privKey32 [32]byte + privKey32 := make([]byte, PrivKeySize) // copy feB over to fixed 32 byte privKey32 and pad (if necessary) copy(privKey32[32-len(feB):32], feB) - return PrivKeySecp256k1(privKey32) + return PrivKey(privKey32) } //------------------------------------- -var _ crypto.PubKey = PubKeySecp256k1{} +var _ crypto.PubKey = PubKey{} -// PubKeySecp256k1Size is comprised of 32 bytes for one field element +// PubKeySize is comprised of 32 bytes for one field element // (the x-coordinate), plus one byte for the parity of the y-coordinate. -const PubKeySecp256k1Size = 33 +const PubKeySize = 33 -// PubKeySecp256k1 implements crypto.PubKey. +// PubKey implements crypto.PubKey. // It is the compressed form of the pubkey. The first byte depends is a 0x02 byte // if the y-coordinate is the lexicographically largest of the two associated with // the x-coordinate. Otherwise the first byte is a 0x03. // This prefix is followed with the x-coordinate. -type PubKeySecp256k1 [PubKeySecp256k1Size]byte +type PubKey []byte // Address returns a Bitcoin style addresses: RIPEMD160(SHA256(pubkey)) -func (pubKey PubKeySecp256k1) Address() crypto.Address { +func (pubKey PubKey) Address() crypto.Address { + if len(pubKey) != PubKeySize { + panic("length of pubkey is incorrect") + } hasherSHA256 := sha256.New() - hasherSHA256.Write(pubKey[:]) // does not error + _, _ = hasherSHA256.Write(pubKey) // does not error sha := hasherSHA256.Sum(nil) hasherRIPEMD160 := ripemd160.New() - hasherRIPEMD160.Write(sha) // does not error + _, _ = hasherRIPEMD160.Write(sha) // does not error + return crypto.Address(hasherRIPEMD160.Sum(nil)) } // Bytes returns the pubkey marshalled with amino encoding. 
-func (pubKey PubKeySecp256k1) Bytes() []byte { - bz, err := cdc.MarshalBinaryBare(pubKey) - if err != nil { - panic(err) - } - return bz +func (pubKey PubKey) Bytes() []byte { + return []byte(pubKey) } -func (pubKey PubKeySecp256k1) String() string { +func (pubKey PubKey) String() string { return fmt.Sprintf("PubKeySecp256k1{%X}", pubKey[:]) } -func (pubKey PubKeySecp256k1) Equals(other crypto.PubKey) bool { - if otherSecp, ok := other.(PubKeySecp256k1); ok { +func (pubKey PubKey) Equals(other crypto.PubKey) bool { + if otherSecp, ok := other.(PubKey); ok { return bytes.Equal(pubKey[:], otherSecp[:]) } return false } + +func (pubKey PubKey) Type() string { + return KeyType +} diff --git a/crypto/secp256k1/secp256k1_cgo.go b/crypto/secp256k1/secp256k1_cgo.go deleted file mode 100644 index 3d4a553ac..000000000 --- a/crypto/secp256k1/secp256k1_cgo.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build libsecp256k1 - -package secp256k1 - -import ( - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/secp256k1/internal/secp256k1" -) - -// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg. -func (privKey PrivKeySecp256k1) Sign(msg []byte) ([]byte, error) { - rsv, err := secp256k1.Sign(crypto.Sha256(msg), privKey[:]) - if err != nil { - return nil, err - } - // we do not need v in r||s||v: - rs := rsv[:len(rsv)-1] - return rs, nil -} - -func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sig []byte) bool { - return secp256k1.VerifySignature(pubKey[:], crypto.Sha256(msg), sig) -} diff --git a/crypto/secp256k1/secp256k1_cgo_test.go b/crypto/secp256k1/secp256k1_cgo_test.go deleted file mode 100644 index 96b026bc9..000000000 --- a/crypto/secp256k1/secp256k1_cgo_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build libsecp256k1 - -package secp256k1 - -import ( - "testing" - - "github.com/magiconair/properties/assert" - - "github.com/stretchr/testify/require" -) - -func TestPrivKeySecp256k1SignVerify(t *testing.T) { - msg := []byte("A.1.2 ECC Key Pair Generation by Testing Candidates") - priv := GenPrivKey() - tests := []struct { - name string - privKey PrivKeySecp256k1 - wantSignErr bool - wantVerifyPasses bool - }{ - {name: "valid sign-verify round", privKey: priv, wantSignErr: false, wantVerifyPasses: true}, - {name: "invalid private key", privKey: [32]byte{}, wantSignErr: true, wantVerifyPasses: false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := tt.privKey.Sign(msg) - if tt.wantSignErr { - require.Error(t, err) - t.Logf("Got error: %s", err) - return - } - require.NoError(t, err) - require.NotNil(t, got) - - pub := tt.privKey.PubKey() - assert.Equal(t, tt.wantVerifyPasses, pub.VerifyBytes(msg, got)) - }) - } -} diff --git a/crypto/secp256k1/secp256k1_internal_test.go b/crypto/secp256k1/secp256k1_internal_test.go index 3103413f8..bceddc24f 100644 --- a/crypto/secp256k1/secp256k1_internal_test.go +++ b/crypto/secp256k1/secp256k1_internal_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" - underlyingSecp256k1 "github.com/btcsuite/btcd/btcec" + secp256k1 "github.com/btcsuite/btcd/btcec" ) func Test_genPrivKey(t *testing.T) { @@ -25,7 +25,7 @@ func Test_genPrivKey(t *testing.T) { shouldPanic bool }{ {"empty bytes (panics because 1st 32 bytes are zero and 0 is not a valid field element)", empty, true}, - {"curve order: N", underlyingSecp256k1.S256().N.Bytes(), true}, + {"curve order: N", secp256k1.S256().N.Bytes(), true}, {"valid because 0 < 1 < N", validOne, false}, } for _, tt := range tests 
{ @@ -39,8 +39,36 @@ func Test_genPrivKey(t *testing.T) { } got := genPrivKey(bytes.NewReader(tt.notSoRand)) fe := new(big.Int).SetBytes(got[:]) - require.True(t, fe.Cmp(underlyingSecp256k1.S256().N) < 0) + require.True(t, fe.Cmp(secp256k1.S256().N) < 0) require.True(t, fe.Sign() > 0) }) } } + +// Ensure that signature verification works, and that +// non-canonical signatures fail. +// Note: run with CGO_ENABLED=0 or go test -tags !cgo. +func TestSignatureVerificationAndRejectUpperS(t *testing.T) { + msg := []byte("We have lingered long enough on the shores of the cosmic ocean.") + for i := 0; i < 500; i++ { + priv := GenPrivKey() + sigStr, err := priv.Sign(msg) + require.NoError(t, err) + sig := signatureFromBytes(sigStr) + require.False(t, sig.S.Cmp(secp256k1halfN) > 0) + + pub := priv.PubKey() + require.True(t, pub.VerifySignature(msg, sigStr)) + + // malleate: + sig.S.Sub(secp256k1.S256().CurveParams.N, sig.S) + require.True(t, sig.S.Cmp(secp256k1halfN) > 0) + malSigStr := serializeSig(sig) + + require.False(t, pub.VerifySignature(msg, malSigStr), + "VerifyBytes incorrect with malleated & invalid S. sig=%v, key=%v", + sig, + priv, + ) + } +} diff --git a/crypto/secp256k1/secp256k1_nocgo.go b/crypto/secp256k1/secp256k1_nocgo.go index 18782b375..cba9bbe4c 100644 --- a/crypto/secp256k1/secp256k1_nocgo.go +++ b/crypto/secp256k1/secp256k1_nocgo.go @@ -18,26 +18,30 @@ var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1) // Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg. // The returned signature will be of the form R || S (in lower-S form). -func (privKey PrivKeySecp256k1) Sign(msg []byte) ([]byte, error) { - priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey[:]) +func (privKey PrivKey) Sign(msg []byte) ([]byte, error) { + priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey) + sig, err := priv.Sign(crypto.Sha256(msg)) if err != nil { return nil, err } + sigBytes := serializeSig(sig) return sigBytes, nil } -// VerifyBytes verifies a signature of the form R || S. +// VerifySignature verifies a signature of the form R || S. // It rejects signatures which are not in lower-S form. -func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sigStr []byte) bool { +func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool { if len(sigStr) != 64 { return false } - pub, err := secp256k1.ParsePubKey(pubKey[:], secp256k1.S256()) + + pub, err := secp256k1.ParsePubKey(pubKey, secp256k1.S256()) if err != nil { return false } + // parse the signature: signature := signatureFromBytes(sigStr) // Reject malleable signatures. libsecp256k1 does this check but btcec doesn't. @@ -45,6 +49,7 @@ func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sigStr []byte) bool { if signature.S.Cmp(secp256k1halfN) > 0 { return false } + return signature.Verify(crypto.Sha256(msg), pub) } diff --git a/crypto/secp256k1/secp256k1_nocgo_test.go b/crypto/secp256k1/secp256k1_nocgo_test.go deleted file mode 100644 index 17cb75815..000000000 --- a/crypto/secp256k1/secp256k1_nocgo_test.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build !libsecp256k1 - -package secp256k1 - -import ( - "testing" - - secp256k1 "github.com/btcsuite/btcd/btcec" - "github.com/stretchr/testify/require" -) - -// Ensure that signature verification works, and that -// non-canonical signatures fail. -// Note: run with CGO_ENABLED=0 or go test -tags !cgo. 
-func TestSignatureVerificationAndRejectUpperS(t *testing.T) { - msg := []byte("We have lingered long enough on the shores of the cosmic ocean.") - for i := 0; i < 500; i++ { - priv := GenPrivKey() - sigStr, err := priv.Sign(msg) - require.NoError(t, err) - sig := signatureFromBytes(sigStr) - require.False(t, sig.S.Cmp(secp256k1halfN) > 0) - - pub := priv.PubKey() - require.True(t, pub.VerifyBytes(msg, sigStr)) - - // malleate: - sig.S.Sub(secp256k1.S256().CurveParams.N, sig.S) - require.True(t, sig.S.Cmp(secp256k1halfN) > 0) - malSigStr := serializeSig(sig) - - require.False(t, pub.VerifyBytes(msg, malSigStr), - "VerifyBytes incorrect with malleated & invalid S. sig=%v, key=%v", - sig, - priv, - ) - } -} diff --git a/crypto/secp256k1/secp256k1_test.go b/crypto/secp256k1/secp256k1_test.go index a83cd0f5f..83249ef6a 100644 --- a/crypto/secp256k1/secp256k1_test.go +++ b/crypto/secp256k1/secp256k1_test.go @@ -36,15 +36,14 @@ func TestPubKeySecp256k1Address(t *testing.T) { addrBbz, _, _ := base58.CheckDecode(d.addr) addrB := crypto.Address(addrBbz) - var priv secp256k1.PrivKeySecp256k1 - copy(priv[:], privB) + var priv secp256k1.PrivKey = secp256k1.PrivKey(privB) pubKey := priv.PubKey() - pubT, _ := pubKey.(secp256k1.PubKeySecp256k1) - pub := pubT[:] + pubT, _ := pubKey.(secp256k1.PubKey) + pub := pubT addr := pubKey.Address() - assert.Equal(t, pub, pubB, "Expected pub keys to match") + assert.Equal(t, pub, secp256k1.PubKey(pubB), "Expected pub keys to match") assert.Equal(t, addr, addrB, "Expected addresses to match") } } @@ -57,12 +56,12 @@ func TestSignAndValidateSecp256k1(t *testing.T) { sig, err := privKey.Sign(msg) require.Nil(t, err) - assert.True(t, pubKey.VerifyBytes(msg, sig)) + assert.True(t, pubKey.VerifySignature(msg, sig)) // Mutate the signature, just one bit. sig[3] ^= byte(0x01) - assert.False(t, pubKey.VerifyBytes(msg, sig)) + assert.False(t, pubKey.VerifySignature(msg, sig)) } // This test is intended to justify the removal of calls to the underlying library diff --git a/crypto/sr25519/codec.go b/crypto/sr25519/codec.go deleted file mode 100644 index f33b616f9..000000000 --- a/crypto/sr25519/codec.go +++ /dev/null @@ -1,30 +0,0 @@ -package sr25519 - -import ( - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/crypto" -) - -var _ crypto.PrivKey = PrivKeySr25519{} - -const ( - PrivKeyAminoName = "tendermint/PrivKeySr25519" - PubKeyAminoName = "tendermint/PubKeySr25519" - - // SignatureSize is the size of an Edwards25519 signature. Namely the size of a compressed - // Sr25519 point, and a field element. Both of which are 32 bytes. - SignatureSize = 64 -) - -var cdc = amino.NewCodec() - -func init() { - cdc.RegisterInterface((*crypto.PubKey)(nil), nil) - cdc.RegisterConcrete(PubKeySr25519{}, - PubKeyAminoName, nil) - - cdc.RegisterInterface((*crypto.PrivKey)(nil), nil) - cdc.RegisterConcrete(PrivKeySr25519{}, - PrivKeyAminoName, nil) -} diff --git a/crypto/sr25519/encoding.go b/crypto/sr25519/encoding.go new file mode 100644 index 000000000..41570b5d0 --- /dev/null +++ b/crypto/sr25519/encoding.go @@ -0,0 +1,23 @@ +package sr25519 + +import ( + "github.com/tendermint/tendermint/crypto" + tmjson "github.com/tendermint/tendermint/libs/json" +) + +var _ crypto.PrivKey = PrivKey{} + +const ( + PrivKeyName = "tendermint/PrivKeySr25519" + PubKeyName = "tendermint/PubKeySr25519" + + // SignatureSize is the size of an Edwards25519 signature. Namely the size of a compressed + // Sr25519 point, and a field element. Both of which are 32 bytes. 
+ SignatureSize = 64 +) + +func init() { + + tmjson.RegisterType(PubKey{}, PubKeyName) + tmjson.RegisterType(PrivKey{}, PrivKeyName) +} diff --git a/crypto/sr25519/privkey.go b/crypto/sr25519/privkey.go index 17d33ebf2..e77ca375c 100644 --- a/crypto/sr25519/privkey.go +++ b/crypto/sr25519/privkey.go @@ -10,20 +10,22 @@ import ( schnorrkel "github.com/ChainSafe/go-schnorrkel" ) -// PrivKeySr25519Size is the number of bytes in an Sr25519 private key. -const PrivKeySr25519Size = 32 +// PrivKeySize is the number of bytes in an Sr25519 private key. +const PrivKeySize = 32 // PrivKeySr25519 implements crypto.PrivKey. -type PrivKeySr25519 [PrivKeySr25519Size]byte +type PrivKey []byte -// Bytes marshals the privkey using amino encoding. -func (privKey PrivKeySr25519) Bytes() []byte { - return cdc.MustMarshalBinaryBare(privKey) +// Bytes returns the byte representation of the PrivKey. +func (privKey PrivKey) Bytes() []byte { + return []byte(privKey) } // Sign produces a signature on the provided message. -func (privKey PrivKeySr25519) Sign(msg []byte) ([]byte, error) { - miniSecretKey, err := schnorrkel.NewMiniSecretKeyFromRaw(privKey) +func (privKey PrivKey) Sign(msg []byte) ([]byte, error) { + var p [PrivKeySize]byte + copy(p[:], privKey) + miniSecretKey, err := schnorrkel.NewMiniSecretKeyFromRaw(p) if err != nil { return []byte{}, err } @@ -41,8 +43,10 @@ func (privKey PrivKeySr25519) Sign(msg []byte) ([]byte, error) { } // PubKey gets the corresponding public key from the private key. -func (privKey PrivKeySr25519) PubKey() crypto.PubKey { - miniSecretKey, err := schnorrkel.NewMiniSecretKeyFromRaw(privKey) +func (privKey PrivKey) PubKey() crypto.PubKey { + var p [PrivKeySize]byte + copy(p[:], privKey) + miniSecretKey, err := schnorrkel.NewMiniSecretKeyFromRaw(p) if err != nil { panic(fmt.Sprintf("Invalid private key: %v", err)) } @@ -52,28 +56,32 @@ func (privKey PrivKeySr25519) PubKey() crypto.PubKey { if err != nil { panic(fmt.Sprintf("Could not generate public key: %v", err)) } - - return PubKeySr25519(pubkey.Encode()) + key := pubkey.Encode() + return PubKey(key[:]) } // Equals - you probably don't need to use this. // Runs in constant time based on length of the keys. -func (privKey PrivKeySr25519) Equals(other crypto.PrivKey) bool { - if otherEd, ok := other.(PrivKeySr25519); ok { +func (privKey PrivKey) Equals(other crypto.PrivKey) bool { + if otherEd, ok := other.(PrivKey); ok { return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1 } return false } +func (privKey PrivKey) Type() string { + return keyType +} + // GenPrivKey generates a new sr25519 private key. // It uses OS randomness in conjunction with the current global random seed // in tendermint/libs/common to generate the private key. -func GenPrivKey() PrivKeySr25519 { +func GenPrivKey() PrivKey { return genPrivKey(crypto.CReader()) } // genPrivKey generates a new sr25519 private key using the provided reader. -func genPrivKey(rand io.Reader) PrivKeySr25519 { +func genPrivKey(rand io.Reader) PrivKey { var seed [64]byte out := make([]byte, 64) @@ -84,17 +92,19 @@ func genPrivKey(rand io.Reader) PrivKeySr25519 { copy(seed[:], out) - return schnorrkel.NewMiniSecretKey(seed).ExpandEd25519().Encode() + key := schnorrkel.NewMiniSecretKey(seed).ExpandEd25519().Encode() + return key[:] } // GenPrivKeyFromSecret hashes the secret with SHA2, and uses // that 32 byte output to create the private key. // NOTE: secret should be the output of a KDF like bcrypt, // if it's derived from user input. 
-func GenPrivKeyFromSecret(secret []byte) PrivKeySr25519 { +func GenPrivKeyFromSecret(secret []byte) PrivKey { seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes. - var bz [PrivKeySr25519Size]byte + var bz [PrivKeySize]byte copy(bz[:], seed) privKey, _ := schnorrkel.NewMiniSecretKeyFromRaw(bz) - return privKey.ExpandEd25519().Encode() + key := privKey.ExpandEd25519().Encode() + return key[:] } diff --git a/crypto/sr25519/pubkey.go b/crypto/sr25519/pubkey.go index a678806f2..87805cacb 100644 --- a/crypto/sr25519/pubkey.go +++ b/crypto/sr25519/pubkey.go @@ -10,29 +10,28 @@ import ( schnorrkel "github.com/ChainSafe/go-schnorrkel" ) -var _ crypto.PubKey = PubKeySr25519{} +var _ crypto.PubKey = PubKey{} -// PubKeySr25519Size is the number of bytes in an Sr25519 public key. -const PubKeySr25519Size = 32 +// PubKeySize is the number of bytes in an Sr25519 public key. +const ( + PubKeySize = 32 + keyType = "sr25519" +) // PubKeySr25519 implements crypto.PubKey for the Sr25519 signature scheme. -type PubKeySr25519 [PubKeySr25519Size]byte +type PubKey []byte // Address is the SHA256-20 of the raw pubkey bytes. -func (pubKey PubKeySr25519) Address() crypto.Address { +func (pubKey PubKey) Address() crypto.Address { return crypto.Address(tmhash.SumTruncated(pubKey[:])) } -// Bytes marshals the PubKey using amino encoding. -func (pubKey PubKeySr25519) Bytes() []byte { - bz, err := cdc.MarshalBinaryBare(pubKey) - if err != nil { - panic(err) - } - return bz +// Bytes returns the byte representation of the PubKey. +func (pubKey PubKey) Bytes() []byte { + return []byte(pubKey) } -func (pubKey PubKeySr25519) VerifyBytes(msg []byte, sig []byte) bool { +func (pubKey PubKey) VerifySignature(msg []byte, sig []byte) bool { // make sure we use the same algorithm to sign if len(sig) != SignatureSize { return false @@ -41,7 +40,9 @@ func (pubKey PubKeySr25519) VerifyBytes(msg []byte, sig []byte) bool { copy(sig64[:], sig) publicKey := &(schnorrkel.PublicKey{}) - err := publicKey.Decode(pubKey) + var p [PubKeySize]byte + copy(p[:], pubKey) + err := publicKey.Decode(p) if err != nil { return false } @@ -57,15 +58,20 @@ func (pubKey PubKeySr25519) VerifyBytes(msg []byte, sig []byte) bool { return publicKey.Verify(signature, signingContext) } -func (pubKey PubKeySr25519) String() string { - return fmt.Sprintf("PubKeySr25519{%X}", pubKey[:]) +func (pubKey PubKey) String() string { + return fmt.Sprintf("PubKeySr25519{%X}", []byte(pubKey)) } // Equals - checks that two public keys are the same time // Runs in constant time based on length of the keys. -func (pubKey PubKeySr25519) Equals(other crypto.PubKey) bool { - if otherEd, ok := other.(PubKeySr25519); ok { +func (pubKey PubKey) Equals(other crypto.PubKey) bool { + if otherEd, ok := other.(PubKey); ok { return bytes.Equal(pubKey[:], otherEd[:]) } return false } + +func (pubKey PubKey) Type() string { + return keyType + +} diff --git a/crypto/sr25519/sr25519_test.go b/crypto/sr25519/sr25519_test.go index 62cf564a9..1efe31cad 100644 --- a/crypto/sr25519/sr25519_test.go +++ b/crypto/sr25519/sr25519_test.go @@ -20,12 +20,12 @@ func TestSignAndValidateSr25519(t *testing.T) { require.Nil(t, err) // Test the signature - assert.True(t, pubKey.VerifyBytes(msg, sig)) - assert.True(t, pubKey.VerifyBytes(msg, sig)) + assert.True(t, pubKey.VerifySignature(msg, sig)) + assert.True(t, pubKey.VerifySignature(msg, sig)) // Mutate the signature, just one bit. 
// TODO: Replace this with a much better fuzzer, tendermint/ed25519/issues/10 sig[7] ^= byte(0x01) - assert.False(t, pubKey.VerifyBytes(msg, sig)) + assert.False(t, pubKey.VerifySignature(msg, sig)) } diff --git a/crypto/tmhash/hash_test.go b/crypto/tmhash/hash_test.go index 57fd0faa5..cf9991b3b 100644 --- a/crypto/tmhash/hash_test.go +++ b/crypto/tmhash/hash_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/tmhash" ) @@ -12,13 +13,15 @@ import ( func TestHash(t *testing.T) { testVector := []byte("abc") hasher := tmhash.New() - hasher.Write(testVector) + _, err := hasher.Write(testVector) + require.NoError(t, err) bz := hasher.Sum(nil) bz2 := tmhash.Sum(testVector) hasher = sha256.New() - hasher.Write(testVector) + _, err = hasher.Write(testVector) + require.NoError(t, err) bz3 := hasher.Sum(nil) assert.Equal(t, bz, bz2) @@ -28,13 +31,15 @@ func TestHash(t *testing.T) { func TestHashTruncated(t *testing.T) { testVector := []byte("abc") hasher := tmhash.NewTruncated() - hasher.Write(testVector) + _, err := hasher.Write(testVector) + require.NoError(t, err) bz := hasher.Sum(nil) bz2 := tmhash.SumTruncated(testVector) hasher = sha256.New() - hasher.Write(testVector) + _, err = hasher.Write(testVector) + require.NoError(t, err) bz3 := hasher.Sum(nil) bz3 = bz3[:tmhash.TruncatedSize] diff --git a/crypto/xchacha20poly1305/xchachapoly.go b/crypto/xchacha20poly1305/xchachapoly.go index c7a175b5f..2578520a5 100644 --- a/crypto/xchacha20poly1305/xchachapoly.go +++ b/crypto/xchacha20poly1305/xchachapoly.go @@ -47,12 +47,10 @@ func New(key []byte) (cipher.AEAD, error) { return ret, nil } -// nolint func (c *xchacha20poly1305) NonceSize() int { return NonceSize } -// nolint func (c *xchacha20poly1305) Overhead() int { return TagSize } diff --git a/crypto/xchacha20poly1305/xchachapoly_test.go b/crypto/xchacha20poly1305/xchachapoly_test.go index 18779ae16..d5e4b6003 100644 --- a/crypto/xchacha20poly1305/xchachapoly_test.go +++ b/crypto/xchacha20poly1305/xchachapoly_test.go @@ -23,10 +23,22 @@ func TestRandom(t *testing.T) { pl := mr.Intn(16384) ad := make([]byte, al) plaintext := make([]byte, pl) - cr.Read(key[:]) - cr.Read(nonce[:]) - cr.Read(ad) - cr.Read(plaintext) + _, err := cr.Read(key[:]) + if err != nil { + t.Errorf("error on read: %w", err) + } + _, err = cr.Read(nonce[:]) + if err != nil { + t.Errorf("error on read: %w", err) + } + _, err = cr.Read(ad) + if err != nil { + t.Errorf("error on read: %w", err) + } + _, err = cr.Read(plaintext) + if err != nil { + t.Errorf("error on read: %w", err) + } aead, err := New(key[:]) if err != nil { diff --git a/docs/.python-version b/docs/.python-version deleted file mode 100644 index 9bbf49249..000000000 --- a/docs/.python-version +++ /dev/null @@ -1 +0,0 @@ -2.7.14 diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index 55613252f..a5e705bf3 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -1,6 +1,6 @@ module.exports = { - theme: "cosmos", - title: "Tendermint", + theme: 'cosmos', + title: 'Tendermint Core', // locales: { // "/": { // lang: "en-US" @@ -11,130 +11,154 @@ module.exports = { // }, base: process.env.VUEPRESS_BASE, themeConfig: { - docsRepo: "tendermint/tendermint", + repo: 'tendermint/tendermint', + docsRepo: 'tendermint/tendermint', + docsDir: 'docs', editLinks: true, - docsDir: "docs", - logo: "/logo.svg", - label: "core", + label: 'core', + algolia: { + id: "BH4D9OD16A", + key: 
"59f0e2deb984aa9cdf2b3a5fd24ac501", + index: "tendermint" + }, + topbar: { + banner: false, + }, + sidebar: { + auto: true, + nav: [ + { + title: 'Resources', + children: [ + { + title: 'Developer Sessions', + path: '/DEV_SESSIONS.html' + }, + { + title: 'RPC', + path: 'https://docs.tendermint.com/master/rpc/', + static: true + }, + // TODO: remove once https://github.com/cosmos/vuepress-theme-cosmos/issues/91 is closed + { + title: 'Version 0.32', + path: '/v0.32', + static: true + }, + { + title: 'Version 0.33', + path: '/v0.33', + static: true + }, + ] + } + ] + }, gutter: { - title: "Help & Support", + title: 'Help & Support', editLink: true, forum: { - title: "Tendermint Forum", - text: "Join the Tendermint forum to learn more", - url: "https://forum.cosmos.network/c/tendermint", - bg: "#0B7E0B", - logo: "tendermint" + title: 'Tendermint Forum', + text: 'Join the Tendermint forum to learn more', + url: 'https://forum.cosmos.network/c/tendermint', + bg: '#0B7E0B', + logo: 'tendermint' }, github: { - title: "Found an Issue?", - text: "Help us improve this page by suggesting edits on GitHub." + title: 'Found an Issue?', + text: 'Help us improve this page by suggesting edits on GitHub.' } }, footer: { - questionsText: "Chat with Cosmos developers in [Discord](https://discordapp.com/channels/669268347736686612) or reach out on the [SDK Developer Forum](https://forum.cosmos.network/c/tendermint) to learn more.", - logo: "/logo-bw.svg", + question: { + text: 'Chat with Tendermint developers in Discord or reach out on the Tendermint Forum to learn more.' + }, + logo: '/logo-bw.svg', textLink: { - text: "tendermint.com", - url: "https://tendermint.com" + text: 'tendermint.com', + url: 'https://tendermint.com' }, services: [ { - service: "medium", - url: "https://medium.com/@tendermint" + service: 'medium', + url: 'https://medium.com/@tendermint' }, { - service: "twitter", - url: "https://twitter.com/tendermint_team" + service: 'twitter', + url: 'https://twitter.com/tendermint_team' }, { - service: "linkedin", - url: "https://www.linkedin.com/company/tendermint/" + service: 'linkedin', + url: 'https://www.linkedin.com/company/tendermint/' }, { - service: "reddit", - url: "https://reddit.com/r/cosmosnetwork" + service: 'reddit', + url: 'https://reddit.com/r/cosmosnetwork' }, { - service: "telegram", - url: "https://t.me/cosmosproject" + service: 'telegram', + url: 'https://t.me/cosmosproject' }, { - service: "youtube", - url: "https://www.youtube.com/c/CosmosProject" + service: 'youtube', + url: 'https://www.youtube.com/c/CosmosProject' } ], smallprint: - "The development of the Tendermint project is led primarily by Tendermint Inc., the for-profit entity which also maintains this website. Funding for this development comes primarily from the Interchain Foundation, a Swiss non-profit.", + 'The development of Tendermint Core is led primarily by [Interchain GmbH](https://interchain.berlin/). Funding for this development comes primarily from the Interchain Foundation, a Swiss non-profit. 
The Tendermint trademark is owned by Tendermint Inc, the for-profit entity that also maintains this website.', links: [ { - title: "Documentation", + title: 'Documentation', children: [ { - title: "Cosmos SDK", - url: "https://cosmos.network/docs" + title: 'Cosmos SDK', + url: 'https://docs.cosmos.network' }, { - title: "Cosmos Hub", - url: "https://hub.cosmos.network/" + title: 'Cosmos Hub', + url: 'https://hub.cosmos.network' } ] }, { - title: "Community", + title: 'Community', children: [ { - title: "Tendermint blog", - url: "https://medium.com/@tendermint" + title: 'Tendermint blog', + url: 'https://medium.com/@tendermint' }, { - title: "Forum", - url: "https://forum.cosmos.network/c/tendermint" + title: 'Forum', + url: 'https://forum.cosmos.network/c/tendermint' } ] }, { - title: "Contributing", + title: 'Contributing', children: [ { - title: "Contributing to the docs", - url: "https://github.com/tendermint/tendermint" + title: 'Contributing to the docs', + url: 'https://github.com/tendermint/tendermint' }, { - title: "Source code on GitHub", - url: "https://github.com/tendermint/tendermint" + title: 'Source code on GitHub', + url: 'https://github.com/tendermint/tendermint' }, { - title: "Careers at Tendermint", - url: "https://tendermint.com/careers" + title: 'Careers at Tendermint', + url: 'https://tendermint.com/careers' } ] } ] - }, - sidebar: [ - { - title: "Resources", - children: [ - { - title: "Developer Sessions", - path: "/DEV_SESSIONS.html" - }, - { - title: "RPC", - path: "/master/rpc/", - static: true - } - ] - } - ] + } }, plugins: [ [ - "@vuepress/google-analytics", + '@vuepress/google-analytics', { - ga: "UA-51029217-11" + ga: 'UA-51029217-11' } ] - ], + ] }; diff --git a/docs/.vuepress/public/logo.svg b/docs/.vuepress/public/logo.svg deleted file mode 100644 index a6e0024fc..000000000 --- a/docs/.vuepress/public/logo.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/docs/.vuepress/styles/index.styl b/docs/.vuepress/styles/index.styl index 0ca835191..ecca3f715 100644 --- a/docs/.vuepress/styles/index.styl +++ b/docs/.vuepress/styles/index.styl @@ -1,3 +1,3 @@ :root - --accent-color #018A01 - --background #222222 \ No newline at end of file + --color-link #018A01 + --color-primary #00BB00 diff --git a/docs/DEV_SESSIONS.md b/docs/DEV_SESSIONS.md deleted file mode 100644 index 1ff4e3641..000000000 --- a/docs/DEV_SESSIONS.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -order: 1 ---- - -# Developer Sessions - -The Tendermint Core developer call is comprised of both [Interchain -Foundation](http://interchain.io/) and [All in Bits](https://tendermint.com/) -team members discussing the development of [Tendermint -BFT](https://github.com/tendermint/tendermint) and related research. The goal -of the Tendermint Core developer calls is to provide transparency into the -decision making process, technical information, update cycles etc. 
- -## List - -| Date | Topic | Link(s) | -| -------------- | ----------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ | -| October 2019 | ABCI Overview (2/2) | [Youtube](https://www.youtube.com/watch?v=K3-E5wj2jA8) | -| October 2019 | ABCI Overview (1/2) | [YouTube](https://www.youtube.com/watch?v=I3OnA8yCHl4&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv) | -| September 2019 | IAVL+ Presentation | [YouTube](https://www.youtube.com/watch?v=e5wwBaCTc9Y&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=2) | -| September 2019 | Tendermint Dev Session - Blockchain Reactor in TLA+ | [YouTube](https://www.youtube.com/watch?v=q0e0pEQ5aiY&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=3) | -| September 2019 | Tendermint Code Review - SkipTimeoutCommit & Block Rollback | [YouTube](https://www.youtube.com/watch?v=MCo_oH7rys8&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=4) | -| September 2019 | Tendermint Evidence Handling | [YouTube](https://www.youtube.com/watch?v=-4H3_DVlYRk&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=5) | -| August 2019 | Part Three: Tendermint Lite Client | [YouTube](https://www.youtube.com/watch?v=whyL6UrKe7I&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=5) | -| August 2019 | Fork Accountability | [YouTube](https://www.youtube.com/watch?v=Jph-4PGtdPo&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=4) | -| July 2019 | Part Two: Tendermint Lite Client | [YouTube](https://www.youtube.com/watch?v=gTjG7jNNdKQ&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=6) | -| July 2019 | Part One: Tendermint Lite Client | [YouTube](https://www.youtube.com/watch?v=C6fH_sgPJzA&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=7) | -| June 2019 | Testnet Deployments | [YouTube](https://www.youtube.com/watch?v=gYA6no7tRlM&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=10) | -| June 2019 | Blockchain Reactor Refactor | [YouTube](https://www.youtube.com/watch?v=JLBGH8yxABk&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=11) | -| June 2019 | Tendermint Rust Libraries | [YouTube](https://www.youtube.com/watch?v=-WXKdyoGHwA&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=9) | -| May 2019 | Merkle Tree Deep Dive | [YouTube](https://www.youtube.com/watch?v=L3bt2Uw8ICg&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=8) | -| May 2019 | Remote Signer Refactor | [YouTube](https://www.youtube.com/watch?v=eUyXXEEuBzQ&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=12) | -| May 2019 | Introduction to Ansible | [YouTube](https://www.youtube.com/watch?v=72clQLjzPg4&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=14&t=0s) | -| April 2019 | Tendermint State Sync Design Discussion | [YouTube](https://www.youtube.com/watch?v=4k23j2QHwrM&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=11) | -| April 2019 | ADR-036 - Blockchain Reactor Refactor | [YouTube](https://www.youtube.com/watch?v=TW2xC1LwEkE&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=10) | -| April 2019 | Verifying Distributed Algorithms | [YouTube](https://www.youtube.com/watch?v=tMd4lgPVBxE&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=9) | -| April 2019 | Byzantine Model Checker Presentation | [YouTube](https://www.youtube.com/watch?v=rdXl4VCQyow&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=8) | -| January 2019 | Proposer Selection in Idris | [YouTube](https://www.youtube.com/watch?v=hWZdc9c1aH8&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=7) | -| January 2019 | Current Mempool Design | 
[YouTube](https://www.youtube.com/watch?v=--iGIYYiLu4&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=6) | -| December 2018 | ABCI Proxy App | [YouTube](https://www.youtube.com/watch?v=s6sQ2HOVHdo&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=5) | -| October 2018 | DB Performance | [YouTube](https://www.youtube.com/watch?v=jVSNHi4l0fQ&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=4) | -| October 2018 | Alternative Mempool Algorithms | [YouTube](https://www.youtube.com/watch?v=XxH5ZtM4vMM&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv&index=2) | -| October 2018 | Tendermint Termination | [YouTube](https://www.youtube.com/watch?v=YBZjecfjeIk&list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv) | diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md index ae16c18c9..04883e462 100644 --- a/docs/DOCS_README.md +++ b/docs/DOCS_README.md @@ -2,7 +2,7 @@ The documentation for Tendermint Core is hosted at: -- https://docs.tendermint.com/master/ +- built from the files in this (`/docs`) directory for [master](https://github.com/tendermint/tendermint/tree/master/docs). @@ -61,25 +61,25 @@ to send users to the GitHub. Make sure you are in the `docs` directory and run the following commands: -```sh +```bash rm -rf node_modules ``` This command will remove old version of the visual theme and required packages. This step is optional. -```sh +```bash npm install ``` Install the theme and all dependencies. -```sh +```bash npm run serve ``` -Run `pre` and `post` hooks and start a hot-reloading web-server. See output of this command for the URL (it is often https://localhost:8080). +Run `pre` and `post` hooks and start a hot-reloading web-server. See output of this command for the URL (it is often <https://localhost:8080>). diff --git a/docs/README.md b/docs/README.md index 1b7d691e3..7cd5f68d4 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,3 +1,10 @@ +--- +title: Tendermint Core Documentation +description: Tendermint Core is a blockchain application platform. +footer: + newsletter: false +--- + # Tendermint Welcome to the Tendermint Core documentation! @@ -23,5 +30,4 @@ To find out about the Tendermint ecosystem you can go [here](https://github.com/ ## Contribute -To contribute to the documentation, see [this file](https://github.com/tendermint/tendermint/blob/master/docs/DOCS_README.md) for details of the build process and -considerations when making changes. +To contribute to the documentation, see [this file](https://github.com/tendermint/tendermint/blob/master/docs/DOCS_README.md) for details of the build process and considerations when making changes. diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index ec8b0abf3..18a7984f7 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -14,9 +14,7 @@ Make sure you [have Go installed](https://golang.org/doc/install).
Next, install the `abci-cli` tool and example applications: -``` -mkdir -p $GOPATH/src/github.com/tendermint -cd $GOPATH/src/github.com/tendermint +```sh git clone https://github.com/tendermint/tendermint.git cd tendermint make tools @@ -25,7 +23,7 @@ make install_abci Now run `abci-cli` to see the list of commands: -``` +```sh Usage: abci-cli [command] @@ -69,7 +67,7 @@ Its code can be found [here](https://github.com/tendermint/tendermint/blob/master/abci/cmd/abci-cli/abci-cli.go) and looks like: -``` +```go func cmdKVStore(cmd *cobra.Command, args []string) error { logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) @@ -105,27 +103,27 @@ func cmdKVStore(cmd *cobra.Command, args []string) error { Start by running: -``` +```sh abci-cli kvstore ``` And in another terminal, run -``` +```sh abci-cli echo hello abci-cli info ``` You'll see something like: -``` +```sh -> data: hello -> data.hex: 68656C6C6F ``` and: -``` +```sh -> data: {"size":0} -> data.hex: 7B2273697A65223A307D ``` @@ -162,7 +160,7 @@ speaking ABCI messages to your application. Try running these commands: -``` +```sh > echo hello -> code: OK -> data: hello @@ -226,7 +224,7 @@ Like the kvstore app, its code can be found [here](https://github.com/tendermint/tendermint/blob/master/abci/cmd/abci-cli/abci-cli.go) and looks like: -``` +```go func cmdCounter(cmd *cobra.Command, args []string) error { app := counter.NewCounterApplication(flagSerial) @@ -280,16 +278,13 @@ whose integer is greater than the last committed one. Let's kill the console and the kvstore application, and start the counter app: -``` +```sh abci-cli counter ``` In another window, start the `abci-cli console`: -``` -> set_option serial on --> code: OK --> log: OK (SetOption doesn't return anything.) +```sh > check_tx 0x00 -> code: OK @@ -332,7 +327,7 @@ example directory](https://github.com/tendermint/tendermint/tree/master/abci/exa To run the Node.js version, first download & install [the Javascript ABCI server](https://github.com/tendermint/js-abci): -``` +```sh git clone https://github.com/tendermint/js-abci.git cd js-abci npm install abci @@ -340,7 +335,7 @@ npm install abci Now you can start the app: -```bash +```sh node example/counter.js ``` diff --git a/docs/app-dev/app-architecture.md b/docs/app-dev/app-architecture.md index d98e279dc..ec2822688 100644 --- a/docs/app-dev/app-architecture.md +++ b/docs/app-dev/app-architecture.md @@ -9,7 +9,7 @@ Tendermint blockchain application. The following diagram provides a superb example: -![](../imgs/cosmos-tendermint-stack-4k.jpg) +![cosmos-tendermint-stack](../imgs/cosmos-tendermint-stack-4k.jpg) We distinguish here between two forms of "application". The first is the end-user application, like a desktop-based wallet app that a user downloads, @@ -18,10 +18,9 @@ ABCI application, which is the logic that actually runs on the blockchain. Transactions sent by an end-user application are ultimately processed by the ABCI application after being committed by the Tendermint consensus. -The end-user application in this diagram is the Cosmos Voyager, at the bottom -left. Voyager communicates with a REST API exposed by a local Light-Client -Daemon. The Light-Client Daemon is an application specific program that -communicates with Tendermint nodes and verifies Tendermint light-client proofs +The end-user application in this diagram is the [Lunie](https://lunie.io/) app, located at the bottom +left. Lunie communicates with a REST API exposed by the application.
+The application communicates with Tendermint nodes and verifies Tendermint light-client proofs
through the Tendermint Core RPC. The Tendermint Core process communicates with
a local ABCI application, where the user query or transaction is actually
processed.
diff --git a/docs/app-dev/app-development.md b/docs/app-dev/app-development.md
deleted file mode 100644
index 9c1acc289..000000000
--- a/docs/app-dev/app-development.md
+++ /dev/null
@@ -1,507 +0,0 @@
----
-order: 4
----
-
-# Application Development Guide
-
-## XXX
-
-This page is undergoing deprecation. All content is being moved to the new [home
-of the ABCI specification](https://github.com/tendermint/spec/tree/master/spec/abci).
-
-## ABCI Design
-
-The purpose of ABCI is to provide a clean interface between state
-transition machines on one computer and the mechanics of their
-replication across multiple computers. The former we call 'application
-logic' and the latter the 'consensus engine'. Application logic
-validates transactions and optionally executes transactions against some
-persistent state. A consensus engine ensures all transactions are
-replicated in the same order on every machine. We call each machine in a
-consensus engine a 'validator', and each validator runs the same
-transactions through the same application logic. In particular, we are
-interested in blockchain-style consensus engines, where transactions are
-committed in hash-linked blocks.
-
-The ABCI design has a few distinct components:
-
-- message protocol
-  - pairs of request and response messages
-  - consensus makes requests, application responds
-  - defined using protobuf
-- server/client
-  - consensus engine runs the client
-  - application runs the server
-  - two implementations:
-    - async raw bytes
-    - grpc
-- blockchain protocol
-  - abci is connection oriented
-  - Tendermint Core maintains three connections:
-    - [mempool connection](#mempool-connection): for checking if
-      transactions should be relayed before they are committed;
-      only uses `CheckTx`
-    - [consensus connection](#consensus-connection): for executing
-      transactions that have been committed. Message sequence is
-      -for every block
-      `BeginBlock, [DeliverTx, ...], EndBlock, Commit`
-    - [query connection](#query-connection): for querying the
-      application state; only uses Query and Info
-
-The mempool and consensus logic act as clients, and each maintains an
-open ABCI connection with the application, which hosts an ABCI server.
-Shown are the request and response types sent on each connection.
-
-Most of the examples below are from [kvstore
-application](https://github.com/tendermint/tendermint/blob/master/abci/example/kvstore/kvstore.go),
-which is a part of the abci repo. [persistent_kvstore
-application](https://github.com/tendermint/tendermint/blob/master/abci/example/kvstore/persistent_kvstore.go)
-is used to show `BeginBlock`, `EndBlock` and `InitChain` example
-implementations.
-
-## Blockchain Protocol
-
-In ABCI, a transaction is simply an arbitrary length byte-array. It is
-the application's responsibility to define the transaction codec as they
-please, and to use it for both CheckTx and DeliverTx.
-
-Note that there are two distinct means for running transactions,
-corresponding to stages of 'awareness' of the transaction in the
-network. The first stage is when a transaction is received by a
-validator from a client into the so-called mempool or transaction pool
--this is where we use CheckTx.
The second is when the transaction is -successfully committed on more than 2/3 of validators - where we use -DeliverTx. In the former case, it may not be necessary to run all the -state transitions associated with the transaction, as the transaction -may not ultimately be committed until some much later time, when the -result of its execution will be different. For instance, an Ethereum -ABCI app would check signatures and amounts in CheckTx, but would not -actually execute any contract code until the DeliverTx, so as to avoid -executing state transitions that have not been finalized. - -To formalize the distinction further, two explicit ABCI connections are -made between Tendermint Core and the application: the mempool connection -and the consensus connection. We also make a third connection, the query -connection, to query the local state of the app. - -### Mempool Connection - -The mempool connection is used _only_ for CheckTx requests. Transactions -are run using CheckTx in the same order they were received by the -validator. If the CheckTx returns `OK`, the transaction is kept in -memory and relayed to other peers in the same order it was received. -Otherwise, it is discarded. - -CheckTx requests run concurrently with block processing; so they should -run against a copy of the main application state which is reset after -every block. This copy is necessary to track transitions made by a -sequence of CheckTx requests before they are included in a block. When a -block is committed, the application must ensure to reset the mempool -state to the latest committed state. Tendermint Core will then filter -through all transactions in the mempool, removing any that were included -in the block, and re-run the rest using CheckTx against the post-Commit -mempool state (this behaviour can be turned off with -`[mempool] recheck = false`). - -In go: - -``` -func (app *KVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx { - return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1} -} -``` - -In Java: - -``` -ResponseCheckTx requestCheckTx(RequestCheckTx req) { - byte[] transaction = req.getTx().toByteArray(); - - // validate transaction - - if (notValid) { - return ResponseCheckTx.newBuilder().setCode(CodeType.BadNonce).setLog("invalid tx").build(); - } else { - return ResponseCheckTx.newBuilder().setCode(CodeType.OK).build(); - } -} -``` - -### Replay Protection - -To prevent old transactions from being replayed, CheckTx must implement -replay protection. - -Tendermint provides the first defence layer by keeping a lightweight -in-memory cache of 100k (`[mempool] cache_size`) last transactions in -the mempool. If Tendermint is just started or the clients sent more than -100k transactions, old transactions may be sent to the application. So -it is important CheckTx implements some logic to handle them. - -If there are cases in your application where a transaction may become invalid in some -future state, you probably want to disable Tendermint's -cache. You can do that by setting `[mempool] cache_size = 0` in the -config. - -### Consensus Connection - -The consensus connection is used only when a new block is committed, and -communicates all information from the block in a series of requests: -`BeginBlock, [DeliverTx, ...], EndBlock, Commit`. That is, when a block -is committed in the consensus, we send a list of DeliverTx requests (one -for each transaction) sandwiched by BeginBlock and EndBlock requests, -and followed by a Commit. 
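To make the mempool-state handling described above concrete, here is a minimal sketch (not taken from the kvstore example) of the common pattern of keeping two state copies: one mutated by DeliverTx and a throwaway copy that CheckTx validates against. `State`, `apply`, and `hash` are toy stand-ins, and the interface is the legacy Go `abci/types` one used in this guide:

```go
package app

import "github.com/tendermint/tendermint/abci/types"

// State is a toy in-memory state; apply and hash are illustrative stand-ins.
type State struct{ kv map[string]string }

func (s State) Copy() State {
	kv := make(map[string]string, len(s.kv))
	for k, v := range s.kv {
		kv[k] = v
	}
	return State{kv: kv}
}

func (s State) apply(tx []byte) error { s.kv[string(tx)] = string(tx); return nil }
func (s State) hash() []byte          { return []byte{byte(len(s.kv))} } // placeholder only

// App keeps deliverState (the canonical state for the block being executed)
// and checkState (the copy the mempool connection runs CheckTx against).
type App struct {
	types.BaseApplication
	deliverState State
	checkState   State
}

func (a *App) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	// Runs concurrently with block processing, against the throwaway copy.
	if err := a.checkState.apply(req.Tx); err != nil {
		return types.ResponseCheckTx{Code: 1, Log: err.Error()}
	}
	return types.ResponseCheckTx{Code: 0, GasWanted: 1}
}

func (a *App) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	if err := a.deliverState.apply(req.Tx); err != nil {
		return types.ResponseDeliverTx{Code: 1, Log: err.Error()}
	}
	return types.ResponseDeliverTx{Code: 0}
}

func (a *App) Commit() types.ResponseCommit {
	// Reset the mempool state to the freshly committed state; Tendermint
	// then re-runs CheckTx for whatever is left in the mempool.
	a.checkState = a.deliverState.Copy()
	return types.ResponseCommit{Data: a.deliverState.hash()}
}
```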
- -### DeliverTx - -DeliverTx is the workhorse of the blockchain. Tendermint sends the -DeliverTx requests asynchronously but in order, and relies on the -underlying socket protocol (ie. TCP) to ensure they are received by the -app in order. They have already been ordered in the global consensus by -the Tendermint protocol. - -DeliverTx returns a abci.Result, which includes a Code, Data, and Log. -The code may be non-zero (non-OK), meaning the corresponding transaction -should have been rejected by the mempool, but may have been included in -a block by a Byzantine proposer. - -The block header will be updated (TODO) to include some commitment to -the results of DeliverTx, be it a bitarray of non-OK transactions, or a -merkle root of the data returned by the DeliverTx requests, or both. - -In go: - -``` -// tx is either "key=value" or just arbitrary bytes -func (app *KVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx { - var key, value []byte - parts := bytes.Split(req.Tx, []byte("=")) - if len(parts) == 2 { - key, value = parts[0], parts[1] - } else { - key, value = req.Tx, req.Tx - } - - app.state.db.Set(prefixKey(key), value) - app.state.Size += 1 - - events := []types.Event{ - { - Type: "app", - Attributes: []kv.Pair{ - {Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko")}, - {Key: []byte("key"), Value: key}, - }, - }, - } - - return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events} -} -``` - -In Java: - -``` -/** - * Using Protobuf types from the protoc compiler, we always start with a byte[] - */ -ResponseDeliverTx deliverTx(RequestDeliverTx request) { - byte[] transaction = request.getTx().toByteArray(); - - // validate your transaction - - if (notValid) { - return ResponseDeliverTx.newBuilder().setCode(CodeType.BadNonce).setLog("transaction was invalid").build(); - } else { - ResponseDeliverTx.newBuilder().setCode(CodeType.OK).build(); - } - -} -``` - -### Commit - -Once all processing of the block is complete, Tendermint sends the -Commit request and blocks waiting for a response. While the mempool may -run concurrently with block processing (the BeginBlock, DeliverTxs, and -EndBlock), it is locked for the Commit request so that its state can be -safely updated during Commit. This means the app _MUST NOT_ do any -blocking communication with the mempool (ie. broadcast_tx) during -Commit, or there will be deadlock. Note also that all remaining -transactions in the mempool are replayed on the mempool connection -(CheckTx) following a commit. - -The app should respond to the Commit request with a byte array, which is -the deterministic state root of the application. It is included in the -header of the next block. It can be used to provide easily verified -Merkle-proofs of the state of the application. - -It is expected that the app will persist state to disk on Commit. The -option to have all transactions replayed from some previous block is the -job of the [Handshake](#handshake). 
- -In go: - -``` -func (app *KVStoreApplication) Commit() types.ResponseCommit { - // Using a memdb - just return the big endian size of the db - appHash := make([]byte, 8) - binary.PutVarint(appHash, app.state.Size) - app.state.AppHash = appHash - app.state.Height += 1 - saveState(app.state) - return types.ResponseCommit{Data: appHash} -} -``` - -In Java: - -``` -ResponseCommit requestCommit(RequestCommit requestCommit) { - - // update the internal app-state - byte[] newAppState = calculateAppState(); - - // and return it to the node - return ResponseCommit.newBuilder().setCode(CodeType.OK).setData(ByteString.copyFrom(newAppState)).build(); -} -``` - -### BeginBlock - -The BeginBlock request can be used to run some code at the beginning of -every block. It also allows Tendermint to send the current block hash -and header to the application, before it sends any of the transactions. - -The app should remember the latest height and header (ie. from which it -has run a successful Commit) so that it can tell Tendermint where to -pick up from when it restarts. See information on the Handshake, below. - -In go: - -``` -// Track the block hash and header information -func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock { - // reset valset changes - app.ValUpdates = make([]types.ValidatorUpdate, 0) - return types.ResponseBeginBlock{} -} -``` - -In Java: - -``` -/* - * all types come from protobuf definition - */ -ResponseBeginBlock requestBeginBlock(RequestBeginBlock req) { - - Header header = req.getHeader(); - byte[] prevAppHash = header.getAppHash().toByteArray(); - long prevHeight = header.getHeight(); - - // run your pre-block logic. Maybe prepare a state snapshot, message components, etc - - return ResponseBeginBlock.newBuilder().build(); -} -``` - -### EndBlock - -The EndBlock request can be used to run some code at the end of every block. -Additionally, the response may contain a list of validators, which can be used -to update the validator set. To add a new validator or update an existing one, -simply include them in the list returned in the EndBlock response. To remove -one, include it in the list with a `power` equal to `0`. Validator's `address` -field can be left empty. Tendermint core will take care of updating the -validator set. Note the change in voting power must be strictly less than 1/3 -per block if you want a light client to be able to prove the transition -externally. See the [light client -docs](https://godoc.org/github.com/tendermint/tendermint/lite#hdr-How_We_Track_Validators) -for details on how it tracks validators. - -In go: - -``` -// Update the validator set -func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock { - return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates} -} -``` - -In Java: - -``` -/* - * Assume that one validator changes. The new validator has a power of 10 - */ -ResponseEndBlock requestEndBlock(RequestEndBlock req) { - final long currentHeight = req.getHeight(); - final byte[] validatorPubKey = getValPubKey(); - - ResponseEndBlock.Builder builder = ResponseEndBlock.newBuilder(); - builder.addDiffs(1, Types.Validator.newBuilder().setPower(10L).setPubKey(ByteString.copyFrom(validatorPubKey)).build()); - - return builder.build(); -} -``` - -### Query Connection - -This connection is used to query the application without engaging -consensus. 
It's exposed over the tendermint core rpc, so clients can -query the app without exposing a server on the app itself, but they must -serialize each query as a single byte array. Additionally, certain -"standardized" queries may be used to inform local decisions, for -instance about which peers to connect to. - -Tendermint Core currently uses the Query connection to filter peers upon -connecting, according to IP address or node ID. For instance, -returning non-OK ABCI response to either of the following queries will -cause Tendermint to not connect to the corresponding peer: - -- `p2p/filter/addr/`, where `` is an IP address. -- `p2p/filter/id/`, where `` is the hex-encoded node ID (the hash of - the node's p2p pubkey). - -Note: these query formats are subject to change! - -In go: - -``` -func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { - if reqQuery.Prove { - value := app.state.db.Get(prefixKey(reqQuery.Data)) - resQuery.Index = -1 // TODO make Proof return index - resQuery.Key = reqQuery.Data - resQuery.Value = value - if value != nil { - resQuery.Log = "exists" - } else { - resQuery.Log = "does not exist" - } - return - } else { - resQuery.Key = reqQuery.Data - value := app.state.db.Get(prefixKey(reqQuery.Data)) - resQuery.Value = value - if value != nil { - resQuery.Log = "exists" - } else { - resQuery.Log = "does not exist" - } - return - } -} -``` - -In Java: - -``` - ResponseQuery requestQuery(RequestQuery req) { - final boolean isProveQuery = req.getProve(); - final ResponseQuery.Builder responseBuilder = ResponseQuery.newBuilder(); - byte[] queryData = req.getData().toByteArray(); - - if (isProveQuery) { - com.app.example.QueryResultWithProof result = generateQueryResultWithProof(queryData); - responseBuilder.setIndex(result.getLeftIndex()); - responseBuilder.setKey(req.getData()); - responseBuilder.setValue(result.getValueOrNull(0)); - responseBuilder.setHeight(result.getHeight()); - responseBuilder.setProof(result.getProof()); - responseBuilder.setLog(result.getLogValue()); - } else { - com.app.example.QueryResult result = generateQueryResult(queryData); - responseBuilder.setIndex(result.getIndex()); - responseBuilder.setValue(result.getValue()); - responseBuilder.setLog(result.getLogValue()); - } - - responseBuilder.setIndex(result.getIndex()); - responseBuilder.setValue(ByteString.copyFrom(result.getValue())); - responseBuilder.setLog(result.getLogValue()); - } - - return responseBuilder.build(); -} -``` - -### Handshake - -When the app or tendermint restarts, they need to sync to a common -height. When an ABCI connection is first established, Tendermint will -call `Info` on the Query connection. The response should contain the -LastBlockHeight and LastBlockAppHash - the former is the last block for -which the app ran Commit successfully, the latter is the response from -that Commit. - -Using this information, Tendermint will determine what needs to be -replayed, if anything, against the app, to ensure both Tendermint and -the app are synced to the latest block height. - -If the app returns a LastBlockHeight of 0, Tendermint will just replay -all blocks. 
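As a worked example of that decision (an illustrative sketch, not Tendermint's actual replay code): if the app reports `LastBlockHeight = 95` while Tendermint has blocks stored up to height 100, blocks 96-100 are replayed; if it reports 0, everything is.

```go
// replayRange sketches the handshake decision: given the height of the last
// block Tendermint has stored and the LastBlockHeight the app returned from
// Info, it yields the inclusive range of blocks to replay against the app.
func replayRange(storeHeight, appLastBlockHeight int64) (from, to int64, replay bool) {
	if appLastBlockHeight >= storeHeight {
		return 0, 0, false // app is already caught up
	}
	return appLastBlockHeight + 1, storeHeight, true // 1..storeHeight when the app reports 0
}
```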
- -In go: - -``` -func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) { - return types.ResponseInfo{ - Data: fmt.Sprintf("{\"size\":%v}", app.state.Size), - Version: version.ABCIVersion, - AppVersion: ProtocolVersion.Uint64(), - } -} -``` - -In Java: - -``` -ResponseInfo requestInfo(RequestInfo req) { - final byte[] lastAppHash = getLastAppHash(); - final long lastHeight = getLastHeight(); - return ResponseInfo.newBuilder().setLastBlockAppHash(ByteString.copyFrom(lastAppHash)).setLastBlockHeight(lastHeight).build(); -} -``` - -### Genesis - -`InitChain` will be called once upon the genesis. `params` includes the -initial validator set. Later on, it may be extended to take parts of the -consensus params. - -In go: - -``` -// Save the validators in the merkle tree -func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain { - for _, v := range req.Validators { - r := app.updateValidator(v) - if r.IsErr() { - app.logger.Error("Error updating validators", "r", r) - } - } - return types.ResponseInitChain{} -} -``` - -In Java: - -``` -/* - * all types come from protobuf definition - */ -ResponseInitChain requestInitChain(RequestInitChain req) { - final int validatorsCount = req.getValidatorsCount(); - final List validatorsList = req.getValidatorsList(); - - validatorsList.forEach((validator) -> { - long power = validator.getPower(); - byte[] validatorPubKey = validator.getPubKey().toByteArray(); - - // do somehing for validator setup in app - }); - - return ResponseInitChain.newBuilder().build(); -} -``` diff --git a/docs/app-dev/getting-started.md b/docs/app-dev/getting-started.md index 4a83b0ad2..132258bf9 100644 --- a/docs/app-dev/getting-started.md +++ b/docs/app-dev/getting-started.md @@ -23,15 +23,15 @@ using Tendermint. The first apps we will work with are written in Go. To install them, you need to [install Go](https://golang.org/doc/install), put `$GOPATH/bin` in your `$PATH` and enable go modules with these instructions: + ```bash echo export GOPATH=\"\$HOME/go\" >> ~/.bash_profile echo export PATH=\"\$PATH:\$GOPATH/bin\" >> ~/.bash_profile -echo export GO111MODULE=on >> ~/.bash_profile ``` Then run -``` +```sh go get github.com/tendermint/tendermint cd $GOPATH/src/github.com/tendermint/tendermint make tools @@ -54,16 +54,16 @@ full transaction bytes are stored as the key and the value. Let's start a kvstore application. -``` +```sh abci-cli kvstore ``` -In another terminal, we can start Tendermint. You should already have the -Tendermint binary installed. If not, follow the steps from -[here](../introduction/install.md). If you have never run Tendermint +In another terminal, we can start Tendermint. You should already have the +Tendermint binary installed. If not, follow the steps from +[here](../introduction/install.md). If you have never run Tendermint before, use: -``` +```sh tendermint init tendermint node ``` @@ -76,7 +76,7 @@ details, see [the guide on using Tendermint](../tendermint-core/using-tendermint You should see Tendermint making blocks! We can get the status of our Tendermint node as follows: -``` +```sh curl -s localhost:26657/status ``` @@ -85,7 +85,7 @@ tool like [jq](https://stedolan.github.io/jq/) or `json_pp`. Now let's send some transactions to the kvstore. -``` +```sh curl -s 'localhost:26657/broadcast_tx_commit?tx="abcd"' ``` @@ -95,7 +95,7 @@ transaction with bytes `abcd`, so `abcd` will be stored as both the key and the value in the Merkle tree. 
The response should look something like: -``` +```json { "jsonrpc": "2.0", "id": "", @@ -122,13 +122,13 @@ like: We can confirm that our transaction worked and the value got stored by querying the app: -``` +```sh curl -s 'localhost:26657/abci_query?data="abcd"' ``` The result should look like: -``` +```json { "jsonrpc": "2.0", "id": "", @@ -152,14 +152,14 @@ human-readable](https://github.com/tendermint/tendermint/issues/1794). Now let's try setting a different key and value: -``` +```sh curl -s 'localhost:26657/broadcast_tx_commit?tx="name=satoshi"' ``` Now if we query for `name`, we should get `satoshi`, or `c2F0b3NoaQ==` in base64: -``` +```sh curl -s 'localhost:26657/abci_query?data="name"' ``` @@ -195,13 +195,13 @@ Let's kill the previous instance of `tendermint` and the `kvstore` application, and start the counter app. We can enable `serial=on` with a flag: -``` +```sh abci-cli counter --serial ``` In another window, reset then start Tendermint: -``` +```sh tendermint unsafe_reset_all tendermint node ``` @@ -210,14 +210,14 @@ Once again, you can see the blocks streaming by. Let's send some transactions. Since we have set `serial=on`, the first transaction must be the number `0`: -``` +```sh curl localhost:26657/broadcast_tx_commit?tx=0x00 ``` Note the empty (hence successful) response. The next transaction must be the number `1`. If instead, we try to send a `5`, we get an error: -``` +```json > curl localhost:26657/broadcast_tx_commit?tx=0x05 { "jsonrpc": "2.0", @@ -236,7 +236,7 @@ the number `1`. If instead, we try to send a `5`, we get an error: But if we send a `1`, it works again: -``` +```json > curl localhost:26657/broadcast_tx_commit?tx=0x01 { "jsonrpc": "2.0", @@ -262,7 +262,7 @@ to [install node](https://nodejs.org/en/download/). You'll also need to fetch the relevant repository, from [here](https://github.com/tendermint/js-abci), then install it: -``` +```sh git clone https://github.com/tendermint/js-abci.git cd js-abci npm install abci @@ -270,13 +270,13 @@ npm install abci Kill the previous `counter` and `tendermint` processes. Now run the app: -``` +```sh node example/counter.js ``` In another window, reset and start `tendermint`: -``` +```sh tendermint unsafe_reset_all tendermint node ``` @@ -285,7 +285,7 @@ Once again, you should see blocks streaming by - but now, our application is written in Javascript! Try sending some transactions, and like before - the results should be the same: -``` +```sh # ok curl localhost:26657/broadcast_tx_commit?tx=0x00 # invalid nonce diff --git a/docs/app-dev/indexing-transactions.md b/docs/app-dev/indexing-transactions.md index 3e3fcd551..579f8690d 100644 --- a/docs/app-dev/indexing-transactions.md +++ b/docs/app-dev/indexing-transactions.md @@ -4,8 +4,7 @@ order: 6 # Indexing Transactions -Tendermint allows you to index transactions and later query or subscribe -to their results. +Tendermint allows you to index transactions and later query or subscribe to their results. Events can be used to index transactions and blocks according to what happened during their execution. Note that the set of events returned for a block from @@ -14,9 +13,13 @@ type, only the key-value pairs defined in `EndBlock` are used. Each event contains a type and a list of attributes, which are key-value pairs denoting something about what happened during the method's execution. For more -details on `Events`, see the [ABCI]https://github.com/tendermint/spec/blob/master/spec/abci/abci.md#events) documentation. 
+details on `Events`, see the +[ABCI](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md#events) +documentation. + +An Event has a composite key associated with it. A `compositeKey` is +constructed by its type and key separated by a dot. -An Event has a composite key associated with it. A `compositeKey` is constructed by its type and key separated by a dot. For example: ```json @@ -39,34 +42,20 @@ Let's take a look at the `[tx_index]` config section: # 1) "null" # 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). indexer = "kv" - -# Comma-separated list of composite keys to index (by default the only key is "tx.hash") -# -# You can also index transactions by height by adding "tx.height" key here. -# -# It's recommended to index only a subset of keys due to possible memory -# bloat. This is, of course, depends on the indexer's DB and the volume of -# transactions. -index_keys = "" - -# When set to true, tells indexer to index all compositeKeys (predefined keys: -# "tx.hash", "tx.height" and all keys from DeliverTx responses). -# -# Note this may be not desirable (see the comment above). Indexkeys has a -# precedence over IndexAllKeys (i.e. when given both, IndexKeys will be -# indexed). -index_all_keys = false ``` By default, Tendermint will index all transactions by their respective -hashes using an embedded simple indexer. Note, we are planning to add -more options in the future (e.g., PostgreSQL indexer). +hashes and height using an embedded simple indexer. + +You can turn off indexing completely by setting `tx_index` to `null`. ## Adding Events -In your application's `DeliverTx` method, add the `Events` field with pairs of -UTF-8 encoded strings (e.g. "transfer.sender": "Bob", "transfer.recipient": "Alice", -"transfer.balance": "100"). +Applications are free to define which events to index. Tendermint does not +expose functionality to define which events to index and which to ignore. In +your application's `DeliverTx` method, add the `Events` field with pairs of +UTF-8 encoded strings (e.g. "transfer.sender": "Bob", "transfer.recipient": +"Alice", "transfer.balance": "100"). Example: @@ -76,10 +65,11 @@ func (app *KVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.Resul events := []abci.Event{ { Type: "transfer", - Attributes: kv.Pairs{ - kv.Pair{Key: []byte("sender"), Value: []byte("Bob")}, - kv.Pair{Key: []byte("recipient"), Value: []byte("Alice")}, - kv.Pair{Key: []byte("balance"), Value: []byte("100")}, + Attributes: []abci.EventAttribute{ + {Key: []byte("sender"), Value: []byte("Bob"), Index: true}, + {Key: []byte("recipient"), Value: []byte("Alice"), Index: true}, + {Key: []byte("balance"), Value: []byte("100"), Index: true}, + {Key: []byte("note"), Value: []byte("nothing"), Index: true}, }, }, } @@ -87,22 +77,14 @@ func (app *KVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.Resul } ``` -If you want Tendermint to only index transactions by "transfer.sender" event type, -in the config set `tx_index.index_tags="transfer.sender"`. If you to index all events, -set `index_all_tags=true` - -Note, there are a few predefined event types: - -- `tx.hash` (transaction's hash) -- `tx.height` (height of the block transaction was committed in) - -Tendermint will throw a warning if you try to use any of the above keys. +The transaction will be indexed (if the indexer is not `null`) with a certain attribute if the attribute's `Index` field is set to `true`. 
+In the above example, all attributes will be indexed.
## Querying Transactions
You can query the transaction results by calling `/tx_search` RPC endpoint:
-```shell
+```bash
curl "localhost:26657/tx_search?query=\"account.name='igor'\"&prove=true"
```
diff --git a/docs/architecture/README.md b/docs/architecture/README.md
index 7519951d9..68145d6e1 100644
--- a/docs/architecture/README.md
+++ b/docs/architecture/README.md
@@ -55,12 +55,15 @@ Note the context/background should be written in the present tense.
- [ADR-024-Sign-Bytes](./adr-024-sign-bytes.md)
- [ADR-025-Commit](./adr-025-commit.md)
- [ADR-026-General-Merkle-Proof](./adr-026-general-merkle-proof.md)
+- [ADR-028-libp2p](./adr-028-libp2p.md)
- [ADR-029-Check-Tx-Consensus](./adr-029-check-tx-consensus.md)
- [ADR-030-Consensus-Refactor](./adr-030-consensus-refactor.md)
+- [ADR-031-Changelog-Structure](./adr-031-changelog.md)
- [ADR-033-Pubsub](./adr-033-pubsub.md)
- [ADR-034-Priv-Validator-File-Structure](./adr-034-priv-validator-file-structure.md)
- [ADR-035-Documentation](./adr-035-documentation.md)
- [ADR-037-Deliver-Block](./adr-037-deliver-block.md)
+- [ADR-038-Non-Zero-Start-Height](./adr-038-non-zero-start-height.md)
- [ADR-039-Peer-Behaviour](./adr-039-peer-behaviour.md)
- [ADR-041-Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md)
- [ADR-043-Blockchain-RiRi-Org](./adr-043-blockchain-riri-org.md)
@@ -71,5 +74,12 @@ Note the context/background should be written in the present tense.
- [ADR-051-Double-Signing-Risk-Reduction](./adr-051-double-signing-risk-reduction.md)
- [ADR-052-Tendermint-Mode](./adr-052-tendermint-mode.md)
- [ADR-053-State-Sync-Prototype](./adr-053-state-sync-prototype.md)
-- [ADR-054-crypto-encoding-2](./adr-054-crypto-encoding-2.md)
-- [ADR-055-protobuf-design](./adr-055-protobuf-design.md)
+- [ADR-054-Crypto-Encoding-2](./adr-054-crypto-encoding-2.md)
+- [ADR-055-Protobuf-Design](./adr-055-protobuf-design.md)
+- [ADR-056-Light-Client-Amnesia-Attacks](./adr-056-light-client-amnesia-attacks.md)
+- [ADR-057-RPC](./adr-057-RPC.md)
+- [ADR-058-Event-Hashing](./adr-058-event-hashing.md)
+- [ADR-059-Evidence-Composition-and-Lifecycle](./adr-059-evidence-composition-and-lifecycle.md)
+- [ADR-060-Go-API-Stability](./adr-060-go-api-stability.md)
+- [ADR-061-P2P-Refactor-Scope](./adr-061-p2p-refactor-scope.md)
+- [ADR-062-P2P-Architecture](./adr-062-p2p-architecture.md)
diff --git a/docs/architecture/adr-014-secp-malleability.md b/docs/architecture/adr-014-secp-malleability.md
index e6014c09b..33f9d0044 100644
--- a/docs/architecture/adr-014-secp-malleability.md
+++ b/docs/architecture/adr-014-secp-malleability.md
@@ -43,7 +43,7 @@ This is the same solution Ethereum has chosen for solving secp malleability.
## Proposed Implementation
-Fork https://github.com/btcsuite/btcd, and just update the [parse sig method](https://github.com/btcsuite/btcd/blob/master/btcec/signature.go#195) and serialize functions to enforce our canonical form.
+Fork https://github.com/btcsuite/btcd, and just update the [parse sig method](https://github.com/btcsuite/btcd/blob/11fcd83963ab0ecd1b84b429b1efc1d2cdc6d5c5/btcec/signature.go#L195) and serialize functions to enforce our canonical form.
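For illustration, the canonical-form requirement amounts to the standard "low-S" rule; a minimal sketch in Go (using only `math/big`, not the forked btcd code itself) might look like:

```go
package secp256k1util

import "math/big"

// n is the order of the secp256k1 group; halfN = n/2 is the low-S threshold.
var (
	n, _  = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
	halfN = new(big.Int).Rsh(n, 1)
)

// canonicalS returns the canonical (low) form of a signature's S value.
// Both (r, s) and (r, n-s) verify against the same message and public key,
// so rejecting or rewriting high-S values removes the malleability.
func canonicalS(s *big.Int) *big.Int {
	if s.Cmp(halfN) > 0 {
		return new(big.Int).Sub(n, s)
	}
	return s
}
```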
## Status
diff --git a/docs/architecture/adr-023-ABCI-propose-tx.md b/docs/architecture/adr-023-ABCI-propose-tx.md
index 497ccd184..a545fd5b6 100644
--- a/docs/architecture/adr-023-ABCI-propose-tx.md
+++ b/docs/architecture/adr-023-ABCI-propose-tx.md
@@ -128,7 +128,7 @@ message ResponseProposeTx {
```
`ProposeTx` would be called before `mempool.Reap` at this
-[line](https://github.com/tendermint/tendermint/blob/master/consensus/state.go#L906).
+[line](https://github.com/tendermint/tendermint/blob/9cd9f3338bc80a12590631632c23c8dbe3ff5c34/consensus/state.go#L935).
Depending on whether `exclusive` is `true` or `false`, the proposed
transactions are then pushed on top of the transactions received from
`mempool.Reap`.
diff --git a/docs/architecture/adr-028-libp2p.md b/docs/architecture/adr-028-libp2p.md
new file mode 100644
index 000000000..fbfe32f12
--- /dev/null
+++ b/docs/architecture/adr-028-libp2p.md
@@ -0,0 +1,38 @@
+# ADR 028: LibP2P Integration
+
+## Changelog
+
+- {date}: {changelog}
+
+## Context
+
+> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution.
+
+## Decision
+
+> This section explains all of the details of the proposed solution, including implementation details.
+> It should also describe affects / corollary items that may need to be changed as a part of this.
+> If the proposed change will be large, please also indicate a way to do the change to maximize ease of review.
+> (e.g. the optimal split of things to do between separate PR's)
+
+## Status
+
+> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement.
+
+{Deprecated|Proposed|Accepted|Declined}
+
+## Consequences
+
+> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones.
+
+### Positive
+
+### Negative
+
+### Neutral
+
+## References
+
+> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here!
+
+- {reference link}
diff --git a/docs/architecture/adr-029-check-tx-consensus.md b/docs/architecture/adr-029-check-tx-consensus.md
index c1b882c61..191a0ec8e 100644
--- a/docs/architecture/adr-029-check-tx-consensus.md
+++ b/docs/architecture/adr-029-check-tx-consensus.md
@@ -43,7 +43,6 @@ However, this method should not be implemented like that, because checkTx will s
type Application interface {
  // Info/Query Connection
  Info(RequestInfo) ResponseInfo // Return application info
-  SetOption(RequestSetOption) ResponseSetOption // Set application option
  Query(RequestQuery) ResponseQuery // Query for state
  // Mempool Connection
diff --git a/docs/architecture/adr-031-changelog.md b/docs/architecture/adr-031-changelog.md
new file mode 100644
index 000000000..56fb875a2
--- /dev/null
+++ b/docs/architecture/adr-031-changelog.md
@@ -0,0 +1,38 @@
+# ADR 031: Changelog Structure
+
+## Changelog
+
+- {date}: {changelog}
+
+## Context
+
+> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution.
+ +## Decision + +> This section explains all of the details of the proposed solution, including implementation details. +> It should also describe affects / corollary items that may need to be changed as a part of this. +> If the proposed change will be large, please also indicate a way to do the change to maximize ease of review. +> (e.g. the optimal split of things to do between separate PR's) + +## Status + +> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement. + +{Deprecated|Proposed|Accepted|Declined} + +## Consequences + +> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones. + +### Positive + +### Negative + +### Neutral + +## References + +> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here! + +- {reference link} diff --git a/docs/architecture/adr-034-priv-validator-file-structure.md b/docs/architecture/adr-034-priv-validator-file-structure.md index 83160bfb8..8eb7464b4 100644 --- a/docs/architecture/adr-034-priv-validator-file-structure.md +++ b/docs/architecture/adr-034-priv-validator-file-structure.md @@ -57,7 +57,7 @@ What we need to do next is changing the methods of `FilePV`. ## Status -Draft. +Accepted and implemented in [#2870](https://github.com/tendermint/tendermint/pull/2870). ## Consequences diff --git a/docs/architecture/adr-036-empty-blocks-abci.md b/docs/architecture/adr-036-empty-blocks-abci.md new file mode 100644 index 000000000..ec4806cfa --- /dev/null +++ b/docs/architecture/adr-036-empty-blocks-abci.md @@ -0,0 +1,38 @@ +# ADR 036: Empty Blocks via ABCI + +## Changelog + +- {date}: {changelog} + +## Context + +> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution. + +## Decision + +> This section explains all of the details of the proposed solution, including implementation details. +> It should also describe affects / corollary items that may need to be changed as a part of this. +> If the proposed change will be large, please also indicate a way to do the change to maximize ease of review. +> (e.g. the optimal split of things to do between separate PR's) + +## Status + +> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement. + +{Deprecated|Proposed|Accepted|Declined} + +## Consequences + +> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones. + +### Positive + +### Negative + +### Neutral + +## References + +> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here! 
+
+- {reference link}
diff --git a/docs/architecture/adr-038-non-zero-start-height.md b/docs/architecture/adr-038-non-zero-start-height.md
new file mode 100644
index 000000000..7dd474ec7
--- /dev/null
+++ b/docs/architecture/adr-038-non-zero-start-height.md
@@ -0,0 +1,38 @@
+# ADR 038: Non-zero start height
+
+## Changelog
+
+- {date}: {changelog}
+
+## Context
+
+> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution.
+
+## Decision
+
+> This section explains all of the details of the proposed solution, including implementation details.
+> It should also describe affects / corollary items that may need to be changed as a part of this.
+> If the proposed change will be large, please also indicate a way to do the change to maximize ease of review.
+> (e.g. the optimal split of things to do between separate PR's)
+
+## Status
+
+> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement.
+
+{Deprecated|Proposed|Accepted|Declined}
+
+## Consequences
+
+> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones.
+
+### Positive
+
+### Negative
+
+### Neutral
+
+## References
+
+> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here!
+
+- {reference link}
diff --git a/docs/architecture/adr-044-lite-client-with-weak-subjectivity.md b/docs/architecture/adr-044-lite-client-with-weak-subjectivity.md
index 2109e2952..a4dc0aad4 100644
--- a/docs/architecture/adr-044-lite-client-with-weak-subjectivity.md
+++ b/docs/architecture/adr-044-lite-client-with-weak-subjectivity.md
@@ -32,7 +32,7 @@ fork the network at some point in its prior history. See Vitalik's post at
Subjectivity](https://blog.ethereum.org/2014/11/25/proof-stake-learned-love-weak-subjectivity/).
Currently, Tendermint provides a lite client implementation in the
-[lite](https://github.com/tendermint/tendermint/tree/master/lite) package. This
+[light](https://github.com/tendermint/tendermint/tree/master/light) package. This
lite client implements a bisection algorithm that tries to use a binary search
to find the minimum number of block headers where the validator set voting
power changes are less than 1/3rd. This interface does not support weak
diff --git a/docs/architecture/adr-045-abci-evidence.md b/docs/architecture/adr-045-abci-evidence.md
index 3cb91be75..f694c71d4 100644
--- a/docs/architecture/adr-045-abci-evidence.md
+++ b/docs/architecture/adr-045-abci-evidence.md
@@ -31,7 +31,7 @@ checking).
Currently, evidence verification is handled by Tendermint. Once committed,
[evidence is passed over
-ABCI](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto#L321)
+ABCI](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/abci/types.proto#L354)
in BeginBlock in a reduced form that includes only the type of evidence, its
height and timestamp, the validator it's from, and the total voting power of
the validator set at the height.
The app trusts Tendermint
diff --git a/docs/architecture/adr-047-handling-evidence-from-light-client.md b/docs/architecture/adr-047-handling-evidence-from-light-client.md
index 8b3a850ba..4de47819a 100644
--- a/docs/architecture/adr-047-handling-evidence-from-light-client.md
+++ b/docs/architecture/adr-047-handling-evidence-from-light-client.md
@@ -3,184 +3,225 @@
## Changelog
* 18-02-2020: Initial draft
* 24-02-2020: Second version
+* 13-04-2020: Add PotentialAmnesiaEvidence and a few remarks
+* 31-07-2020: Remove PhantomValidatorEvidence
+* 14-08-2020: Introduce light traces (listed now as an alternative approach)
+* 20-08-2020: Light client produces evidence when detected instead of passing to full node
+* 16-09-2020: Post-implementation revision
+
+### Glossary of Terms
+
+- a `LightBlock` is the unit of data that a light client receives, verifies and stores.
+It is composed of a validator set, commit and header all at the same height.
+- a **Trace** is seen as an array of light blocks across a range of heights that were
+created as a result of skipping verification.
+- a **Provider** is a full node that a light client is connected to and serves the light
+client signed headers and validator sets.
+- `VerifySkipping` (sometimes known as bisection or verify non-adjacent) is a method the
+light client uses to verify a target header from a trusted header. The process involves verifying
+intermediate headers in between the two by making sure that 1/3 of the validators that signed
+the trusted header also signed the untrusted one.
+- **Light Bifurcation Point**: If the light client were to run `VerifySkipping` with two providers
+(i.e. a primary and a witness), the bifurcation point is the height at which the headers
+from each of these providers are different yet valid. This signals that one of the providers
+may be trying to fool the light client.
## Context
-If the light client is under attack, either directly -> lunatic/phantom
-validators (light fork) or indirectly -> full fork, it's supposed to halt and
-send evidence of misbehavior to a correct full node. Upon receiving an
-evidence, the full node should punish malicious validators (if possible).
+The bisection method of header verification used by the light client exposes
+itself to a potential attack if any block within the light client's trusted period has
+a malicious group of validators with power that exceeds the light client's trust level
+(default is 1/3). To improve light client (and overall network) security, the light
+client has a detector component that compares the verified header provided by the
+primary against witness headers. This ADR outlines the process of mitigating attacks
+on the light client by using witness nodes to cross-reference with.
-## Decision
-When a light client sees two conflicting headers (`H1.Hash() != H2.Hash()`,
-`H1.Height == H2.Height`), both having 1/3+ of the voting power of the
-currently trusted validator set, it will submit a `ConflictingHeadersEvidence`
-to all full nodes it's connected to. Evidence needs to be submitted to all full
-nodes since there's no way to determine which full node is correct (honest).
+A previously discussed approach to handling evidence was to pass all the data that the
+light client had witnessed when it had observed diverging headers for the full node to
+process. This was known as a light trace and had the following structure:
```go
-type ConflictingHeadersEvidence struct {
-  H1 types.SignedHeader
-  H2 types.SignedHeader
+type ConflictingHeadersTrace struct {
+  Headers []*types.SignedHeader
}
```
-When a full node receives the `ConflictingHeadersEvidence` evidence, it should
-a) validate it b) figure out if malicious behaviour is obvious (immediately
-slashable) or the fork accountability protocol needs to be started.
+This approach has the advantage of not requiring as much processing on the light
+client side in the event that an attack happens. This is, however, not a significant
+difference, as the light client would in any case have to validate all the headers
+from both witness and primary. Using traces would consume a large amount of bandwidth
+and add a DDoS vector to the full node.
-### Validating headers
-Check both headers are valid (`ValidateBasic`), have the same height, and
-signed by 1/3+ of the validator set that the full node had at height
-`H1.Height-1`.
+## Decision
-- Q: What if light client validator set is not equal to full node's validator
-  set (i.e. from full node's point of view both headers are not properly signed;
-  this includes the case where none of the two headers were committed on the
-  main chain)
+The light client will be divided into two components: a `Verifier` (either sequential or
+skipping) and a `Detector` (see [Informal's Detector](https://github.com/informalsystems/tendermint-rs/blob/master/docs/spec/lightclient/detection/detection.md))
+. The detector will take the trace of headers from the primary and check it against all
+witnesses. For a witness with a diverging header, the detector will first verify the header
+by bisecting through all the heights defined by the trace that the primary provided. If valid,
+the light client will trawl through both traces and find the point of bifurcation where it
+can proceed to extract any evidence (as is discussed in detail later).
-  Reject the evidence. It means light client is following a fork, but, hey, at
-  least it will halt.
+Upon successfully detecting the evidence, the light client will send it to both primary and
+witness before halting. It will not send evidence to other peers nor continue to verify the
+primary's header against any other header.
-- Q: Don't we want to punish validators who signed something else even if they
-  have less or equal than 1/3?
-  No consensus so far. Ethan said no, Zarko said yes.
-  https://github.com/tendermint/spec/pull/71#discussion_r374210533
+## Detailed Design
-### Figuring out if malicious behaviour is immediately slashable
+The verification process of the light client will start from a trusted header and use a bisectional
+algorithm to verify up to a header at a given height. This becomes the verified header (does not
+mean that it is trusted yet). All headers that were verified in between are cached and known as
+intermediary headers and the entire array is sometimes referred to as a trace.
-Let's say H1 was committed from this full node's perspective (see Appendix A).
-Intersect validator sets of H1 and H2.
+The light client's detector then takes all the headers and runs the detect function.
-* if there are signers(H2) that are not part of validators(H1), they misbehaved as
-they are signing protocol messages in heights they are not validators =>
-immediately slashable (#F4).
```golang
+func (c *Client) detectDivergence(primaryTrace []*types.LightBlock, now time.Time) error
```
-* if `H1.Round == H2.Round`, and some signers signed different precommit
-messages in both commits, then it is an equivocation misbehavior => immediately
-slashable (#F1).
+The function takes the last header it received, the target header and compares it against all the witnesses
+it has through the following function:
-* if `H1.Round != H2.Round` we need to run full detection procedure => not
-immediately slashable.
+```golang
+func (c *Client) compareNewHeaderWithWitness(errc chan error, h *types.SignedHeader,
+  witness provider.Provider, witnessIndex int)
+```
-* if `ValidatorsHash`, `NextValidatorsHash`, `ConsensusHash`,
-`AppHash`, and `LastResultsHash` in H2 are different (incorrect application
-state transition), then it is a lunatic misbehavior => immediately slashable (#F5).
+The err channel is used to send back all the outcomes so that they can be processed in parallel.
+Invalid headers result in dropping the witness; a lack of response or missing headers is ignored,
+as are headers that have the same hash. Headers with a different hash, however,
+then trigger the detection process between the primary and that particular witness.
-If evidence is not immediately slashable, fork accountability needs to invoked
-(ADR does not yet exist).
+This begins with verification of the witness's header via skipping verification, which is run in tandem
+with locating the Light Bifurcation Point.
-It's unclear if we should further break up `ConflictingHeadersEvidence` or
-gossip and commit it directly. See
-https://github.com/tendermint/tendermint/issues/4182#issuecomment-590339233
![](../imgs/light-client-detector.png)
-If we'd go without breaking evidence, all we'll need to do is to strip the
-committed header from `ConflictingHeadersEvidence` (H1) and leave only the
-uncommitted header (H2):
+This is done with:
-```go
-type ConflictingHeaderEvidence struct {
-  H types.SignedHeader
-}
+```golang
+func (c *Client) examineConflictingHeaderAgainstTrace(
+  trace []*types.LightBlock,
+  divergentHeader *types.SignedHeader,
+  source provider.Provider, now time.Time) ([]*types.LightBlock, *types.LightBlock, error)
```
-If we'd go with breaking evidence, here are the types we'll need:
+which performs the following:
-### F1. Equivocation
+1. Checking that the trusted header is the same. Currently, they should not theoretically be different
+because witnesses cannot be added and removed after the client is initialized. But we do this anyway
+as a sanity check. If this fails we have to drop the witness.
-Existing `DuplicateVoteEvidence` needs to be created and gossiped.
+2. Querying and verifying the witness's headers using bisection at the same heights as all the
+intermediary headers of the primary (In the above example this is A, B, C, D, F, H). If bisection fails or the witness stops responding then
+we can call the witness faulty and drop it.
-### F4. Phantom validators
+3. We eventually reach a verified header by the witness which is not the same as the intermediary header (In the above example this is E).
+This is the point of bifurcation (This could also be the last header).
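A condensed sketch of this search; `witnessVerifyTo` is a hypothetical helper standing in for the bisection queries above, and `bytes`/`errors` imports are assumed:

```golang
// findBifurcation walks the primary's trace and returns the first witness
// header verified at the same height as an intermediary header but carrying
// a different hash - the Light Bifurcation Point.
func findBifurcation(
	primaryTrace []*types.LightBlock,
	witnessVerifyTo func(height int64) (*types.LightBlock, error), // hypothetical
) (*types.LightBlock, error) {
	for _, lb := range primaryTrace {
		w, err := witnessVerifyTo(lb.Height)
		if err != nil {
			return nil, err // failed bisection or no response: drop the witness
		}
		if !bytes.Equal(w.Hash(), lb.Hash()) {
			return w, nil // the headers diverge here
		}
	}
	return nil, errors.New("witness and primary agree on the whole trace")
}
```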
-A new type of evidence needs to be created:
+This function then returns the trace of blocks from the witness node between the common header and the
+divergent header of the primary, as it
+is likely, as seen in the example to the right below, that multiple headers were required in order to
+verify the divergent one. This trace will
+be used later (as is also described later in this document).
-```go
-type PhantomValidatorEvidence struct {
-  PubKey crypto.PubKey
-  Vote types.Vote
-}
-```
-
-It contains a validator's public key and a vote for a block, where this
-validator is not part of the validator set.
+![](../imgs/bifurcation-point.png)
-### F5. Lunatic validator
+Now that an attack has been detected, the light client must form evidence to prove it. There are
+three types of attacks that either the primary or witness could have done to try to fool the light client
+into verifying the wrong header: Lunatic, Equivocation and Amnesia. As the consequence is the same and
+the data required to prove it is also very similar, we bundle these attack styles together in a single
+evidence:
-```go
-type LunaticValidatorEvidence struct {
-  Header types.Header
-  Vote types.Vote
+```golang
+type LightClientAttackEvidence struct {
+  ConflictingBlock *LightBlock
+  CommonHeight int64
}
```
-To punish this attack, we need support for a new Evidence type -
-`LunaticValidatorEvidence`. This type includes a vote and a header. The header
-must contain fields that are invalid with respect to the previous block, and a
-vote for that header by a validator that was in a validator set within the
-unbonding period. While the attack is only possible if +1/3 of some validator
-set colludes, the evidence should be verifiable independently for each
-individual validator. This means the total evidence can be split into one piece
-of evidence per attacking validator and gossipped to nodes to be verified one
-piece at a time, reducing the DoS attack surface at the peer layer.
-
-Note it is not sufficient to simply compare this header with that committed for
-the corresponding height, as an honest node may vote for a header that is not
-ultimately committed. Certain fields may also be variable, for instance the
-`LastCommitHash` and the `Time` may depend on which votes the proposer includes.
-Thus, the header must be explicitly checked for invalid data.
-
-For the attack to succeed, VC must sign a header that changes the validator set
-to consist of something they control. Without doing this, they can not
-otherwise attack the light client, since the client verifies commits according
-to validator sets. Thus, it should be sufficient to check only that
-`ValidatorsHash` and `NextValidatorsHash` are correct with respect to the
-header that was committed at the corresponding height.
-
-That said, if the attack is conducted by +2/3 of the validator set, they don't
-need to make an invalid change to the validator set, since they already control
-it. Instead they would make invalid changes to the `AppHash`, or possibly other
-fields. In order to punish them, then, we would have to check all header
-fields.
-
-Note some header fields require the block itself to verify, which the light
-client, by definition, does not possess, so it may not be possible to check
-these fields. For now, then, `LunaticValidatorEvidence` must be checked against
-all header fields which are a function of the application at previous blocks.
-This includes `ValidatorsHash`, `NextValidatorsHash`, `ConsensusHash`,
-`AppHash`, and `LastResultsHash`.
These should all match what's in the header
-for the block that was actually committed at the corresponding height, and
-should thus be easy to check.
+The light client takes the stance of first suspecting the primary. Given the bifurcation point found
+above, it takes the two divergent headers and compares whether the one from the primary is valid with
+respect to the one from the witness. This is done by calling `isInvalidHeader()`, which looks to see if
+any one of the deterministically derived header fields differs from one another. This could be one of
+`ValidatorsHash`, `NextValidatorsHash`, `ConsensusHash`, `AppHash`, and `LastResultsHash`.
+In this case we know it's a Lunatic attack and, to help the witness verify it, we send the height
+of the common header, which is 1 in the example above or C in the example above that. If all these
+hashes are the same then we can infer that it is either Equivocation or Amnesia. In this case we send
+the height of the diverged headers because we know that the validator sets are the same, hence the
+malicious nodes are still bonded at that height. In the example above, this is height 10, and in the
+example above that, it is the height at E.
+
+The light client now has the evidence and broadcasts it to the witness.
+
+However, it could have been that the header the light client used from the witness against the primary
+was forged, so before halting the light client swaps the process and thus suspects the witness and
+uses the primary to create evidence. It calls `examineConflictingHeaderAgainstTrace`, this time using
+the witness trace found earlier.
+If the primary was malicious, it is likely that it will not respond; but if it is innocent, then the
+light client will produce the same evidence, but this time the conflicting
+block will come from the witness node instead of the primary. The evidence is then formed and sent to
+the primary node.
+
+This then ends the process, and the verify function that was called at the start returns the error to
+the user.
+
+For a detailed overview of how each of these three attacks can be conducted please refer to the
+[fork accountability spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md).
+
+## Full Node Verification
+
+When a full node receives evidence from the light client it will need to verify
+it for itself before gossiping it to peers and trying to commit it on chain. This process is outlined
+in [ADR-059](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-059-evidence-composition-and-lifecycle.md).
## Status
-Proposed.
+Implemented.
## Consequences
### Positive
-* Tendermint will be able to detect & punish new types of misbehavior
-* light clients connected to multiple full nodes can help full nodes notice a
-  fork faster
+* Light client has increased security against Lunatic, Equivocation and Amnesia attacks.
+* Do not need intermediate data structures to encapsulate the malicious behavior
+* Generalized evidence makes the code simpler
### Negative
-* Accepting `ConflictingHeadersEvidence` from light clients opens up a DDOS
-attack vector (same is fair for any RPC endpoint open to public; remember that
-RPC is not open by default).
+* Breaking change on the light client from versions 0.33.8 and below. Previous
+versions will still send `ConflictingHeadersEvidence`, but it won't be recognized
+by the full node. Light clients will, however, still refuse the header and shut down.
+This then ends the process, and the verify function that was called at the start returns the error to
+the user.
+
+For a detailed overview of how each of these three attacks can be conducted, please refer to the
+[fork accountability spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md).
+
+## Full Node Verification
+
+When a full node receives evidence from the light client it will need to verify
+it for itself before gossiping it to peers and trying to commit it on chain. This process is outlined
+in [ADR-059](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-059-evidence-composition-and-lifecycle.md).

 ## Status

-Proposed.
+Implemented.

 ## Consequences

 ### Positive

-* Tendermint will be able to detect & punish new types of misbehavior
-* light clients connected to multiple full nodes can help full nodes notice a
- fork faster
+* Light client has increased security against Lunatic, Equivocation and Amnesia attacks.
+* Do not need intermediate data structures to encapsulate the malicious behavior
+* Generalized evidence makes the code simpler

 ### Negative

-* Accepting `ConflictingHeadersEvidence` from light clients opens up a DDOS
-attack vector (same is fair for any RPC endpoint open to public; remember that
-RPC is not open by default).
+* Breaking change on the light client from versions 0.33.8 and below. Previous
+versions will still send `ConflictingHeadersEvidence` but it won't be recognized
+by the full node. Light clients will however still refuse the header and shut down.
+* Amnesia attacks, although detected, cannot currently be punished, as it is not
+clear from the available information which nodes behaved maliciously.
+* Evidence module must handle both individual and grouped evidence.

 ### Neutral

 ## References

 * [Fork accountability spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md)
+* [ADR 056: Light client amnesia attacks](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-056-light-client-amnesia-attacks.md)
+* [ADR-059: Evidence Composition and Lifecycle](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-059-evidence-composition-and-lifecycle.md)
+* [Informal's Light Client Detector](https://github.com/informalsystems/tendermint-rs/blob/master/docs/spec/lightclient/detection/detection.md)

 ## Appendix A

-If there is an actual fork (full fork), a full node may follow either one or
-another branch. So both H1 or H2 can be considered committed depending on which
-branch the full node is following. It's supposed to halt if it notices an
-actual fork, but there's a small chance it doesn't.
+PhantomValidatorEvidence was used to capture the case where a validator that was still staked
+(i.e. within the bonded period) but was not in the current validator set had voted for a block.
+
+In later discussions it was argued that although it is possible to keep phantom validator
+evidence, in any case where a phantom validator had the capacity to be involved
+in fooling a light client, it would have to be aided by 1/3+ lunatic validators.
+
+It would also be very unlikely that the new validators injected by the lunatic attack
+would be validators that currently still have something staked.
+
+Not only this, but there was a large degree of extra computation required in storing all
+the currently staked validators that could possibly fall into the group of being
+a phantom validator. Given this, it was removed.

diff --git a/docs/architecture/adr-051-double-signing-risk-reduction.md b/docs/architecture/adr-051-double-signing-risk-reduction.md
index ae663e8b5..e5ec33845 100644
--- a/docs/architecture/adr-051-double-signing-risk-reduction.md
+++ b/docs/architecture/adr-051-double-signing-risk-reduction.md
@@ -28,8 +28,8 @@ We would like to suggest a double signing risk reduction method.

 - Configuration
   - We would like to suggest by introducing `double_sign_check_height` parameter in `config.toml` and cli, how many blocks state machine looks back to check votes
     - `double_sign_check_height = {{ .Consensus.DoubleSignCheckHeight }}` in `config.toml`
-    - `tendermint node --double_sign_check_height` in cli
-  - State machine ignore checking procedure when `vote-check-height == 0`
+    - `tendermint node --consensus.double_sign_check_height` in cli
+  - State machine ignores the checking procedure when `double_sign_check_height == 0`
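+For example, to make the node check the last 10 blocks for an existing vote before signing (the
+value here is illustrative):
+
+```sh
+$ tendermint node --consensus.double_sign_check_height=10
+```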
 ## Status

diff --git a/docs/architecture/adr-053-state-sync-prototype.md b/docs/architecture/adr-053-state-sync-prototype.md
index 2848f9dd4..90169f44c 100644
--- a/docs/architecture/adr-053-state-sync-prototype.md
+++ b/docs/architecture/adr-053-state-sync-prototype.md
@@ -1,5 +1,7 @@
 # ADR 053: State Sync Prototype

+State sync is now [merged](https://github.com/tendermint/tendermint/pull/4705). Up-to-date ABCI documentation is [available](https://github.com/tendermint/spec/pull/90); refer to it rather than this ADR for details.
+
 This ADR outlines the plan for an initial state sync prototype, and is subject to change as we gain feedback and experience. It builds on discussions and findings in [ADR-042](./adr-042-state-sync.md); see that for background information.

 ## Changelog

@@ -22,6 +24,8 @@ This ADR outlines the plan for an initial state sync prototype, and is subject t
 * ABCI: chunks are now 0-indexed, for parity with `chunk_hashes` array.
 * Reduced maximum chunk size to 16 MB, and increased snapshot message size to 4 MB.

+* 2020-04-29: Update with final released ABCI interface (Erik Grinaker)
+
 ## Context

 State sync will allow a new node to receive a snapshot of the application state without downloading blocks or going through consensus. This bootstraps the node significantly faster than the current fast sync system, which replays all historical blocks.

@@ -48,10 +52,11 @@ A node can have multiple snapshots taken at various heights. Snapshots can be ta

 ```proto
 message Snapshot {
-  uint64 height = 1;  // The height at which the snapshot was taken
-  uint32 format = 2;  // The application-specific snapshot format
-  repeated bytes chunk_hashes = 3;  // SHA-256 checksums of all chunks, in order
-  bytes metadata = 4;  // Arbitrary application metadata
+  uint64 height = 1;  // The height at which the snapshot was taken
+  uint32 format = 2;  // The application-specific snapshot format
+  uint32 chunks = 3;  // Number of chunks in the snapshot
+  bytes hash = 4;     // Arbitrary snapshot hash - should be equal only for identical snapshots
+  bytes metadata = 5; // Arbitrary application metadata
 }
 ```

@@ -64,50 +69,57 @@ Chunks are exchanged simply as `bytes`, and cannot be larger than 16 MB. `Snapsh

 message RequestListSnapshots {}

 message ResponseListSnapshots {
-  repeated Snapshot snapshots = 1;
+  repeated Snapshot snapshots = 1;
 }

 // Offers a snapshot to the application
 message RequestOfferSnapshot {
-  Snapshot snapshot = 1;
-  bytes app_hash = 2;
-}
+  Snapshot snapshot = 1;  // snapshot offered by peers
+  bytes app_hash = 2;     // light client-verified app hash for snapshot height
+ }

 message ResponseOfferSnapshot {
-  bool accepted = 1;
-  Reason reason = 2;
-
-  enum Reason {             // Reason why snapshot was rejected
-    unknown = 0;            // Unknown or generic reason
-    invalid_height = 1;     // Height is rejected: avoid this height
-    invalid_format = 2;     // Format is rejected: avoid this format
-  }
+  Result result = 1;
+
+  enum Result {
+    accept = 0;         // Snapshot accepted, apply chunks
+    abort = 1;          // Abort all snapshot restoration
+    reject = 2;         // Reject this specific snapshot, and try a different one
+    reject_format = 3;  // Reject all snapshots of this format, and try a different one
+    reject_sender = 4;  // Reject all snapshots from the sender(s), and try a different one
+  }
 }

 // Loads a snapshot chunk
 message RequestLoadSnapshotChunk {
-  uint64 height = 1;
-  uint32 format = 2;
-  uint32 chunk = 3;  // Zero-indexed
+  uint64 height = 1;
+  uint32 format = 2;
+  uint32 chunk = 3;  // Zero-indexed
 }

 message ResponseLoadSnapshotChunk {
-  bytes chunk = 1;
+  bytes chunk = 1;
 }

 // Applies a snapshot chunk
 message RequestApplySnapshotChunk {
-  bytes chunk = 1;
-}
+  uint32 index = 1;
+  bytes chunk = 2;
+  string sender = 3;
+ }

 message ResponseApplySnapshotChunk {
-  bool applied = 1;
-  Reason reason = 2;  // Reason why chunk failed
-
-  enum Reason {           // Reason why chunk failed
-    unknown = 0;          // Unknown or generic reason
-    verify_failed = 1;    // Snapshot verification failed
-  }
+  Result result = 1;
+  repeated uint32 refetch_chunks = 2;  // Chunks to refetch and reapply (regardless of result)
+  repeated string reject_senders = 3;  // Chunk senders to reject and ban (regardless of result)
+
+  enum Result {
+    accept = 0;           // Chunk successfully accepted
+    abort = 1;            // Abort all snapshot restoration
+    retry = 2;            // Retry chunk, combine with refetch and reject as appropriate
+    retry_snapshot = 3;   // Retry snapshot, combine with refetch and reject as appropriate
+    reject_snapshot = 4;  // Reject this snapshot, try a different one but keep sender rejections
+  }
 }
 ```

@@ -141,15 +153,15 @@ When starting an empty node with state sync and fast sync enabled, snapshots are

 4. The node requests available snapshots via P2P from peers, via `RequestListSnapshots`. Peers will return the 10 most recent snapshots, one message per snapshot.

-5. The node aggregates snapshots from multiple peers, ordered by height and format (in reverse). If there are `chunk_hashes` mismatches between different snapshots, the one hosted by the largest amount of peers is chosen. The node iterates over all snapshots in reverse order by height and format until it finds one that satisfies all of the following conditions:
+5. The node aggregates snapshots from multiple peers, ordered by height and format (in reverse). If there are mismatches between different snapshots, the one hosted by the largest number of peers is chosen. The node iterates over all snapshots in reverse order by height and format until it finds one that satisfies all of the following conditions:

 * The snapshot height's block is considered trustworthy by the light client (i.e. snapshot height is greater than trusted header and within unbonding period of the latest trustworthy block).

- * The snapshot's height or format hasn't been explicitly rejected by an earlier `RequestOfferSnapshot` call (via `invalid_height` or `invalid_format`).
+ * The snapshot's height or format hasn't been explicitly rejected by an earlier `RequestOfferSnapshot`.

 * The application accepts the `RequestOfferSnapshot` call.

-6. The node downloads chunks in parallel from multiple peers, via `RequestLoadSnapshotChunk`, and both the sender and receiver verifies their checksums. Chunk messages cannot exceed 16 MB.
+6. The node downloads chunks in parallel from multiple peers, via `RequestLoadSnapshotChunk`. Chunk messages cannot exceed 16 MB.

 7. The node passes chunks sequentially to the app via `RequestApplySnapshotChunk`.

@@ -191,38 +203,6 @@ Taking consistent snapshots of IAVL trees is greatly simplified by them being ve

 Snapshots must also be garbage collected after some configurable time, e.g. by keeping the latest `n` snapshots.

-## Experimental Prototype
-
-An experimental but functional state sync prototype is available in the `erik/statesync-prototype` branches of the Tendermint, IAVL, Cosmos SDK, and Gaia repositories. To fetch the necessary branches:
-
-```sh
-$ mkdir statesync
-$ cd statesync
-$ git clone git@github.com:tendermint/tendermint -b erik/statesync-prototype
-$ git clone git@github.com:tendermint/iavl -b erik/statesync-prototype
-$ git clone git@github.com:cosmos/cosmos-sdk -b erik/statesync-prototype
-$ git clone git@github.com:cosmos/gaia -b erik/statesync-prototype
-```
-
-To spin up three nodes of a four-node testnet:
-
-```sh
-$ cd gaia
-$ ./tools/start.sh
-```
-
-Wait for the first snapshot to be taken at height 3, then (in a separate terminal) start the fourth node with state sync enabled:
-
-```sh
-$ ./tools/sync.sh
-```
-
-To stop the testnet, run:
-
-```sh
-$ ./tools/stop.sh
-```
-
 ## Resolved Questions

 * Is it OK for state-synced nodes to not have historical blocks nor historical IAVL versions?
@@ -265,46 +245,6 @@ $ ./tools/stop.sh

 > No, just use a max message size.

-## Implementation Plan
-
-### Core Tasks
-
-* **Tendermint:** light client P2P transport [#4456](https://github.com/tendermint/tendermint/issues/4456)
-
-* **IAVL:** export/import API [#210](https://github.com/tendermint/iavl/issues/210)
-
-* **Cosmos SDK:** snapshotting, scheduling, and pruning [#5689](https://github.com/cosmos/cosmos-sdk/issues/5689)
-
-* **Tendermint:** support starting with a truncated block history
-
-* **Tendermint:** state sync reactor and ABCI interface [#828](https://github.com/tendermint/tendermint/issues/828)
-
-* **Cosmos SDK:** snapshot ABCI implementation [#5690](https://github.com/cosmos/cosmos-sdk/issues/5690)
-
-### Nice-to-Haves
-
-* **Tendermint:** staged reactor startup (state sync → fast sync → block replay → wal replay → consensus)
-
-  > Let's do a time-boxed prototype (a few days) and see how much work it will be.
-
-  * Notify P2P peers about channel changes [#4394](https://github.com/tendermint/tendermint/issues/4394)
-
-  * Check peers have certain channels [#1148](https://github.com/tendermint/tendermint/issues/1148)
-
-* **Tendermint:** prune blockchain history [#3652](https://github.com/tendermint/tendermint/issues/3652)
-
-* **Tendermint:** allow genesis to start from non-zero height [#2543](https://github.com/tendermint/tendermint/issues/2543)
-
-### Follow-up Tasks
-
-* **Tendermint:** light client verification for fast sync [#4457](https://github.com/tendermint/tendermint/issues/4457)
-
-* **Tendermint:** allow start with only blockstore [#3713](https://github.com/tendermint/tendermint/issues/3713)
-
-* **Tendermint:** node should go back to fast-syncing when lagging significantly [#129](https://github.com/tendermint/tendermint/issues/129)
-
-* **Tendermint:** backfill historical blocks [#4629](https://github.com/tendermint/tendermint/issues/4629)
-
 ## Status

 Accepted

diff --git a/docs/architecture/adr-055-protobuf-design.md b/docs/architecture/adr-055-protobuf-design.md
index 5500fc2d8..0f71689b5 100644
--- a/docs/architecture/adr-055-protobuf-design.md
+++ b/docs/architecture/adr-055-protobuf-design.md
@@ -3,6 +3,7 @@
 ## Changelog

 - 2020-4-15: Created (@marbar3778)
+- 2020-6-18: Updated (@marbar3778)

 ## Context

@@ -33,13 +34,13 @@ There are a few options to pick from:

 Transition Tendermint to Protobuf because of its performance and tooling. The Ecosystem behind Protobuf is vast and has outstanding [support for many languages](https://developers.google.com/protocol-buffers/docs/tutorials).

-We will be making this possible by keeping the current types in there current form (handwritten) and creating a `/proto` directory in which all the `.proto` files will live. Where encoding is needed, on disk and over the wire, we will call util functions that will transition the types from handwritten go types to protobuf generated types.
+We will be making this possible by keeping the current types in their current form (handwritten) and creating a `/proto` directory in which all the `.proto` files will live. Where encoding is needed, on disk and over the wire, we will call util functions that will transition the types from handwritten go types to protobuf generated types. This is in line with the recommended file structure from [buf](https://buf.build). You can find more information on this file structure [here](https://buf.build/docs/lint-checkers#file_layout).

 By going with this design we will enable future changes to types and allow for a more modular codebase.
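+As a rough sketch of such util functions (the names, fields and `tmproto` package alias here are
+assumptions for illustration, not the final API), a handwritten type would gain a pair of
+conversion helpers:
+
+```go
+// ToProto converts the handwritten type into its generated Protobuf counterpart.
+func (h Header) ToProto() *tmproto.Header {
+	return &tmproto.Header{
+		Height:  h.Height,
+		ChainID: h.ChainID,
+		// ... remaining fields copied over field by field
+	}
+}
+
+// HeaderFromProto converts back from the generated type, validating the result.
+func HeaderFromProto(ph *tmproto.Header) (Header, error) {
+	h := Header{
+		Height:  ph.Height,
+		ChainID: ph.ChainID,
+		// ...
+	}
+	return h, h.ValidateBasic()
+}
+```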
 ## Status

-Proposed
+Completed

 ## Consequences

diff --git a/docs/architecture/adr-056-light-client-amnesia-attacks.md b/docs/architecture/adr-056-light-client-amnesia-attacks.md
new file mode 100644
index 000000000..218ad32eb
--- /dev/null
+++ b/docs/architecture/adr-056-light-client-amnesia-attacks.md
@@ -0,0 +1,170 @@
+# ADR 056: Light client amnesia attacks
+
+## Changelog
+
+- 02.04.20: Initial Draft
+- 06.04.20: Second Draft
+- 10.06.20: Post Implementation Revision
+- 19.08.20: Short Term Amnesia Alteration
+- 01.10.20: Status of Amnesia for 0.34
+
+## Context
+
+Whilst most created evidence of malicious behavior is self-evident, such that any individual can verify it independently, there are types of evidence, known collectively as global evidence, that require further collaboration from the network in order to accumulate enough information to create evidence that is individually verifiable and can therefore be processed through consensus. [Fork Accountability](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md) has been coined to describe the entire process of detection, proving and punishing of malicious behavior. This ADR addresses specifically what a light client amnesia attack is, how it can be proven, and the current decision around handling light client amnesia attacks. For information on evidence handling by the light client, it is recommended to read [ADR 47](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-047-handling-evidence-from-light-client.md).
+
+### Amnesia Attack
+
+The schematic below explains a scenario where an amnesia attack can occur such that two sets of honest nodes, C1 and C2, commit different blocks.
+
+![](../imgs/tm-amnesia-attack.png)
+
+1. C1 and F send PREVOTE messages for block A.
+2. C1 sends PRECOMMIT for round 1 for block A.
+3. A new round is started, C2 and F send PREVOTE messages for a different block B.
+4. C2 and F then send PRECOMMIT messages for block B.
+5. F later on creates PRECOMMITs for block A and combines them with those from C1 to form a block.
+
+This forged block can then be used to fool light clients trying to verify it. It must be stressed that there are a few more hurdles or dimensions to the attack to consider. For a more detailed walkthrough refer to Appendix A.
+
+## Decision
+
+The decision surrounding amnesia attacks has both a short term and long term component. In the long term, a more sturdy protocol will need to be fleshed out and implemented. There are already draft documents outlining what such a protocol would look like and the resources it would require (see references). Prior revisions however outlined a protocol which had been implemented (see Appendix B). It was agreed that it still required greater consideration and review given its importance. It was therefore discussed, with the limited time frame set before 0.34, whether the protocol should be completely removed or if there should remain some logic in handling the aforementioned scenarios.
+
+The latter of the two options meant storing a record of all votes in any height with which there was more than one round. This information would then be accessible for applications if they wanted to perform some off-chain verification and punishment.
+
+In summary, this seemed like too much to ask of the application to implement only on a temporary basis, whilst not having the domain specific knowledge and considering such a difficult and unlikely attack.
+Therefore the short term decision is to identify when the attack has occurred and implement the detector algorithm highlighted in [ADR 47](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-047-handling-evidence-from-light-client.md), but to not implement any accountability protocol that would identify malicious validators and allow applications to punish them. This will hopefully change in the long term with the focus on eventually reaching a concrete and secure protocol for identifying and dealing with these attacks.
+
+## Implications
+
+- Light clients will still be able to detect amnesia attacks so long as the assumption of having at least one correct witness holds
+- Light clients will gossip the attack to witnesses and halt, thus failing to validate the incorrect block (and therefore not being fooled)
+- Validators will propose and commit evidence of the amnesia attack on chain
+- No evidence will be passed to the application indicting any malicious validators, meaning that no malicious validators will be punished for performing the attack
+- If a light client's bubble of providers is entirely faulty, the light client will falsely validate amnesia attacks as well as any other 1/3+ light client attack.
+
+## Status
+
+Implemented
+
+## Consequences
+
+### Positive
+
+Light clients are still able to prevent falsely validating a block.
+
+Already implemented.
+
+### Negative
+
+Light clients where all witnesses are faulty can be subject to an amnesia attack and verify a forged block that is not part of the chain.
+
+### Neutral
+
+## References
+
+- [Fork accountability algorithm](https://docs.google.com/document/d/11ZhMsCj3y7zIZz4udO9l25xqb0kl7gmWqNpGVRzOeyY/edit)
+- [Fork accountability spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md)
+
+## Appendix A: Detailed Walkthrough of Performing a Light Client Amnesia Attack
+
+For the attacker, a prerequisite to this attack is first to observe or attempt to craft a block where a subset (less than ⅓) of correct validators sent precommit votes for a proposal in an earlier round and later received ⅔ prevotes for a different proposal, thus changing their lock and correctly sending precommit votes (and later committing) for the proposal in the latter round. The second prerequisite is to have at least ⅓ validating power in that height (or enough voting power to have ⅔+ when combined with the precommits of the earlier round).
+
+To go back to how one may craft such a block, we begin with one of the validators in this cabal being the proposer. They propose a block with all the txs that they want to fool a light client with. The proposer then only relays this to the members of their cabal and a controlled subset of correct validators (less than ⅓). We will call ourselves f for faulty and c1 for this correct subset.
+
+Attackers need to rely on the assistance of some form of a network partition or on the nature of the sporadic voting to conjure their desired environment. The attackers need at least ⅓ of the validating power of the remaining correct validators (we shall denote this as c2) to not see ⅔ prevotes and thus not be locked on a block when it comes to the next round.
+If we have less than ⅓ remaining validators that don't see this first proposal, then we will not have enough voting power to reach ⅔+ prevotes (the sum of f and c2) in the following round and thus change the lock of c1 such that we correctly commit the block in the latter round yet have enough precommits in the earlier round to fool the light client. Remember this is our desired scenario: to save all these precommit votes for a different (in this case earlier) proposed block.
+
+To try to break this down even further, let's go back to the first round. F sends c1 a proposal (and not c2); c1 in turn sends their prevotes to all whom they are connected to. This means that some will be received by c2. F then sends their prevotes just to c1. Now not all validators in c1 may be connected to each other, so perhaps some validators in c1 might not receive ⅔ (from their own cohort and from f) and thus not precommit. In other situations we may see a validator in c2 connected to all validators in c1, so they too will receive ⅔ prevotes and thus precommit. We can conclude therefore that although targeting this c1 subset of validators, those that actually precommit may be somewhat different. The key is for the attackers to observe the n precommits they need in round 1, where n is ⅔+ - f, whilst ensuring that n itself does not go over ⅓. (For illustration: with 100 units of total voting power of which f controls 40, n = 67 - 40 = 27 precommits are needed from c1 in round 1, while n must stay at or below 33.) If it does go over, then less than ⅔ validators remain to be able to change the lock and commit the block in the later round.
+
+An extra dimension to this puzzle is the timeouts. Whilst c1 is relaying votes to its peers and these validators count closer towards the ⅔ threshold needed to send their precommit votes, at any moment the timeout could be reached, and thus the nodes will precommit nil and ignore any late prevote messages.
+
+This is all to say that such an attack is partly out of the attackers' hands. All they can do is tweak the subset of validators that they first choose to gossip the proposal to and modify the timings around when they send their prevotes until they reach the desired precondition: n precommits for an earlier proposal and ⅔ precommits for the later proposal. So this is up to the gods of non-deterministic behavior to help them out with their plight. I'm not going to allocate the hours to calculate the probability, but it could be in the magnitude of 1000s of blocks trying to get this scenario before the precondition is met.
+
+Obviously, the probability becomes substantially higher as the cabal's voting power nears ⅔. This is because n decreases and there is greater tolerance to send prevotes to a greater number of validators without going overboard and reaching the ⅓ precommit threshold in the first round, which would mean they would have to try again.
+
+Once we've got our n, we can then forge the remaining signatures for that block (from the f), bundle them all together, and ta-da: we have a forged signed header.
+
+Now we've done that, it's time to find some light clients to fool.
+
+Also critical to this type of attack is that the light client that is connected to our nodes must request a light block at the specific height for which we forged this signed header, but this shouldn't be hard to do. To bring this back to a real context, say our faulty cabal, f, bought some groceries using atoms and then wanted to prove that they did: the grocery owner whips out their phone, runs the light client, and f tells them the height at which they committed the transaction.
+
+An important note here is that because the validator sets are the same between the canonical and the forged block, this attack also works on light clients that verify sequentially. In fact, they are especially vulnerable because they currently don't run the detector function afterwards.
+
+However, if our grocery owner verifies using the skipping algorithm, they will then run the detector and therefore compare with other witness nodes. Ideally for our attackers, if f has a lot of nodes exposing their rpc endpoints, then there is a chance that all the witnesses the light client has are faulty, and thus we have a successful attack and the grocery owner has been fooled into handing f a few apples and carrots.
+
+However, there is a greater chance, especially if the light client is connected to quite a few other nodes, that a divergence will be detected. The light client will figure out there was an amnesia attack and send the evidence to the witness to commit on chain. The grocery owner will see that verification failed and won't hand over the apples or carrots, but f also won't be punished for their villainous behavior. This means that they can go over to the hairdressers and see if they can pull off the same stunt again.
+
+So this brings to the fore the current defenses that are in place. As long as there has not been a cabal of validators with greater than 1/3 power (or the trust level), the light client's verification algorithm will prevent any attempts to deceive it. Greater than this threshold and we rely on the detector as a second layer of defense to pick up on any attack. Its security is chiefly tied to the assumption that at least one of the witnesses is correct. If this fails then, as illustrated above, the light client can be susceptible to amnesia (as well as equivocation and lunatic) attacks.
+
+The outstanding problem, if we indeed consider it big enough to be one, therefore lies in the incentivisation mechanism, which is how f and other malicious validators are punished. This is decided by the application, but it's up to Tendermint to identify them. With other forms of attacks the evidence lies in the precommits. But because an amnesia attack uses precommits from another round, which is information that is discarded by the consensus engine once the block is committed, it is difficult to understand which validators were in fact faulty.
+
+If we cast our minds back to what I previously wrote, part of an amnesia attack depends on getting n precommits from an earlier round. These are then bundled with the malicious validators' own signatures. This means that neither the light client nor full nodes are capable of distinguishing which of the signatures were correctly created as part of Tendermint consensus and which were forged later on.
+
+## Appendix B: Prior Amnesia Evidence Accountability Implementation
+
+As these two attacks (amnesia and back to the past) can only be distinguished by confirming with all validators (to see if it is a full fork or a light fork), for the purpose of simplicity they will be treated as the same.
+
+Currently, the evidence reactor is used to simply broadcast and store evidence. The idea of creating a new reactor for the specific task of verifying these attacks was briefly discussed, but it was decided that the current evidence reactor will be extended.
+
+The process begins with a light client receiving conflicting headers (in the future this could also be a full node during fast sync or state sync), which it sends to a full node to analyze. As part of [evidence handling](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-047-handling-evidence-from-light-client.md), this is extracted into potential amnesia evidence when the validator voted in more than one round for a different block.
+
+```golang
+type PotentialAmnesiaEvidence struct {
+	VoteA *types.Vote
+	VoteB *types.Vote
+
+	Heightstamp int64
+}
+```
+
+*NOTE: There had been an earlier notion towards batching evidence against the entire set of validators all together but this has given way to individual processing predominantly to maintain consistency with the other forms of evidence. A more extensive breakdown can be found [here](https://github.com/tendermint/tendermint/issues/4729)*
+
+The evidence will contain the precommit votes of a validator that voted in both rounds. If the validator voted in more than two rounds, then they will have multiple `PotentialAmnesiaEvidence` against them, hence it is possible for there to be multiple pieces of evidence against a validator in a single height, but not for a single round. The votes should all be valid, and the height and time that the infringement was made should be within:
+
+`MaxEvidenceAge - ProofTrialPeriod`
+
+This trial period will be discussed later.
+
+Returning to the event of an amnesia attack, if we were to examine the behavior of the honest nodes, C1 and C2, in the schematic, C2 will not PRECOMMIT an earlier round, but it is likely, if a node in C1 were to receive +2/3 PREVOTEs or PRECOMMITs for a higher round, that it would remove the lock and PREVOTE and PRECOMMIT for the later round. Therefore, unfortunately, it is not a case of simply punishing all nodes that have double voted in the `PotentialAmnesiaEvidence`.
+
+Instead we use the Proof of Lock Change (PoLC) referred to in the [consensus spec](https://github.com/tendermint/spec/blob/master/spec/consensus/consensus.md#terms). When an honest node votes again for a different block in a later round
+(which will only occur in very rare cases), it will generate the PoLC and store it in the evidence reactor for a time equal to the `MaxEvidenceAge`.
+
+```golang
+type ProofOfLockChange struct {
+	Votes []*types.Vote
+	PubKey crypto.PubKey
+}
+```
+
+This can be either evidence of +2/3 PREVOTES or PRECOMMITS (either warrants the honest node the right to vote) and is valid, among other checks, so long as the PRECOMMIT vote of the node in V2 came after all the votes in the `ProofOfLockChange`, i.e. it received +2/3 votes for a block and then voted for that block thereafter (F is unable to prove this).
+
+In the event that an honest node receives `PotentialAmnesiaEvidence` it will first `ValidateBasic()` and `Verify()` it, and then will check if it is among the suspected nodes in the evidence. If so, it will retrieve the `ProofOfLockChange` and combine it with the `PotentialAmnesiaEvidence` to form `AmnesiaEvidence`.
All honest nodes that are part of the indicted group will have a time, measured in blocks, equal to `ProofTrialPeriod`, the aforementioned evidence parameter, to gossip their `AmnesiaEvidence` with their `ProofOfLockChange`:
+
+```golang
+type AmnesiaEvidence struct {
+	*types.PotentialAmnesiaEvidence
+	Polc *types.ProofOfLockChange
+}
+```
+
+If the node is not required to submit any proof then it will simply broadcast the `PotentialAmnesiaEvidence`, stamp the height at which it received the evidence, and begin to wait out the trial period. It will ignore other `PotentialAmnesiaEvidence` gossiped at the same height and round.
+
+If a node receives `AmnesiaEvidence` that contains a valid `ProofOfLockChange` it will add it to the evidence store and replace any `PotentialAmnesiaEvidence` of the same height and round. At this stage, amnesia evidence with a polc is ready to be submitted to the chain. If a node receives `AmnesiaEvidence` with an empty polc it will ignore it, as each honest node will conduct their own trial period to be sure that time was given for any other honest nodes to respond.
+
+There can only be one `AmnesiaEvidence` and one `PotentialAmnesiaEvidence` stored for each attack (i.e. for each height).
+
+When `state.LastBlockHeight > PotentialAmnesiaEvidence.timestamp + ProofTrialPeriod`, nodes will upgrade the corresponding `PotentialAmnesiaEvidence` and attach an empty `ProofOfLockChange`. Then honest validators of the current validator set can begin proposing the block that contains the `AmnesiaEvidence`.
+
+*NOTE: Even before the evidence is proposed and committed, the off-chain process of gossiping valid evidence could be
+ enough for honest nodes to recognize the fork and halt.*
+
+Other validators will vote `nil` if:
+
+- The Amnesia Evidence is not valid
+- The Amnesia Evidence is not within their own trial period, i.e. too soon.
+- They don't have the Amnesia Evidence and it has an empty polc (each validator needs to run their own trial period of the evidence)
+- The Amnesia Evidence has already been committed to the chain.
+
+Finally, it is important to stress that the protocol of having a trial period addresses attacks where a validator voted again for a different block at a later round and time. In the event, however, that the validator voted for an earlier round after voting for a later round, i.e. `VoteA.Timestamp < VoteB.Timestamp && VoteA.Round > VoteB.Round`, then this action is inexcusable and can be punished immediately without the need of a trial period. In this case, `PotentialAmnesiaEvidence` will be instantly upgraded to `AmnesiaEvidence`.

diff --git a/docs/architecture/adr-056-proving-amnesia-attacks.md b/docs/architecture/adr-056-proving-amnesia-attacks.md
deleted file mode 100644
index f0200ca7d..000000000
--- a/docs/architecture/adr-056-proving-amnesia-attacks.md
+++ /dev/null
@@ -1,120 +0,0 @@
-# ADR 056: Proving amnesia attacks
-
-## Changelog
-
-- 02.04.20: Initial Draft
-- 06.04.20: Second Draft
-
-## Context
-
-Whilst most created evidence of malicious behaviour is self evident such that any individual can verify them independently there are types of evidence, known collectively as global evidence, that require further collaboration from the network in order to accumulate enough information to create evidence that is individually verifiable and can therefore be processed through consensus.
[Fork Accountability](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md) has been coined to describe the entire process of detection, proving and punishing of malicious behaviour. This ADR addresses specifically how to prove an amnesia attack but also generally outlines how global evidence can be converted to individual evidence. - -### Amnesia Attack - -The currently only known form of global evidence stems from [flip flopping](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md#flip-flopping) attacks. The schematic below explains one scenario where an amnesia attack, a form of flip flopping, can occur such that two sets of honest nodes, C1 and C2, commit different blocks. - -![](../imgs/tm-amnesia-attack.png) - -1. C1 and F send PREVOTE messages for block A. -2. C1 sends PRECOMMIT for round 1 for block A. -3. A new round is started, C2 and F send PREVOTE messages for a different block B. -4. C2 and F then send PRECOMMIT messages for block B. -5. F breaks the lock and goes back and sends PRECOMMIT messages in round 1 for block A. - - -This creates a fork on the main chain. Back to the past, another form of flip flopping, creates a light fork (capable of fooling those not involved in consensus), in a similar way, with F taking the precommits from C1 and forging a commit from them. - -## Decision - -As the distinction between these two attacks (amnesia and back to the past) can only be distinguished by confirming with all validators (to see if it is a full fork or a light fork), for the purpose of simplicity, these attacks will be treated as the same. - -Currently, the evidence reactor is used to simply broadcast and store evidence. Instead of perhaps creating a new reactor for the specific task of verifying these attacks, the current evidence reactor will be extended. - -The process begins with a light client receiving conflicting headers (in the future this could also be a full node during fast sync), which it sends to a full node to analyse. As part of [evidence handling](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-047-handling-evidence-from-light-client.md), this could be deduced into potential amnesia evidence - -```golang -type PotentialAmnesiaEvidence struct { - V1 []*types.Vote - V2 []*types.Vote - - timestamp time.Time -} -``` - -*NOTE: Unlike prior evidence types, `PotentialAmnesiaEvidence` and `AmnesiaEvidence` are processed as a batch instead - of individually. This will require changes to much of the API.* - - *NOTE: `PotentialAmnesiaEvidence` could be constructed for when 1/3 or less vote in two different rounds but as it is not currently detected nor can it cause a fork, it will be ignored.* - -The evidence should contain the precommit votes for the intersection of validators that voted for both rounds. The votes should be all valid and the height and time that the infringement was made should be within: - -`MaxEvidenceAge - Amnesia trial period` - -where `Amnesia trial period` is a configurable duration defaulted at 1 day. - -With reference to the honest nodes, C1 and C2, in the schematic, C2 will not PRECOMMIT an earlier round, but it is likely, if a node in C1 were to receive +2/3 PREVOTE's or PRECOMMIT's for a higher round, that it would remove the lock and PREVOTE and PRECOMMIT for the later round. Therefore, unfortunately it is not a case of simply punishing all nodes that have double voted in the `PotentialAmnesiaEvidence`. 
- -Instead we use the Proof of Lock Change (PoLC) referred to in the [consensus spec](https://github.com/tendermint/spec/blob/master/spec/consensus/consensus.md#terms). When an honest node votes again for a different block in a later round -(which will only occur in very rare cases), it will generate the PoLC and store it in the evidence reactor for a time equal to the `MaxEvidenceAge` - -```golang -type ProofOfLockChange struct { - Votes []*types.Vote -} -``` - -This can be either evidence of +2/3 PREVOTES or PRECOMMITS (either warrants the honest node the right to vote) and is valid, among other checks, so long as the PRECOMMIT vote of the node in V2 came after all the votes in the `ProofOfLockChange` i.e. it received +2/3 votes for a block and then voted for that block thereafter (F is unable to prove this). - -In the event that an honest node receives `PotentialAmnesiaEvidence` it will first `Verify()` it and then will check if it is among the suspected nodes in the evidence. If so, it will retrieve the `ProofOfLockChange` and combine it with `PotentialAmensiaEvidence` to form `AmensiaEvidence`: - -```golang -type AmnesiaEvidence struct { - Evidence *types.PotentialAmnesiaEvidence - Proofs []*types.ProofOfLockChange -} -``` - -If the node is not required to submit any proof than it will simply broadcast the `PotentialAmnesiaEvidence` . - -When a node has successfully validated `PotentialAmnesiaEvidence` it timestamps it and refuses to receive the same form of `PotentialAmnesiaEvidence`. If a node receives `AmnesiaEvidence` it checks it against any current `AmnesiaEvidence` it might have and if so merges the two by adding the proofs, if it doesn't have it yet it run's `Verify()` and stores it. - -There can only be one `AmnesiaEvidence` and one `PotentialAmneisaEvidence` stored for each attack (i.e. for each height). - -When, `time.Now() > PotentialAmnesiaEvidence.timestamp + AmnesiaTrialPeriod`, honest validators of the current validator set can begin proposing the block that contains the `AmnesiaEvidence`. - -*NOTE: Even before the evidence is proposed and committed, the off-chain process of gossiping valid evidence could be - enough for honest nodes to recognize the fork and halt.* - -Other validators will vote if: - -- The Amnesia Evidence is not valid -- The Amensia Evidence is not within the validators trial period i.e. too soon. -- The Amensia Evidence is of the same height but is different to the Amnesia Evidence that they have. i.e. is missing proofs. - (In this case, the validator will try again to gossip the latest Amnesia Evidence that it has) -- Is of an AmnesiaEvidence that has already been committed to the chain. - - -## Status - -Proposed - -## Consequences - -### Positive - -Increasing fork detection makes the system more secure - -### Negative - -Non-responsive but honest nodes that are part of the suspect group that don't produce a proof will be punished - -A delay between the detection of a fork and the punishment of one - -### Neutral - -Evidence package will need to be able to handle batch evidence as well as individual evidence (i.e. 
extra work)
-
-## References
-
-- [Fork accountability algorithm](https://docs.google.com/document/d/11ZhMsCj3y7zIZz4udO9l25xqb0kl7gmWqNpGVRzOeyY/edit)
-- [Fork accountability spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md)

diff --git a/docs/architecture/adr-057-RPC.md b/docs/architecture/adr-057-RPC.md
new file mode 100644
index 000000000..5e7c9f1dc
--- /dev/null
+++ b/docs/architecture/adr-057-RPC.md
@@ -0,0 +1,90 @@
+# ADR 057: RPC
+
+## Changelog
+
+- 19-05-2020: created
+
+## Context
+
+Currently the RPC layer of Tendermint is using a variant of the JSON-RPC protocol. This ADR is meant to serve as a pro/con list for possible alternatives and JSON-RPC.
+
+There are currently two options being discussed: gRPC & JSON-RPC.
+
+### JSON-RPC
+
+JSON-RPC is a JSON-based RPC protocol. Tendermint has implemented its own variant of JSON-RPC which is not compatible with the [JSON-RPC 2.0 specification](https://www.jsonrpc.org/specification).
+
+**Pros:**
+
+- Easy to use & implement (by default)
+- Well-known and well-understood by users and integrators
+- Integrates reasonably well with web infrastructure (proxies, API gateways, service meshes, caches, etc)
+- Human-readable encoding (by default)
+
+**Cons:**
+
+- No schema support
+- RPC clients must be hand-written
+- Streaming not built into protocol
+- Underspecified types (e.g. numbers and timestamps)
+- Tendermint has its own implementation (not standards compliant, maintenance overhead)
+  - High maintenance cost associated to this
+- Stdlib `jsonrpc` package only supports JSON-RPC 1.0, no dominant package for JSON-RPC 2.0
+- Tooling around documentation/specification (e.g. Swagger) could be better
+- JSON data is larger (offset by HTTP compression)
+- Serializing is slow ([~100% marshal, ~400% unmarshal](https://github.com/alecthomas/go_serialization_benchmarks)); insignificant in absolute terms
+- Specification was last updated in 2013 and is way behind Swagger/OpenAPI
+
+### gRPC + gRPC-gateway (REST + Swagger)
+
+gRPC is a high-performance RPC framework. It has been battle-tested by a large number of users and is heavily relied on and maintained by countless large corporations.
+
+**Pros:**
+
+- Efficient data retrieval for users, lite clients and other protocols
+- Easily implemented in supported languages (Go, Dart, JS, TS, Rust, Elixir, Haskell, ...)
+- Defined schema with richer type system (Protocol Buffers)
+- Can use common schemas and types across all protocols and data stores (RPC, ABCI, blocks, etc)
+- Established conventions for forwards- and backwards-compatibility
+- Bi-directional streaming
+- Servers and clients can be autogenerated in many languages (e.g. Tendermint-rs)
+- Auto-generated swagger documentation for REST API
+- Backwards and forwards compatibility guarantees enforced at the protocol level.
+- Can be used with different codecs (JSON, CBOR, ...)
+
+**Cons:**
+
+- Complex system involving cross-language schemas, code generation, and custom protocols
+- Type system does not always map cleanly to native language type system; integration woes
+- Many common types require Protobuf plugins (e.g. timestamps and duration)
+- Generated code may be non-idiomatic and hard to use
+- Migration will be disruptive and laborious
+
+## Decision
+
+> This section explains all of the details of the proposed solution, including implementation details.
+> It should also describe effects / corollary items that may need to be changed as a part of this.
+> If the proposed change will be large, please also indicate a way to do the change to maximize ease of review.
+> (e.g. the optimal split of things to do between separate PRs)
+
+## Status
+
+> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement.
+
+{Deprecated|Proposed|Accepted}
+
+## Consequences
+
+> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones.
+
+### Positive
+
+### Negative
+
+### Neutral
+
+## References
+
+> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here!
+
+- {reference link}

diff --git a/docs/architecture/adr-058-event-hashing.md b/docs/architecture/adr-058-event-hashing.md
new file mode 100644
index 000000000..184b921d5
--- /dev/null
+++ b/docs/architecture/adr-058-event-hashing.md
@@ -0,0 +1,122 @@
+# ADR 058: Event hashing
+
+## Changelog
+
+- 2020-07-17: initial version
+- 2020-07-27: fixes after Ismail and Ethan's comments
+- 2020-07-27: declined
+
+## Context
+
+Before [PR#4845](https://github.com/tendermint/tendermint/pull/4845),
+`Header#LastResultsHash` was a root of the Merkle tree built from `DeliverTx`
+results. Only the `Code` and `Data` fields were included, because the `Info` and `Log`
+fields are non-deterministic.
+
+At some point, we've added events to `ResponseBeginBlock`, `ResponseEndBlock`,
+and `ResponseDeliverTx` to give applications a way to attach some additional
+information to blocks / transactions.
+
+Many applications seem to have started using them since.
+
+However, before [PR#4845](https://github.com/tendermint/tendermint/pull/4845)
+there was no way to prove that certain events were a part of the result
+(_unless the application developer includes them into the state tree_).
+
+Hence, [PR#4845](https://github.com/tendermint/tendermint/pull/4845) was
+opened. In it, `GasWanted` along with `GasUsed` are included when hashing
+`DeliverTx` results. Also, events from `BeginBlock`, `EndBlock` and `DeliverTx`
+results are hashed into the `LastResultsHash` as follows:
+
+- Since we do not expect `BeginBlock` and `EndBlock` to contain many events,
+  these will be Protobuf encoded and included in the Merkle tree as leaves.
+- `LastResultsHash` therefore is the root hash of a Merkle tree w/ 3 leaves:
+  proto-encoded `ResponseBeginBlock#Events`, the root hash of a Merkle tree built
+  from `ResponseDeliverTx` responses (Log, Info and Codespace fields are
+  ignored), and proto-encoded `ResponseEndBlock#Events`.
+- Order of events is unchanged - same as received from the ABCI application.
+
+[Spec PR](https://github.com/tendermint/spec/pull/97/files)
+
+While it's certainly good to be able to prove something, introducing new events
+or removing existing ones becomes difficult because it breaks the `LastResultsHash`. It
+means that every time you add, remove or update an event, you'll need a
+hard-fork. And that is undoubtedly bad for applications, which are evolving and
+don't have a stable events set.
+
+## Decision
+
+As a middle ground approach, the proposal is to add the
+`Block#LastResultsEvents` consensus parameter that is a list of all events that
+are to be hashed in the header.
+
+```
+@ proto/tendermint/abci/types.proto:295 @ message BlockParams {
+  int64 max_bytes = 1;
+  // Note: must be greater or equal to -1
+  int64 max_gas = 2;
+  // List of events, which will be hashed into the LastResultsHash
+  repeated string last_results_events = 3;
+}
+```
+
+Initially the list is empty. The ABCI application can change it via `InitChain`
+or `EndBlock`.
+
+Example:
+
+```go
+func (app *MyApp) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
+	//...
+	events := []abci.Event{
+		{
+			Type: "transfer",
+			Attributes: []abci.EventAttribute{
+				{Key: []byte("sender"), Value: []byte("Bob"), Index: true},
+			},
+		},
+	}
+	return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
+}
+```
+
+For the "transfer" event to be hashed, the `LastResultsEvents` must contain the
+string "transfer".
+
+## Status
+
+Declined
+
+**Until there's more stability/motivation/use-cases/demand, the decision is to
+push this entirely application side and just have apps which want events to be
+provable to insert them into their application-side merkle trees. Of course
+this puts more pressure on their application state and makes event proving
+application specific, but it might help build up a better sense of use-cases
+and how this ought to ultimately be done by Tendermint.**
+
+## Consequences
+
+### Positive
+
+1. networks can perform parameter change proposals to update this list as new events are added
+2. allows networks to avoid having to do hard-forks
+3. events can still be added at-will to the application w/o breaking anything
+
+### Negative
+
+1. yet another consensus parameter
+2. more things to track in the tendermint state
+
+## References
+
+- [ADR 021](./adr-021-abci-events.md)
+- [Indexing transactions](../app-dev/indexing-transactions.md)
+
+## Appendix A. Alternative proposals
+
+The other proposal was to add a `Hash bool` flag to the `Event`, similar to
+the EventAttribute's `Index bool` field. When `true`, Tendermint would hash it into
+the `LastResultsEvents`. The downside is that the logic is implicit and depends
+largely on the node's operator, who decides what application code to run. The
+above proposal makes it (the logic) explicit and easy to upgrade via
+governance.

diff --git a/docs/architecture/adr-059-evidence-composition-and-lifecycle.md b/docs/architecture/adr-059-evidence-composition-and-lifecycle.md
new file mode 100644
index 000000000..707a18bfb
--- /dev/null
+++ b/docs/architecture/adr-059-evidence-composition-and-lifecycle.md
@@ -0,0 +1,298 @@
+# ADR 059: Evidence Composition and Lifecycle
+
+## Changelog
+
+- 04/09/2020: Initial Draft (Unabridged)
+- 07/09/2020: First Version
+
+## Scope
+
+This document is designed to collate together and surface some predicaments involving evidence in Tendermint: both its composition and lifecycle. It then aims to find a solution to these. The scope does not extend to the verification nor detection of certain types of evidence but concerns itself mainly with the general form of evidence and how it moves from inception to application.
+
+## Background
+
+For a long time `DuplicateVoteEvidence`, formed in the consensus reactor, was the only evidence Tendermint had. It was produced whenever two votes from the same validator in the same round
+were observed, and thus it was designed such that each piece of evidence was for a single validator.
It was predicted that there may come more forms of evidence, and thus `DuplicateVoteEvidence` was used as the model for the `Evidence` interface and also for the form of the evidence data sent to the application. It is important to note that Tendermint concerns itself just with the detection and reporting of evidence; it is the responsibility of the application to exercise punishment.
+
+```go
+type Evidence interface { //existing
+	Height() int64                                     // height of the offense
+	Time() time.Time                                   // time of the offense
+	Address() []byte                                   // address of the offending validator
+	Bytes() []byte                                     // bytes which comprise the evidence
+	Hash() []byte                                      // hash of the evidence
+	Verify(chainID string, pubKey crypto.PubKey) error // verify the evidence
+	Equal(Evidence) bool                               // check equality of evidence
+
+	ValidateBasic() error
+	String() string
+}
+```
+
+```go
+type DuplicateVoteEvidence struct {
+	VoteA *Vote
+	VoteB *Vote
+
+	timestamp time.Time // taken from the block time
+}
+```
+
+Tendermint has now introduced a new type of evidence to protect light clients from being attacked. This `LightClientAttackEvidence` (see [here](https://github.com/informalsystems/tendermint-rs/blob/31ca3e64ce90786c1734caf186e30595832297a4/docs/spec/lightclient/attacks/evidence-handling.md) for more information) is vastly different to `DuplicateVoteEvidence` in that it is physically a much larger object, containing a complete signed header and validator set. It is formed within the light client, not the consensus reactor, and requires a lot more information from state to verify (`VerifyLightClientAttack(commonHeader, trustedHeader *SignedHeader, commonVals *ValidatorSet)` vs `VerifyDuplicateVote(chainID string, pubKey PubKey)`). Finally it batches validators together (a single piece of evidence that implicates multiple malicious validators at a height) as opposed to having individual evidence (each piece of evidence is per validator per height). This evidence stretches the existing mould that was used to accommodate new types of evidence and has thus caused us to reconsider how evidence should be formatted and processed.
+
+```go
+type LightClientAttackEvidence struct { // proposed struct in spec
+	ConflictingBlock *LightBlock
+	CommonHeight int64
+	Type AttackType // enum: {Lunatic|Equivocation|Amnesia}
+
+	timestamp time.Time // taken from the block time at the common height
+}
+```
+*Note: These three attack types have been proven by the research team to be exhaustive*
+
+## Possible Approaches for Evidence Composition
+
+### Individual framework
+
+Evidence remains on a per validator basis. This causes the least disruption to the current processes but requires that we break `LightClientAttackEvidence` into several pieces of evidence for each malicious validator. This not only has performance consequences, in that there are n times as many database operations and the gossiping of evidence will require more bandwidth than necessary (by requiring a header for each piece), but it also potentially impacts our ability to validate it. In batch form, the full node can run the same process the light client did to see that 1/3 validating power was present in both the common block and the conflicting block, whereas this becomes more difficult to verify individually without opening the possibility that malicious validators forge evidence against innocent validators.
Not only that, but `LightClientAttackEvidence` also deals with amnesia attacks, which unfortunately have the characteristic where we know the set of validators involved but not the subset that were actually malicious (more to be said about this later). And finally, splitting the evidence into individual pieces makes it difficult to understand the severity of the attack (i.e. the total voting power involved in the attack).
+
+#### An example of a possible implementation path
+
+We would ignore amnesia evidence (as individually it's hard to make) and revert to the initial split we had before, where `DuplicateVoteEvidence` is also used for light client equivocation attacks, and thus we only need `LunaticEvidence`. We would also most likely need to remove `Verify` from the interface as this isn't really something that can be used.
+
+``` go
+type LunaticEvidence struct { // individual lunatic attack
+	header *Header
+	commonHeight int64
+	vote *Vote
+
+	timestamp time.Time // once again taken from the block time at the height of the common header
+}
+```
+
+### Batch Framework
+
+The last approach of this category would be to consider batch-only evidence. This works fine with `LightClientAttackEvidence` but would require alterations to `DuplicateVoteEvidence`, which would most likely mean that consensus would send conflicting votes to a buffer in the evidence module, which would then wrap all the votes together per height before gossiping them to other nodes and trying to commit them on chain. At a glance this may improve IO and verification speed, and, perhaps more importantly, grouping validators gives the application and Tendermint a better overview of the severity of the attack.
+
+However, individual evidence has the advantage that it is easy to check whether a node already has a piece of evidence: we just need to compare hashes to know that we've already verified it. Batching evidence would imply that each node may have a different combination of duplicate votes, which may complicate things.
+
+#### An example of a possible implementation path
+
+`LightClientAttackEvidence` won't change, but the evidence interface will need to look like the proposed one above, and `DuplicateVoteEvidence` will need to change to encompass multiple double votes. A problem with batch evidence is that it needs to be unique to avoid people from submitting different permutations.
+
+## Decision
+
+The decision is to adopt a hybrid design.
+
+We allow individual and batch evidence to coexist together, meaning that verification is done depending on the evidence type and that the bulk of the work is done in the evidence pool itself (including forming the evidence to be sent to the application).
+
+## Detailed Design
+
+Evidence has the following simple interface:
+
+```go
+type Evidence interface { //proposed
+	Height() int64        // height of the offense
+	Bytes() []byte        // bytes which comprise the evidence
+	Hash() []byte         // hash of the evidence
+	ValidateBasic() error
+	String() string
+}
+```
+
+The changing of the interface is backwards compatible as these methods are all present in the previous version of the interface. However, networks will need to upgrade to be able to process the new evidence, as verification has changed.
+ +We have two concrete types of evidence that fulfil this interface: + +```go +type LightClientAttackEvidence struct { + ConflictingBlock *LightBlock + CommonHeight int64 // the last height at which the primary provider and witness provider had the same header +} +``` +where the `Hash()` is the hash of the header and `CommonHeight`. + +Note: It was also discussed whether to include the commit hash, which captures the validators that signed the header. However, this would open the opportunity for someone to propose multiple permutations of the same evidence (through different commit signatures), hence it was omitted. Consequently, when it comes to verifying evidence in a block, for `LightClientAttackEvidence` we can't just check the hashes, because someone could have the same hash as us but a different commit where less than 1/3 of the validators voted, which would be an invalid version of the evidence (see `fastCheck` for more details). + +```go +type DuplicateVoteEvidence struct { + VoteA *Vote + VoteB *Vote +} +``` +where the `Hash()` is the hash of the two votes. + +For both of these types of evidence, `Bytes()` represents the proto-encoded byte array format of the evidence and `ValidateBasic` is +an initial consistency check to make sure the evidence has a valid structure. + +### The Evidence Pool + +`LightClientAttackEvidence` is generated in the light client and `DuplicateVoteEvidence` in consensus. Both are sent to the evidence pool through `AddEvidence(ev Evidence) error`. The evidence pool's primary purpose is to verify evidence. It also gossips evidence to other peers' evidence pools and serves it to consensus so it can be committed on chain and the relevant information can be sent to the application in order to exercise punishment. When evidence is added, the pool first runs `Has(ev Evidence)` to check if it has already received it (by comparing hashes) and then `Verify(ev Evidence) error`. Once verified, the evidence pool stores it in its pending database. There are two databases: one for pending evidence that is not yet committed and another for the committed evidence (to avoid committing evidence twice). + +#### Verification + +`Verify()` does the following: + +- Use the hash to see if we already have this evidence in our committed database. + +- Use the height to check that the evidence hasn't expired. + +- If it has expired, then use the height to find the block header and check whether the time has also expired, in which case we drop the evidence. + +- Then proceed with a switch statement over the two evidence types: + +For `DuplicateVote`: + +- Check that height, round, type and validator address are the same + +- Check that the Block ID is different + +- Check the lookup table for addresses to make sure there already isn't evidence against this validator + +- Fetch the validator set and confirm that the address is in the set at the height of the attack + +- Check that the chain ID and signature are valid. + +For `LightClientAttack`: + +- Fetch the common signed header and val set from the common height and use skipping verification to verify the conflicting header + +- Fetch the trusted signed header at the same height as the conflicting header and compare with the conflicting header to work out which type of attack it is and in doing so return the malicious validators:
+ + - If equivocation, return the validators that signed for the commits of both the trusted and the conflicting header + + - If lunatic, return the validators from the common val set that signed in the conflicting block + + - If amnesia, return no validators (since we can't know which validators are malicious). This also means that we don't currently send amnesia evidence to the application, although we will introduce more robust amnesia evidence handling in future Tendermint Core releases. + +- For each validator, check the lookup table to make sure there already isn't evidence against this validator + +After verification we persist the evidence with the key `height/hash` to the pending evidence database in the evidence pool with the following format: + +```go +type EvidenceInfo struct { + ev Evidence + time time.Time + validators []Validator + totalVotingPower int64 +} +``` + +`time`, `validators` and `totalVotingPower` are needed to form the `abci.Evidence` that we send to the application layer. More on this later. + + +#### Broadcasting and receiving evidence + +The evidence pool also runs a reactor that broadcasts the newly validated +evidence to all connected peers. + +Receiving evidence from other evidence reactors works in the same manner as receiving evidence from the consensus reactor or a light client. + + +#### Proposing evidence on the block + +When it comes to prevoting and precommitting a proposal that contains evidence, the full node will once again +call upon the evidence pool to verify the evidence, using `CheckEvidence(ev []Evidence)`. This performs the following actions: + +1. Loops through all the evidence to check that nothing has been duplicated + +2. For each piece of evidence, runs `fastCheck(ev evidence)`, which works similarly to `Has` except that for `LightClientAttackEvidence` with a matching hash it also checks that the validators it has are all signers in the commit of the conflicting header. If the evidence doesn't pass the fast check (because the node hasn't seen it before), it will have to be fully verified. + +3. Runs `Verify(ev Evidence)`. Note: this also saves the evidence to the db, as mentioned before. + + +#### Updating application and pool + +The final part of the lifecycle is when the block is committed and the `BlockExecutor` then updates state. As part of this process, the `BlockExecutor` gets the evidence pool to create a simplified format for the evidence to be sent to the application. This happens in `ApplyBlock`, where the executor calls `Update(Block, State) []abci.Evidence`. + +```go +abciResponses.BeginBlock.ByzantineValidators = evpool.Update(block, state) +``` + +Here is the format of the evidence that the application will receive. As seen above, this is stored as an array within `BeginBlock`. +The changes to the application are minimal (one piece of ABCI evidence is still formed for each malicious validator), with the exception of using an enum instead of a string for the evidence type.
+ +```go +type Evidence struct { + // either LightClientAttackEvidence or DuplicateVoteEvidence as an enum (abci.EvidenceType) + Type EvidenceType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.abci.EvidenceType" json:"type,omitempty"` + // The offending validator + Validator Validator `protobuf:"bytes,2,opt,name=validator,proto3" json:"validator"` + // The height when the offense occurred + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + // The corresponding time where the offense occurred + Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"` + // Total voting power of the validator set in case the ABCI application does + // not store historical validators. + // https://github.com/tendermint/tendermint/issues/4581 + TotalVotingPower int64 `protobuf:"varint,5,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` +} +``` + + +This `Update()` function does the following: + +- Increments the state, which keeps track of both the current time and height, used for measuring expiry + +- Marks evidence as committed and saves it to the db. This prevents validators from proposing committed evidence in the future. + Note: the db just saves the height and the hash. There is no need to save the entire committed evidence + +- Forms ABCI evidence as such (note: for `DuplicateVoteEvidence` the validators array has size 1): + ```go + for _, val := range evInfo.Validators { + abciEv = append(abciEv, &abci.Evidence{ + Type: evType, // either DuplicateVote or LightClientAttack + Validator: val, // the offending validator (which includes the address, pubkey and power) + Height: evInfo.ev.Height(), // the height when the offense happened + Time: evInfo.time, // the time when the offense happened + TotalVotingPower: evInfo.totalVotingPower, // the total voting power of the validator set + }) + } + ``` + +- Removes expired evidence from both pending and committed databases + +The ABCI evidence is then sent via the `BlockExecutor` to the application. + +#### Summary + +To summarize, we can see the lifecycle of evidence as such: + +![evidence_lifecycle](../imgs/evidence_lifecycle.png) + +Evidence is first detected and created in the light client and consensus reactor. It is verified and stored as `EvidenceInfo` and gossiped to the evidence pools in other nodes. The consensus reactor later communicates with the evidence pool to either retrieve evidence to be put into a block, or verify the evidence the consensus reactor has retrieved in a block. Lastly, when a block is added to the chain, the block executor sends the committed evidence back to the evidence pool, so a pointer to the evidence can be stored in the evidence pool and it can update its height and time. Finally, it turns the committed evidence into ABCI evidence and, through the block executor, passes the evidence to the application so the application can handle it.
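To tie the lifecycle described above together, here is a minimal sketch of the pool's add/verify flow in Go. All helper names (`hasCommitted`, `hasPending`, `isExpired`, `verifyDuplicateVote`, `verifyLightClientAttack`, `savePending`) are hypothetical and chosen only for illustration; the actual implementation differs:

```go
// AddEvidence sketches the flow described above: deduplicate against both
// databases, drop expired evidence, verify according to the concrete type
// (the hybrid design), and persist to the pending database.
func (evpool *Pool) AddEvidence(ev Evidence) error {
	// Skip evidence we have already verified or committed.
	if evpool.hasCommitted(ev.Hash()) || evpool.hasPending(ev.Hash()) {
		return nil
	}
	// Drop evidence that has expired by height (and block time).
	if evpool.isExpired(ev.Height()) {
		return fmt.Errorf("evidence from height %d has expired", ev.Height())
	}
	// Verification depends on the concrete evidence type.
	switch e := ev.(type) {
	case *DuplicateVoteEvidence:
		if err := evpool.verifyDuplicateVote(e); err != nil {
			return err
		}
	case *LightClientAttackEvidence:
		if err := evpool.verifyLightClientAttack(e); err != nil {
			return err
		}
	default:
		return fmt.Errorf("unknown evidence type %T", ev)
	}
	// Persist to the pending database, keyed by height/hash.
	return evpool.savePending(ev)
}
```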
+ +## Status + +Implemented + +## Consequences + + + +### Positive + +- Evidence is better contained within the evidence pool / module +- LightClientAttack is kept together (easier for verification and bandwidth) +- Variations on commit sigs in LightClientAttack don't lead to multiple permutations and multiple evidence +- Address to evidence map prevents DoS attacks, where a single validator could DoS the network by flooding it with evidence submissions + +### Negative + +- Changes the `Evidence` interface and thus is a block breaking change +- Changes the ABCI `Evidence` and is thus an ABCI breaking change +- Unable to query evidence for address / time without evidence pool + +### Neutral + + +## References + + + +- [LightClientAttackEvidence](https://github.com/informalsystems/tendermint-rs/blob/31ca3e64ce90786c1734caf186e30595832297a4/docs/spec/lightclient/attacks/evidence-handling.md) diff --git a/docs/architecture/adr-060-go-api-stability.md b/docs/architecture/adr-060-go-api-stability.md new file mode 100644 index 000000000..112cf4ad0 --- /dev/null +++ b/docs/architecture/adr-060-go-api-stability.md @@ -0,0 +1,193 @@ +# ADR 060: Go API Stability + +## Changelog + +- 2020-09-08: Initial version. (@erikgrinaker) + +- 2020-09-09: Tweak accepted changes, add initial public API packages, add consequences. (@erikgrinaker) + +- 2020-09-17: Clarify initial public API. (@erikgrinaker) + +## Context + +With the release of Tendermint 1.0 we will adopt [semantic versioning](https://semver.org). One major implication is a guarantee that we will not make backwards-incompatible changes until Tendermint 2.0 (except in pre-release versions). In order to provide this guarantee for our Go API, we must clearly define which of our APIs are public, and what changes are considered backwards-compatible. + +Currently, we list packages that we consider public in our [README](https://github.com/tendermint/tendermint#versioning), but since we are still at version 0.x we do not provide any backwards compatibility guarantees at all. + +### Glossary + +* **External project:** a different Git/VCS repository or code base. + +* **External package:** a different Go package, can be a child or sibling package in the same project. + +* **Internal code:** code not intended for use in external projects. + +* **Internal directory:** code under `internal/` which cannot be imported in external projects. + +* **Exported:** a Go identifier starting with an uppercase letter, which can therefore be accessed by an external package. + +* **Private:** a Go identifier starting with a lowercase letter, which therefore cannot be accessed by an external package unless via an exported field, variable, or function/method return value. + +* **Public API:** any Go identifier that can be imported or accessed by an external project, except test code in `_test.go` files. + +* **Private API:** any Go identifier that is not accessible via a public API, including all code in the internal directory. + +## Alternative Approaches + +- Split all public APIs out to separate Go modules in separate Git repositories, and consider all Tendermint code internal and not subject to API backwards compatibility at all. This was rejected, since it has been attempted by the Tendermint project earlier, resulting in too much dependency management overhead. + +- Simply document which APIs are public and which are private.
This is the current approach, but users should not be expected to self-enforce this, the documentation is not always up-to-date, and external projects will often end up depending on internal code anyway. + +## Decision + +From Tendermint 1.0, all internal code (except private APIs) will be placed in a root-level [`internal` directory](https://golang.org/cmd/go/#hdr-Internal_Directories), which the Go compiler will block for use by external projects. All exported items outside of the `internal` directory are considered a public API and subject to backwards compatibility guarantees, except files ending in `_test.go`. + +The `crypto` package may be split out to a separate module in a separate repo. This is the main general-purpose package used by external projects, and is the only Tendermint dependency in e.g. IAVL, which can cause some problems for projects depending on both IAVL and Tendermint. This will be decided after further discussion. + +The `tm-db` package will remain a separate module in a separate repo. + +## Detailed Design + +### Public API + +When preparing our public API for 1.0, we should keep these principles in mind: + +- Limit the number of public APIs that we start out with - we can always add new APIs later, but we can't change or remove APIs once they're made public. + +- Before an API is made public, do a thorough review of the API to make sure it covers any future needs, can accommodate expected changes, and follows good API design practices. + +The following is the minimum set of public APIs that will be included in 1.0, in some form: + +- `abci` +- `node` and related packages (e.g. possibly `config`, `libs/log`, and `version`) +- Client APIs, i.e. `rpc/client`, `light`, and `privval`. +- `crypto` (possibly as a separate repo) + +We may offer additional APIs as well, following further discussions internally and with other stakeholders. However, public APIs for providing custom components (e.g. reactors and mempools) are not planned for 1.0, but may be added in a later 1.x version if this is something we want to offer. + +For comparison, the following are the number of Tendermint imports in the Cosmos SDK (excluding tests), which should be mostly satisfied by the planned APIs.
+ +``` + 1 github.com/tendermint/tendermint/abci/server + 73 github.com/tendermint/tendermint/abci/types + 2 github.com/tendermint/tendermint/cmd/tendermint/commands + 7 github.com/tendermint/tendermint/config + 68 github.com/tendermint/tendermint/crypto + 1 github.com/tendermint/tendermint/crypto/armor + 10 github.com/tendermint/tendermint/crypto/ed25519 + 2 github.com/tendermint/tendermint/crypto/encoding + 3 github.com/tendermint/tendermint/crypto/merkle + 3 github.com/tendermint/tendermint/crypto/sr25519 + 8 github.com/tendermint/tendermint/crypto/tmhash + 1 github.com/tendermint/tendermint/crypto/xsalsa20symmetric + 11 github.com/tendermint/tendermint/libs/bytes + 2 github.com/tendermint/tendermint/libs/bytes.HexBytes + 15 github.com/tendermint/tendermint/libs/cli + 2 github.com/tendermint/tendermint/libs/cli/flags + 2 github.com/tendermint/tendermint/libs/json + 30 github.com/tendermint/tendermint/libs/log + 1 github.com/tendermint/tendermint/libs/math + 11 github.com/tendermint/tendermint/libs/os + 4 github.com/tendermint/tendermint/libs/rand + 1 github.com/tendermint/tendermint/libs/strings + 5 github.com/tendermint/tendermint/light + 1 github.com/tendermint/tendermint/mempool + 3 github.com/tendermint/tendermint/node + 5 github.com/tendermint/tendermint/p2p + 4 github.com/tendermint/tendermint/privval + 10 github.com/tendermint/tendermint/proto/tendermint/crypto + 1 github.com/tendermint/tendermint/proto/tendermint/libs/bits + 24 github.com/tendermint/tendermint/proto/tendermint/types + 3 github.com/tendermint/tendermint/proto/tendermint/version + 2 github.com/tendermint/tendermint/proxy + 3 github.com/tendermint/tendermint/rpc/client + 1 github.com/tendermint/tendermint/rpc/client/http + 2 github.com/tendermint/tendermint/rpc/client/local + 3 github.com/tendermint/tendermint/rpc/core/types + 1 github.com/tendermint/tendermint/rpc/jsonrpc/server + 33 github.com/tendermint/tendermint/types + 2 github.com/tendermint/tendermint/types/time + 1 github.com/tendermint/tendermint/version +``` + +### Backwards-Compatible Changes + +In Go, [almost all API changes are backwards-incompatible](https://blog.golang.org/module-compatibility) and thus exported items in public APIs generally cannot be changed until Tendermint 2.0. The only backwards-compatible changes we can make to public APIs are: + +- Adding a package. + +- Adding a new identifier to the package scope (e.g. const, var, func, struct, interface, etc.). + +- Adding a new method to a struct. + +- Adding a new field to a struct, if the zero-value preserves any old behavior. + +- Changing the order of fields in a struct. + +- Adding a variadic parameter to a named function or struct method, if the function type itself is not assignable in any public APIs (e.g. a callback). + +- Adding a new method to an interface, or a variadic parameter to an interface method, _if the interface already has a private method_ (which prevents external packages from implementing it). + +- Widening a numeric type as long as it is a named type (e.g. `type Number int32` can change to `int64`, but not `int8` or `uint32`). + +Note that public APIs can expose private types (e.g. via an exported variable, field, or function/method return value), in which case the exported fields and methods on these private types are also part of the public API and covered by its backwards compatibility guarantees. In general, private types should never be accessible via public APIs unless wrapped in an exported interface.
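The last point can be illustrated with a short sketch. The `Logger` name here is hypothetical, not an actual Tendermint API: the concrete type stays private, so its fields and internals can change freely, while the exported interface is the only public surface.

```go
package log

// Logger is the exported interface; it is the only public API surface.
type Logger interface {
	Info(msg string, keyvals ...interface{})
}

// logger is private: external packages can only use it through Logger,
// so its fields and layout can change without breaking compatibility.
type logger struct {
	// internal fields can change freely
}

func (l *logger) Info(msg string, keyvals ...interface{}) {
	// write the log entry somewhere
}

// NewLogger returns the private type wrapped in the exported interface.
func NewLogger() Logger {
	return &logger{}
}
```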
+ +Also note that if we accept, return, export, or embed types from a dependency, we assume the backwards compatibility responsibility for that dependency, and must make sure any dependency upgrades comply with the above constraints. + +We should run CI linters for minor version branches to enforce this, e.g. [apidiff](https://go.googlesource.com/exp/+/refs/heads/master/apidiff/README.md), [breakcheck](https://github.com/gbbr/breakcheck), and [apicompat](https://github.com/bradleyfalzon/apicompat). + +#### Accepted Breakage + +The above changes can still break programs in a few ways - these are _not_ considered backwards-incompatible changes, and users are advised to avoid this usage: + +- If a program uses unkeyed struct literals (e.g. `Foo{"bar", "baz"}`) and we add fields or change the field order, the program will no longer compile or may have logic errors. + +- If a program embeds two structs in a struct, and we add a new field or method to an embedded Tendermint struct which also exists in the other embedded struct, the program will no longer compile. + +- If a program compares two structs (e.g. with `==`), and we add a new field of an incomparable type (slice, map, func, or struct that contains these) to a Tendermint struct which is compared, the program will no longer compile. + +- If a program assigns a Tendermint function to an identifier, and we add a variadic parameter to the function signature, the program will no longer compile. + +### Strategies for API Evolution + +The API guarantees above can be fairly constraining, but are unavoidable given the Go language design. The following tricks can be employed where appropriate to allow us to make changes to the API: + +- We can add a new function or method with a different name that takes additional parameters, and have the old function call the new one. + +- Functions and methods can take an options struct instead of separate parameters, to allow adding new options - this is particularly suitable for functions that take many parameters and are expected to be extended, and especially for interfaces where we cannot add new methods with different parameters at all. + +- Interfaces can include a private method, e.g. `interface { private() }`, to make them unimplementable by external packages and thus allow us to add new methods to the interface without breaking other programs. Of course, this can't be used for interfaces that should be implementable externally. + +- We can use [interface upgrades](https://avtok.com/2014/11/05/interface-upgrades.html) to allow implementers of an existing interface to also implement a new interface, as long as the old interface can still be used - e.g. the new interface `BetterReader` may have a method `ReadBetter()`, and a function that takes a `Reader` interface as an input can check if the implementer also implements `BetterReader` and in that case call `ReadBetter()` instead of `Read()`.
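The interface upgrade trick from the last bullet can be sketched as follows, using the hypothetical `Reader`/`BetterReader` names from the example above:

```go
// Reader is an existing exported interface that we cannot extend without
// breaking external implementations.
type Reader interface {
	Read(p []byte) (int, error)
}

// BetterReader is the interface upgrade: implementations may optionally
// provide it in addition to Reader.
type BetterReader interface {
	Reader
	ReadBetter(p []byte) (int, error)
}

// process takes the old interface, but transparently uses the upgraded
// method when the implementation supports it.
func process(r Reader, buf []byte) (int, error) {
	if br, ok := r.(BetterReader); ok {
		return br.ReadBetter(buf)
	}
	return r.Read(buf)
}
```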
+ +## Status + +Accepted + +## Consequences + +### Positive + +- Users can safely upgrade with less fear of applications breaking, and know whether an upgrade only includes bug fixes or also functional enhancements + +- External developers have a predictable and well-defined API to build on that will be supported for some time + +- Less synchronization between teams, since there is a clearer contract and timeline for changes and they happen less frequently + +- More documentation will remain accurate, since it's not chasing a moving target + +- Less time will be spent on code churn and more time spent on functional improvements, both for the community and for our teams + +### Negative + +- Many improvements, changes, and bug fixes will have to be postponed until the next major version, possibly for a year or more + +- The pace of development will slow down, since we must work within the existing API constraints, and spend more time planning public APIs + +- External developers may lose access to some currently exported APIs and functionality + +## References + +- [#4451: Place internal APIs under internal package](https://github.com/tendermint/tendermint/issues/4451) + +- [On Pluggability](https://docs.google.com/document/d/1G08LnwSyb6BAuCVSMF3EKn47CGdhZ5wPZYJQr4-bw58/edit?ts=5f609f11) diff --git a/docs/architecture/adr-061-p2p-refactor-scope.md b/docs/architecture/adr-061-p2p-refactor-scope.md new file mode 100644 index 000000000..7a9cb04be --- /dev/null +++ b/docs/architecture/adr-061-p2p-refactor-scope.md @@ -0,0 +1,109 @@ +# ADR 061: P2P Refactor Scope + +## Changelog + +- 2020-10-30: Initial version (@erikgrinaker) + +## Context + +The `p2p` package responsible for peer-to-peer networking is rather old and has a number of weaknesses, including tight coupling, leaky abstractions, lack of tests, DoS vulnerabilities, poor performance, custom protocols, and incorrect behavior. A refactor has been discussed for several years ([#2067](https://github.com/tendermint/tendermint/issues/2067)). + +Informal Systems are also building a Rust implementation of Tendermint, [Tendermint-rs](https://github.com/informalsystems/tendermint-rs), and plan to implement P2P networking support over the next year. As part of this work, they have requested adopting e.g. [QUIC](https://datatracker.ietf.org/doc/draft-ietf-quic-transport/) as a transport protocol instead of implementing the custom application-level `MConnection` stream multiplexing protocol that Tendermint currently uses. + +This ADR summarizes recent discussion with stakeholders on the scope of a P2P refactor. Specific designs and implementations will be submitted as separate ADRs. + +## Alternative Approaches + +There have been recurring proposals to adopt [LibP2P](https://libp2p.io) instead of maintaining our own P2P networking stack (see [#3696](https://github.com/tendermint/tendermint/issues/3696)). While this appears to be a good idea in principle, it would be a highly breaking protocol change, there are indications that we might have to fork and modify LibP2P, and there are concerns about the abstractions used. + +In discussions with Informal Systems we decided to begin with incremental improvements to the current P2P stack, add support for pluggable transports, and then gradually start experimenting with LibP2P as a transport layer. If this proves successful, we can consider adopting it for higher-level components at a later time.
+ +## Decision + +The P2P stack will be refactored and improved iteratively, in several phases: + +* **Phase 1:** code and API refactoring, maintaining protocol compatibility as far as possible. + +* **Phase 2:** additional transports and incremental protocol improvements. + +* **Phase 3:** disruptive protocol changes. + +The scope of phases 2 and 3 is still uncertain, and will be revisited once the preceding phases have been completed as we'll have a better sense of requirements and challenges. + +## Detailed Design + +Separate ADRs will be submitted for specific designs and changes in each phase, following research and prototyping. Below are objectives in order of priority. + +### Phase 1: Code and API Refactoring + +This phase will focus on improving the internal abstractions and implementations in the `p2p` package. As far as possible, it should not change the P2P protocol in a backwards-incompatible way. + +* Cleaner, decoupled abstractions for e.g. `Reactor`, `Switch`, and `Peer`. [#2067](https://github.com/tendermint/tendermint/issues/2067) [#5287](https://github.com/tendermint/tendermint/issues/5287) [#3833](https://github.com/tendermint/tendermint/issues/3833) + * Reactors should receive messages in separate goroutines or via buffered channels. [#2888](https://github.com/tendermint/tendermint/issues/2888) +* Improved peer lifecycle management. [#3679](https://github.com/tendermint/tendermint/issues/3679) [#3719](https://github.com/tendermint/tendermint/issues/3719) [#3653](https://github.com/tendermint/tendermint/issues/3653) [#3540](https://github.com/tendermint/tendermint/issues/3540) [#3183](https://github.com/tendermint/tendermint/issues/3183) [#3081](https://github.com/tendermint/tendermint/issues/3081) [#1356](https://github.com/tendermint/tendermint/issues/1356) + * Peer prioritization. [#2860](https://github.com/tendermint/tendermint/issues/2860) [#2041](https://github.com/tendermint/tendermint/issues/2041) +* Pluggable transports, with `MConnection` as one implementation. [#5587](https://github.com/tendermint/tendermint/issues/5587) [#2430](https://github.com/tendermint/tendermint/issues/2430) [#805](https://github.com/tendermint/tendermint/issues/805) +* Improved peer address handling. + * Address book refactor. [#4848](https://github.com/tendermint/tendermint/issues/4848) [#2661](https://github.com/tendermint/tendermint/issues/2661) + * Transport-agnostic peer addressing. [#5587](https://github.com/tendermint/tendermint/issues/5587) [#3782](https://github.com/tendermint/tendermint/issues/3782) [#3692](https://github.com/tendermint/tendermint/issues/3692) + * Improved detection and advertisement of own address. [#5588](https://github.com/tendermint/tendermint/issues/5588) [#4260](https://github.com/tendermint/tendermint/issues/4260) [#3716](https://github.com/tendermint/tendermint/issues/3716) [#1727](https://github.com/tendermint/tendermint/issues/1727) + * Support multiple IPs per peer. [#1521](https://github.com/tendermint/tendermint/issues/1521) [#2317](https://github.com/tendermint/tendermint/issues/2317) + +The refactor should attempt to address the following secondary objectives: testability, observability, performance, security, quality-of-service, backpressure, and DoS resilience. Much of this will be revisited as explicit objectives in phase 2. + +Ideally, the refactor should happen incrementally, with regular merges to `master` every few weeks. 
This will take more time overall, and cause frequent breaking changes to internal Go APIs, but it reduces the branch drift and gets the code tested sooner and more broadly. + +### Phase 2: Additional Transports and Protocol Improvements + +This phase will focus on protocol improvements and other breaking changes. The following are considered proposals that will need to be evaluated separately once the refactor is done. Additional proposals are likely to be added during phase 1. + +* QUIC transport. [#198](https://github.com/tendermint/spec/issues/198) +* Noise protocol for secret connection handshake. [#5589](https://github.com/tendermint/tendermint/issues/5589) [#3340](https://github.com/tendermint/tendermint/issues/3340) +* Peer ID in connection handshake. [#5590](https://github.com/tendermint/tendermint/issues/5590) +* Peer and service discovery (e.g. RPC nodes, state sync snapshots). [#5481](https://github.com/tendermint/tendermint/issues/5481) [#4583](https://github.com/tendermint/tendermint/issues/4583) +* Rate-limiting, backpressure, and QoS scheduling. [#4753](https://github.com/tendermint/tendermint/issues/4753) [#2338](https://github.com/tendermint/tendermint/issues/2338) +* Compression. [#2375](https://github.com/tendermint/tendermint/issues/2375) +* Improved metrics and tracing. [#3849](https://github.com/tendermint/tendermint/issues/3849) [#2600](https://github.com/tendermint/tendermint/issues/2600) +* Simplified P2P configuration options. + +### Phase 3: Disruptive Protocol Changes + +This phase covers speculative, wide-reaching proposals that are poorly defined and highly uncertain. They will be evaluated once the previous phases are done. + +* Adopt LibP2P. [#3696](https://github.com/tendermint/tendermint/issues/3696) +* Allow cross-reactor communication, possibly without channels. +* Dynamic channel advertisement, as reactors are enabled/disabled. [#4394](https://github.com/tendermint/tendermint/issues/4394) [#1148](https://github.com/tendermint/tendermint/issues/1148) +* Pubsub-style networking topology and pattern. +* Support multiple chain IDs in the same network. + +## Status + +Accepted + +## Consequences + +### Positive + +* Cleaner, simpler architecture that's easier to reason about and test, and thus hopefully less buggy. + +* Improved performance and robustness. + +* Reduced maintenance burden and increased interoperability by the possible adoption of standardized protocols such as QUIC and Noise. + +* Improved usability, with better observability, simpler configuration, and more automation (e.g. peer/service/address discovery, rate-limiting, and backpressure). + +### Negative + +* Maintaining our own P2P networking stack is resource-intensive. + +* Abstracting away the underlying transport may prevent usage of advanced transport features. + +* Breaking changes to APIs and protocols are disruptive to users. + +## References + +See issue links above.
+ +- [#2067: P2P Refactor](https://github.com/tendermint/tendermint/issues/2067) + +- [P2P refactor brainstorm document](https://docs.google.com/document/d/1FUTADZyLnwA9z7ndayuhAdAFRKujhh_y73D0ZFdKiOQ/edit?pli=1#) diff --git a/docs/architecture/adr-062-p2p-architecture.md b/docs/architecture/adr-062-p2p-architecture.md new file mode 100644 index 000000000..5fae7301d --- /dev/null +++ b/docs/architecture/adr-062-p2p-architecture.md @@ -0,0 +1,531 @@ +# ADR 062: P2P Architecture and Abstractions + +## Changelog + +- 2020-11-09: Initial version (@erikgrinaker) + +- 2020-11-13: Remove stream IDs, move peer errors onto channel, note on moving PEX into core (@erikgrinaker) + +- 2020-11-16: Notes on recommended reactor implementation patterns, approve ADR (@erikgrinaker) + +## Context + +In [ADR 061](adr-061-p2p-refactor-scope.md) we decided to refactor the peer-to-peer (P2P) networking stack. The first phase is to redesign and refactor the internal P2P architecture, while retaining protocol compatibility as far as possible. + +## Alternative Approaches + +Several variations of the proposed design were considered, including e.g. calling interface methods instead of passing messages (like the current architecture), merging channels with streams, exposing the internal peer data structure to reactors, being message format-agnostic via arbitrary codecs, and so on. This design was chosen because it has very loose coupling, is simpler to reason about and more convenient to use, avoids race conditions and lock contention for internal data structures, gives reactors better control of message ordering and processing semantics, and allows for QoS scheduling and backpressure in a very natural way. + +[multiaddr](https://github.com/multiformats/multiaddr) was considered as a transport-agnostic peer address format over regular URLs, but it does not appear to have very widespread adoption, and advanced features like protocol encapsulation and tunneling do not appear to be immediately useful to us. + +There were also proposals to use LibP2P instead of maintaining our own P2P stack, which were rejected (for now) in [ADR 061](adr-061-p2p-refactor-scope.md). + +## Decision + +The P2P stack will be redesigned as a message-oriented architecture, primarily relying on Go channels for communication and scheduling. It will use IO stream transports to exchange raw bytes with individual peers, bidirectional peer-addressable channels to send and receive Protobuf messages, and a router to route messages between reactors and peers. Message passing is asynchronous with at-most-once delivery. + +## Detailed Design + +This ADR is primarily concerned with the architecture and interfaces of the P2P stack, not implementation details. Separate ADRs may be submitted for individual components, since implementation may be non-trivial. The interfaces described here should therefore be considered a rough architecture outline, not a complete and final design. + +Primary design objectives have been: + +* Loose coupling between components, for a simpler, more robust, and test-friendly architecture. +* Pluggable transports (not necessarily networked). +* Better scheduling of messages, with improved prioritization, backpressure, and performance. +* Centralized peer lifecycle and connection management. +* Better peer address detection, advertisement, and exchange. +* Wire-level backwards compatibility with current P2P network protocols, except where it proves too obstructive. 
+ +The main abstractions in the new stack are: + +* `peer`: A node in the network, uniquely identified by a `PeerID` and stored in a `peerStore`. +* `Transport`: An arbitrary mechanism to exchange bytes with a peer using IO `Stream`s across a `Connection`. +* `Channel`: A bidirectional channel to asynchronously exchange Protobuf messages with peers addressed with `PeerID`. +* `Router`: Maintains transport connections to relevant peers and routes channel messages. +* Reactor: A design pattern loosely defined as "something which listens on a channel and reacts to messages". + +These abstractions are illustrated in the following diagram (representing the internals of node A) and described in detail below. + +![P2P Architecture Diagram](img/adr-062-architecture.svg) + +### Transports + +Transports are arbitrary mechanisms for exchanging raw bytes with a peer. For example, a gRPC transport would connect to a peer over TCP/IP and send data using the gRPC protocol, while an in-memory transport might communicate with a peer running in another goroutine using internal byte buffers. Note that transports don't have a notion of a `peer` as such - instead, they communicate with an arbitrary endpoint address (e.g. IP address and port number), to decouple them from the rest of the P2P stack. + +Transports must satisfy the following requirements: + +* Be connection-oriented, and support both listening for inbound connections and making outbound connections using endpoint addresses. + +* Support multiple logical IO streams within a single connection, to take full advantage of protocols with native stream support. For example, QUIC supports multiple independent streams, while HTTP/2 and MConn multiplex logical streams onto a single TCP connection. + +* Provide the public key of the peer, and possibly encrypt or sign the traffic as appropriate. This should be compared with known data (e.g. the peer ID) to authenticate the peer and avoid man-in-the-middle attacks. + +The initial transport implementation will be a port of the MConn protocol currently used by Tendermint, and should be backwards-compatible at the wire level as far as possible. This will be followed by an in-memory transport for testing, and a QUIC transport that may eventually replace MConn. + +The `Transport` interface is: + +```go +// Transport is an arbitrary mechanism for exchanging bytes with a peer. +type Transport interface { + // Accept waits for the next inbound connection on a listening endpoint. + Accept(context.Context) (Connection, error) + + // Dial creates an outbound connection to an endpoint. + Dial(context.Context, Endpoint) (Connection, error) + + // Endpoints lists endpoints the transport is listening on. Any endpoint IP + // addresses do not need to be normalized in any way (e.g. 0.0.0.0 is + // valid), as they should be preprocessed before being advertised. + Endpoints() []Endpoint +} +``` + +How the transport configures listening is transport-dependent, and not covered by the interface. This typically happens during transport construction, where a single instance of the transport is created and set to listen on an appropriate network interface before being passed to the router. + +#### Endpoints + +`Endpoint` represents a transport endpoint (e.g. an IP address and port). A connection always has two endpoints: one at the local node and one at the remote peer. Outbound connections to remote endpoints are made via `Dial()`, and inbound connections to listening endpoints are returned via `Accept()`.
+ +The `Endpoint` struct is: + +```go +// Endpoint represents a transport connection endpoint, either local or remote. +type Endpoint struct { + // Protocol specifies the transport protocol, used by the router to pick a + // transport for an endpoint. + Protocol Protocol + + // Path is an optional, arbitrary transport-specific path or identifier. + Path string + + // IP is an IP address (v4 or v6) to connect to. If set, this defines the + // endpoint as a networked endpoint. + IP net.IP + + // Port is a network port (either TCP or UDP). If not set, a default port + // may be used depending on the protocol. + Port uint16 +} + +// Protocol identifies a transport protocol. +type Protocol string +``` + +Endpoints are arbitrary transport-specific addresses, but if they are networked they must use IP addresses and thus rely on IP as a fundamental packet routing protocol. This enables policies for address discovery, advertisement, and exchange - for example, a private `192.168.0.0/24` IP address should only be advertised to peers on that IP network, while the public address `8.8.8.8` may be advertised to all peers. Similarly, any port numbers if given must represent TCP and/or UDP port numbers, in order to use [UPnP](https://en.wikipedia.org/wiki/Universal_Plug_and_Play) to autoconfigure e.g. NAT gateways. + +Non-networked endpoints (without an IP address) are considered local, and will only be advertised to other peers connecting via the same protocol. For example, an in-memory transport used for testing might have `Endpoint{Protocol: "memory", Path: "foo"}` as an address for the node "foo", and this should only be advertised to other nodes using `Protocol: "memory"`. + +#### Connections and Streams + +A connection represents an established transport connection between two endpoints (and thus two nodes), which can be used to exchange bytes via logically distinct IO streams. Connections are set up either via `Transport.Dial()` (outbound) or `Transport.Accept()` (inbound). The caller is responsible for verifying the remote peer's public key as returned by the connection, following the current MConn protocol behavior for now. + +Data is exchanged over IO streams created with `Connection.Stream()`. These implement the standard Go `io.Reader` and `io.Writer` interfaces to read and write bytes. Transports are free to choose how to implement such streams, e.g. by taking advantage of native stream support in the underlying protocol or through multiplexing. + +`Connection` and the related `Stream` interfaces are: + +```go +// Connection represents an established connection between two endpoints. +type Connection interface { + // Stream creates a new logically distinct IO stream within the connection. + Stream() (Stream, error) + + // LocalEndpoint returns the local endpoint for the connection. + LocalEndpoint() Endpoint + + // RemoteEndpoint returns the remote endpoint for the connection. + RemoteEndpoint() Endpoint + + // PubKey returns the public key of the remote peer. + PubKey() crypto.PubKey + + // Close closes the connection. + Close() error +} + +// Stream represents a single logical IO stream within a connection. +type Stream interface { + io.Reader // Read([]byte) (int, error) + io.Writer // Write([]byte) (int, error) + io.Closer // Close() error +} +``` + +### Peers + +Peers are other Tendermint network nodes. Each peer is identified by a unique `PeerID`, and has a set of `PeerAddress` addresses expressed as URLs that they can be reached at. 
Examples of peer addresses might be e.g.: + +* `mconn://b10c@host.domain.com:25567/path` +* `unix:///var/run/tendermint/peer.sock` +* `memory:testpeer` + +Addresses are resolved into one or more transport endpoints, e.g. by resolving DNS hostnames into IP addresses (which should be refreshed periodically). Peers should always be expressed as address URLs, and never as endpoints which are a lower-level construct. + +```go +// PeerID is a unique peer ID, generally expressed in hex form. +type PeerID []byte + +// PeerAddress is a peer address URL. The User field, if set, gives the +// hex-encoded remote PeerID, which should be verified with the remote peer's +// public key as returned by the connection. +type PeerAddress url.URL + +// Resolve resolves a PeerAddress into a set of Endpoints, typically by +// expanding out a DNS name in Host to its IP addresses. Field mapping: +// +// Scheme → Endpoint.Protocol +// Host → Endpoint.IP +// Port → Endpoint.Port +// Path+Query+Fragment,Opaque → Endpoint.Path +// +func (a PeerAddress) Resolve(ctx context.Context) []Endpoint { return nil } +``` + +The P2P stack needs to track a lot of internal information about peers, such as endpoints, status, priorities, and so on. This is done in an internal `peer` struct, which should not be exposed outside of the `p2p` package (e.g. to reactors) in order to avoid race conditions and lock contention - other packages should use `PeerID`. + +The `peer` struct might look like the following, but is intentionally underspecified and will depend on implementation requirements (for example, it will almost certainly have to track statistics about connection failures and retries): + +```go +// peer tracks internal status information about a peer. +type peer struct { + ID PeerID + Status PeerStatus + Priority PeerPriority + Endpoints map[PeerAddress][]Endpoint // Resolved endpoints by address. +} + +// PeerStatus specifies peer statuses. +type PeerStatus string + +const ( + PeerStatusNew = "new" // New peer which we haven't tried to contact yet. + PeerStatusUp = "up" // Peer which we have an active connection to. + PeerStatusDown = "down" // Peer which we're temporarily disconnected from. + PeerStatusRemoved = "removed" // Peer which has been removed. + PeerStatusBanned = "banned" // Peer which is banned for misbehavior. +) + +// PeerPriority specifies peer priorities. +type PeerPriority int + +const ( + PeerPriorityNormal PeerPriority = iota + 1 + PeerPriorityValidator + PeerPriorityPersistent +) +``` + +Peer information is stored in a `peerStore`, which may be persisted in an underlying database, and will replace the current address book either partially or in full. It is kept internal to avoid race conditions and tight coupling, and should at the very least contain basic CRUD functionality as outlined below, but will likely need additional functionality and is intentionally underspecified: + +```go +// peerStore contains information about peers, possibly persisted to disk. +type peerStore struct { + peers map[string]*peer // Entire set in memory, with PeerID.String() keys. + db dbm.DB // Database for persistence, if non-nil. +} + +func (p *peerStore) Delete(id PeerID) error { return nil } +func (p *peerStore) Get(id PeerID) (peer, bool) { return peer{}, false } +func (p *peerStore) List() []peer { return nil } +func (p *peerStore) Set(peer peer) error { return nil } +``` + +Peer address detection, advertisement and exchange (including detection of externally-reachable addresses via e.g. 
NAT gateways) is out of scope for this ADR, but may be covered in a separate ADR. The current PEX reactor should probably be absorbed into the core P2P stack and protocol instead of running as a separate reactor, since this needs to mutate the core peer data structures and will thus be tightly coupled with the router. + +### Channels + +While low-level data exchange happens via transport IO streams, the high-level API is based on a bidirectional `Channel` that can send and receive Protobuf messages addressed by `PeerID`. A channel is identified by an arbitrary `ChannelID` identifier, and can exchange Protobuf messages of one specific type (since the type to unmarshal into must be known). Message delivery is asynchronous and at-most-once. + +The channel can also be used to report peer errors, e.g. when receiving an invalid or malignant message. This may cause the peer to be disconnected or banned depending on the router's policy. + +A `Channel` has this interface: + +```go +// Channel is a bidirectional channel for Protobuf message exchange with peers. +type Channel struct { + // ID contains the channel ID. + ID ChannelID + + // messageType specifies the type of messages exchanged via the channel, and + // is used e.g. for automatic unmarshaling. + messageType proto.Message + + // In is a channel for receiving inbound messages. Envelope.From is always + // set. + In <-chan Envelope + + // Out is a channel for sending outbound messages. Envelope.To or Broadcast + // must be set, otherwise the message is discarded. + Out chan<- Envelope + + // Error is a channel for reporting peer errors to the router, typically used + // when peers send an invalid or malignant message. + Error chan<- PeerError +} + +// Close closes the channel, and is equivalent to close(Channel.Out). This will +// cause Channel.In to be closed when appropriate. The ID can then be reused. +func (c *Channel) Close() error { return nil } + +// ChannelID is an arbitrary channel ID. +type ChannelID uint16 + +// Envelope specifies the message receiver and sender. +type Envelope struct { + From PeerID // Message sender, or empty for outbound messages. + To PeerID // Message receiver, or empty for inbound messages. + Broadcast bool // Send message to all connected peers, ignoring To. + Message proto.Message // Payload. +} + +// PeerError is a peer error reported by a reactor via the Error channel. The +// severity may cause the peer to be disconnected or banned depending on policy. +type PeerError struct { + PeerID PeerID + Err error + Severity PeerErrorSeverity +} + +// PeerErrorSeverity determines the severity of a peer error. +type PeerErrorSeverity string + +const ( + PeerErrorSeverityLow PeerErrorSeverity = "low" // Mostly ignored. + PeerErrorSeverityHigh PeerErrorSeverity = "high" // May disconnect. + PeerErrorSeverityCritical PeerErrorSeverity = "critical" // Ban. +) +``` + +A channel can reach any connected peer, and is implemented using transport streams against each individual peer, with an initial handshake to exchange the channel ID and any other metadata. The channel will automatically (un)marshal Protobuf to byte slices and use length-prefixed framing (the de facto standard for Protobuf streams) when writing them to the stream. + +Message scheduling and queueing is left as an implementation detail, and can use any number of algorithms such as FIFO, round-robin, priority queues, etc. Since message delivery is not guaranteed, both inbound and outbound messages may be dropped, buffered, or blocked as appropriate. 
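As an aside, the length-prefixed framing mentioned above can be sketched as follows. This is illustrative only, not the actual wire implementation, and assumes e.g. the gogo Protobuf library that Tendermint currently uses:

```go
import (
	"encoding/binary"
	"io"

	"github.com/gogo/protobuf/proto"
)

// writeMsg sketches length-prefixed Protobuf framing on a raw stream:
// a uvarint length header followed by the marshaled message bytes.
func writeMsg(w io.Writer, msg proto.Message) error {
	bz, err := proto.Marshal(msg)
	if err != nil {
		return err
	}
	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(bz)))
	if _, err := w.Write(lenBuf[:n]); err != nil {
		return err
	}
	_, err = w.Write(bz)
	return err
}
```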
+ +Since a channel can only exchange messages of a single type, it is often useful to use a wrapper message type with e.g. a Protobuf `oneof` field that specifies a set of inner message types that it can contain. The channel can automatically perform this (un)wrapping if the outer message type implements the `Wrapper` interface (see [Reactor Example](#reactor-example) for an example): + +```go +// Wrapper is a Protobuf message that can contain a variety of inner messages. +// If a Channel's message type implements Wrapper, the channel will +// automatically (un)wrap passed messages using the container type, such that +// the channel can transparently support multiple message types. +type Wrapper interface { + // Wrap will take a message and wrap it in this one. + Wrap(proto.Message) error + + // Unwrap will unwrap the inner message contained in this message. + Unwrap() (proto.Message, error) +} +``` + +### Routers + +The router manages all P2P networking for a node, and is responsible for keeping track of network peers, maintaining transport connections, and routing channel messages. As such, it must do e.g. connection retries and backoff, message QoS scheduling and backpressure, peer quality assessments, and endpoint detection and advertisement. In addition, the router provides a mechanism to subscribe to peer updates (e.g. peers connecting or disconnecting), and handles reported peer errors from reactors. + +The implementation of the router is likely to be non-trivial, and is intentionally unspecified here. A separate ADR will likely be submitted for this. It is unclear whether message routing/scheduling and peer lifecycle management can be split into two separate components, or if these need to be tightly coupled. + +The `Router` API is as follows: + +```go +// Router manages connections to peers and routes Protobuf messages between them +// and local reactors. It also provides peer status updates and error reporting. +type Router struct{} + +// NewRouter creates a new router, using the given peer store to track peers. +// Transports must be pre-initialized to listen on appropriate endpoints. +func NewRouter(peerStore *peerStore, transports map[Protocol]Transport) *Router { return nil } + +// Channel opens a new channel with the given ID. messageType should be an empty +// Protobuf message of the type that will be passed through the channel. The +// message can implement Wrapper for automatic message (un)wrapping. +func (r *Router) Channel(id ChannelID, messageType proto.Message) (*Channel, error) { return nil, nil } + +// PeerUpdates returns a channel with peer updates. The caller must cancel the +// context to end the subscription, and keep consuming messages in a timely +// fashion until the channel is closed to avoid blocking updates. +func (r *Router) PeerUpdates(ctx context.Context) PeerUpdates { return nil } + +// PeerUpdates is a channel for receiving peer updates. +type PeerUpdates <-chan PeerUpdate + +// PeerUpdate is a peer status update for reactors. +type PeerUpdate struct { + PeerID PeerID + Status PeerStatus +} +``` + +### Reactor Example + +While reactors are a first-class concept in the current P2P stack (i.e. there is an explicit `p2p.Reactor` interface), they will simply be a design pattern in the new stack, loosely defined as "something which listens on a channel and reacts to messages". + +Since reactors have very few formal constraints, they can be implemented in a variety of ways. 
There is currently no recommended pattern for implementing reactors, to avoid overspecification and scope creep in this ADR. However, prototyping and developing a reactor pattern should be done early during implementation, to make sure reactors built using the `Channel` interface can satisfy the needs for convenience, deterministic tests, and reliability. + +Below is a trivial example of a simple echo reactor implemented as a function. The reactor will exchange the following Protobuf messages: + +```protobuf +message EchoMessage { + oneof inner { + PingMessage ping = 1; + PongMessage pong = 2; + } +} + +message PingMessage { + string content = 1; +} + +message PongMessage { + string content = 1; +} +``` + +Implementing the `Wrapper` interface for `EchoMessage` allows transparently passing `PingMessage` and `PongMessage` through the channel, where it will automatically be (un)wrapped in an `EchoMessage`: + +```go +func (m *EchoMessage) Wrap(inner proto.Message) error { + switch inner := inner.(type) { + case *PingMessage: + m.Inner = &EchoMessage_PingMessage{Ping: inner} + case *PongMessage: + m.Inner = &EchoMessage_PongMessage{Pong: inner} + default: + return fmt.Errorf("unknown message %T", inner) + } + return nil +} + +func (m *EchoMessage) Unwrap() (proto.Message, error) { + switch inner := m.Inner.(type) { + case *EchoMessage_PingMessage: + return inner.Ping, nil + case *EchoMessage_PongMessage: + return inner.Pong, nil + default: + return nil, fmt.Errorf("unknown message %T", inner) + } +} +``` + +The reactor itself would be implemented e.g. like this: + +```go +// RunEchoReactor wires up an echo reactor to a router and runs it. +func RunEchoReactor(router *p2p.Router) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + channel, err := router.Channel(1, &EchoMessage{}) + if err != nil { + return err + } + defer channel.Close() + + return EchoReactor(ctx, channel, router.PeerUpdates(ctx)) +} + +// EchoReactor provides an echo service, pinging all known peers until cancelled. +func EchoReactor(ctx context.Context, channel *p2p.Channel, peerUpdates p2p.PeerUpdates) error { + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + + for { + select { + // Send ping message to all known peers every 5 seconds. + case <-ticker.C: + channel.Out <- Envelope{ + Broadcast: true, + Message: &PingMessage{Content: "👋"}, + } + + // When we receive a message from a peer, either respond to ping, output + // pong, or report peer error on unknown message type. + case envelope := <-channel.In: + switch msg := envelope.Message.(type) { + case *PingMessage: + channel.Out <- Envelope{ + To: envelope.From, + Message: &PongMessage{Content: msg.Content}, + } + + case *PongMessage: + fmt.Printf("%q replied with %q\n", envelope.From, msg.Content) + + default: + channel.Error <- PeerError{ + PeerID: envelope.From, + Err: fmt.Errorf("unexpected message %T", msg), + Severity: PeerErrorSeverityLow, + } + } + + // Output info about any peer status changes. + case peerUpdate := <-peerUpdates: + fmt.Printf("Peer %q changed status to %q", peerUpdate.PeerID, peerUpdate.Status) + + // Exit when context is cancelled. + case <-ctx.Done(): + return nil + } + } +} +``` + +### Implementation Plan + +The existing P2P stack should be gradually migrated towards this design. The easiest path would likely be: + +1. Implement the `Channel` and `PeerUpdates` APIs as shims on top of the current `Switch` and `Peer` APIs, and rewrite all reactors to use them instead. + +2. 
Port the `privval` package to no longer use `SecretConnection` (e.g. by using gRPC instead), or temporarily duplicate its functionality. + +3. Rewrite the current MConn connection and transport code to use the new `Transport` API, and migrate existing code to use it instead. + +4. Implement the new `peer` and `peerStore` APIs, and either make the current address book a shim on top of these or replace it. + +5. Replace the existing `Switch` abstraction with the new `Router`. + +6. Move the PEX reactor and other address advertisement/exchange into the P2P core, possibly the `Router`. + +7. Consider rewriting and/or cleaning up reactors and other P2P-related code to make better use of the new abstractions. + +A note on backwards-compatibility: the current MConn protocol takes whole messages expressed as byte slices and splits them up into `PacketMsg` messages, where the final packet of a message has `PacketMsg.EOF` set. In order to maintain wire-compatibility with this protocol, the MConn transport needs to be aware of message boundaries, even though it does not care what the messages actually are. One way to handle this is to break abstraction boundaries and have the transport decode the input's length-prefixed message framing and use this to determine message boundaries, unless we accept breaking the protocol here. + +Similarly, implementing channel handshakes with the current MConn protocol would require doing an initial connection handshake as today and using that information to "fake" the local channel handshake without it hitting the wire. + +## Status + +Accepted + +## Consequences + +### Positive + +* Reduced coupling and simplified interfaces should lead to better understandability, increased reliability, and more testing. + +* Using message passing via Go channels gives better control of backpressure and quality-of-service scheduling. + +* Peer lifecycle and connection management is centralized in a single entity, making it easier to reason about. + +* Detection, advertisement, and exchange of node addresses will be improved. + +* Additional transports (e.g. QUIC) can be implemented and used in parallel with the existing MConn protocol. + +* The P2P protocol will not be broken in the initial version, if possible. + +### Negative + +* Fully implementing the new design as intended is likely to require breaking changes to the P2P protocol at some point, although the initial implementation shouldn't. + +* Gradually migrating the existing stack and maintaining backwards-compatibility will be more labor-intensive than simply replacing the entire stack. + +* A complete overhaul of P2P internals is likely to cause temporary performance regressions and bugs as the implementation matures. + +* Hiding peer management information inside the `p2p` package may prevent certain functionality or require additional deliberate interfaces for information exchange, as a tradeoff to simplify the design, reduce coupling, and avoid race conditions and lock contention. + +### Neutral + +* Implementation details around e.g. peer management, message scheduling, and peer and endpoint advertisement are not yet determined.
+ +## References + +* [ADR 061: P2P Refactor Scope](adr-061-p2p-refactor-scope.md) +* [#5670 p2p: internal refactor and architecture redesign](https://github.com/tendermint/tendermint/issues/5670) diff --git a/docs/architecture/adr-template.md b/docs/architecture/adr-template.md index 759fc6d72..c7b1e542a 100644 --- a/docs/architecture/adr-template.md +++ b/docs/architecture/adr-template.md @@ -8,18 +8,52 @@ > This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution. +## Alternative Approaches + +> This section contains information around alternative options that are considered before making a decision. It should contain a explanation on why the alternative approach(es) were not chosen. + ## Decision -> This section explains all of the details of the proposed solution, including implementation details. -> It should also describe affects / corollary items that may need to be changed as a part of this. -> If the proposed change will be large, please also indicate a way to do the change to maximize ease of review. -> (e.g. the optimal split of things to do between separate PR's) +> This section records the decision that was made. +> It is best to record as much info as possible from the discussion that happened. This aids in not having to go back to the Pull Request to get the needed information. + +## Detailed Design + +> This section does not need to be filled in at the start of the ADR, but must be completed prior to the merging of the implementation. +> +> Here are some common questions that get answered as part of the detailed design: +> +> - What are the user requirements? +> +> - What systems will be affected? +> +> - What new data structures are needed, what data structures will be changed? +> +> - What new APIs will be needed, what APIs will be changed? +> +> - What are the efficiency considerations (time/space)? +> +> - What are the expected access patterns (load/throughput)? +> +> - Are there any logging, monitoring or observability needs? +> +> - Are there any security considerations? +> +> - Are there any privacy considerations? +> +> - How will the changes be tested? +> +> - If the change is large, how will the changes be broken up for ease of review? +> +> - Will these changes require a breaking (major) release? +> +> - Does this change require coordination with the SDK or other? ## Status -> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement. +> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. Once the ADR has been implemented mark the ADR as "implemented". If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement. 
-{Deprecated|Proposed|Accepted} +{Deprecated|Proposed|Accepted|Declined} ## Consequences diff --git a/docs/architecture/img/adr-062-architecture.svg b/docs/architecture/img/adr-062-architecture.svg new file mode 100644 index 000000000..1ad18a3e0 --- /dev/null +++ b/docs/architecture/img/adr-062-architecture.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/imgs/a_plus_t.png b/docs/imgs/a_plus_t.png deleted file mode 100644 index 8f5bc5e95..000000000 Binary files a/docs/imgs/a_plus_t.png and /dev/null differ diff --git a/docs/imgs/bifurcation-point.png b/docs/imgs/bifurcation-point.png new file mode 100644 index 000000000..dce8938d8 Binary files /dev/null and b/docs/imgs/bifurcation-point.png differ diff --git a/docs/imgs/evidence_lifecycle.png b/docs/imgs/evidence_lifecycle.png new file mode 100644 index 000000000..dc4ed54f5 Binary files /dev/null and b/docs/imgs/evidence_lifecycle.png differ diff --git a/docs/imgs/light-client-detector.png b/docs/imgs/light-client-detector.png new file mode 100644 index 000000000..b098aa6e2 Binary files /dev/null and b/docs/imgs/light-client-detector.png differ diff --git a/docs/introduction/tcp-window.png b/docs/imgs/tcp-window.png similarity index 100% rename from docs/introduction/tcp-window.png rename to docs/imgs/tcp-window.png diff --git a/docs/interviews/readme.md b/docs/interviews/readme.md deleted file mode 100644 index cc9223496..000000000 --- a/docs/interviews/readme.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -parent: - order: false ---- diff --git a/docs/interviews/tendermint-bft.md b/docs/interviews/tendermint-bft.md deleted file mode 100644 index 6d3a940cc..000000000 --- a/docs/interviews/tendermint-bft.md +++ /dev/null @@ -1,250 +0,0 @@ -# Interview Transcript with Tendermint core researcher, Zarko Milosevic, by Chjango - -**ZM**: Regarding leader election, it's round robin, but a weighted one. You -take into account the amount of bonded tokens. Depending on how much weight -they have of voting power, they would be elected more frequently. So we do -rotate, but just the guys who are having more voting power would be elected -more frequently. We are having 4 validators, and 1 of them have 2 times more -voting power, they have 2 times more elected as a leader. - -**CC**: 2x more absolute voting power or probabilistic voting power? - -**ZM**: It's actually very deterministic. It's not probabilistic at all. See -[Tendermint proposal election specification][1]. In Tendermint, there is no -pseudorandom leader election. It's a deterministic protocol. So leader election -is a built-in function in the code, so you know exactly—depending on the voting -power in the validator set, you'd know who exactly would be the leader in round -x, x + 1, and so on. There is nothing random there; we are not trying to hide -who would be the leader. It's really well known. It's just that there is a -function, it's a mathematical function, and it's just basically—it's kind of an -implementation detail—it starts from the voting power, and when you are -elected, you get decreased some number, and in each round you keep increasing -depending on your voting power, so that you are elected after k rounds again. -But knowing the validator set and the voting power, it's very simple function, -you can calculate yourself to know exactly who would be next. For each round, -this function will return you the leader for that round. In every round, we do -this computation. It's all part of the same flow. 
It enforces the properties -which are: proportional to your voting power, you will be elected, and we keep -changing the leaders. So it can't happen to have one guy being more elected -than other guys, if they have the same voting power. So one time it will be guy -B, and next time it will be guy B1. So it's not random. - -**CC**: Assuming the validator set remains unchanged for a month, then if you -run this function, are you able to know exactly who is going to go for that -entire month? - -**ZM**: Yes. - -**CC**: What're the attack scenarios for this? - -**ZM**: This is something which is easily attacked by people who argue that -Tendermint is not decentralized enough. They say that by knowing the leader, -you can DDoS the leader. And by DDoSing the leader, you are able to stop the -progress. Because it's true. If you would be able to DDoS the leader, the -leader would not be able to propose and then effectively will not be making -progress. How we are addressing this thing is Sentry Architecture. So the -validator—or at least a proper validator—will never be available. You don't -know the ip address of the validator. You are never able to open the connection -to the validator. So validator is spawning sentry nodes and this is the single -administration domain and there is only connection from validator in the sense -of sentry nodes. And ip address of validator is not shared in the p2p network. -It’s completely private. This is our answer to DDoS attack. By playing clever -at this sentry node architecture and spawning additional sentry nodes in case, -for ex your sentry nodes are being DDoS’d, bc your sentry nodes are public, -then you will be able to connect to sentry nodes. this is where we will expect -the validator to be clever enough that so that in case they are DDoS’d at the -sentry level, they will spawn a different sentry node and then you communicate -through them. We are in a sense pushing the responsibility on the validator. - -**CC**: So if I understand this correctly, the public identity of the validator -doesn’t even matter because that entity can obfuscate where their real full -nodes reside via a proxy through this sentry architecture. - -**ZM**: Exactly. So you do know what is the address or identity of the validator -but you don’t know the network address of it; you’re not able to attack it -because you don’t know where they are. They are completely obfuscated by the -sentry nodes. There is now, if you really want to figure out….There is the -Tendermint protocol, the structure of the protocol is not fully decentralized -in the sense that the flow of information is going from the round proposer, or -the round coordinator, to other nodes, and then after they receive this it’s -basically like [inaudible: “O to 1”]. So by tracking where this information is -coming from, you might be able to identify who are the sentry nodes behind it. -So if you are doing some network analysis, you might be able to deduce -something. If the thing would be completely stuck, where the validator would -never change their sentry nodes or ip addresses of sentry nodes, it could be -possible to deduce something. This is where economic game comes into play. We -are doing an economics game there. We say that it’s a validator business. If -they are not able to hide themselves well enough, they’ll be DDoS’d and they -will be kicked out of the active validator set. So it’s in their interest. - -[Proposer Selection Procedure in Tendermint][1]. This is how it should work no -matter what implementation. 
- -**CC**: Going back to the proposer, lets say the validator does get DDoS’d, then -the proposer goes down. What happens? - -**ZM**: How the proposal mechanism works—there’s nothing special there—it goes -through a sequence of rounds. Normal execution of Tendermint is that for each -height, we are going through a sequence of rounds, starting from round 0, and -then we are incrementing through the rounds. The nodes are moving through the -rounds as part of normal procedure until they decide to commit. In case you -have one proposer—the proposer of a single round—being DDoS’d, we will probably -not decide in that round, because he will not be able to send his proposal. So -we will go to the next round, and hopefully the next proposer will be able to -communicate with the validators and then we’ll decide in the next round. - -**CC**: Are there timeouts between one round to another, if a round gets -skipped? - -**ZM**: There are timeouts. It’s a bit more complex. I think we have 5 timeouts. -We may be able to simplify this a bit. What is important to understand is: The -only condition which needs to be satisfied so we can go to the next round is -that your validator is able to communicate with more than 2/3rds of voting -power. To be able to move to the next round, you need to receive more than -2/3rd of voting power equivalent of pre-commit messages. - -We have two kinds of messages: 1) Proposal: Where the current round proposer is -suggesting how the next block should look like. This is first one. Every round -starts with proposer sending a proposal. And then there are two more rounds of -voting, where the validator is trying to agree whether they will commit the -proposal or not. And the first of such vote messages is called `pre-vote` and -the second one is `pre-commit`. Now, to be able to move between steps, between -a `pre-vote` and `pre-commit` step, you need to receive enough number of -messages where if message is sent by validator A, then also this message has a -weight, or voting power which is equal to the voting power of the validator who -sent this message. Before you receive more than 2/3 of voting power messages, you are not -able to move to the higher round. Only when you receive more than 2/3 of -messages, you actually start the timeout. The timeout is happening only after -you receive enough messages. And it happens because of the asynchrony of the -message communication so you give more time to guys with this timeout to -receive some messages which are maybe delayed. - -**CC**: In this way that you just described via the whole network gossiping -before we commit a block, that is what makes Tendermint BFT deterministic in a -partially synchronous setting vs Bitcoin which has synchrony assumptions -whereby blocks are first mined and then gossiped to the network. - -**ZM**: It's true that in Bitcoin, this is where the synchrony assumption comes -to play because if they're not able to communicate timely, they are not able to -converge to a single longest chain. Why are they not able to decrease timeout -in Bitcoin? Because if they would decrease, there would be so many forks that -they won't be able to converge to a single chain. By increasing this -complexity and the block time, they're able to have not so many forks. This is -effectively the timing assumption—the block duration in a sense because it's -enough time so that the decided block is propagated through the network before -someone else start deciding on the same block and creating forks. 
It's very -different from the consensus algorithms in a distributed computing setup where -Tendermint fits. In Tendermint, where we talk about the timing dependency, they -are really part of this 3-communication step protocol I just explained. We have -the following assumption: If the good guys are not able to communicate timely -and reliably without having message loss within a round, the Tendermint will -not make progress—it will not be making blocks. So if you are in a completely -asynchronous network where messages get lost or delayed unpredictably, -Tendermint will not make progress, it will not create forks, but it will not -decide, it will not tell you what is the next block. For termination, it's a -liveness property of consensus. It's a guarantee to decide. We do need timing -assumptions. Within a round, correct validators are able to communicate to each -other the consensus messages, not the transactions, but consensus messages. -They need to communicate in a timely and reliable fashion. But this doesn't -need to hold forever. It's just that what we are assuming when we say it's a -partially synchronous system, we assume that the system will be going through a -period of asynchrony, where we don't have this guarantee; the messages will be -delayed or some will be lost and then will not make progress for some period of -time, or we're not guaranteed to make progress. And the period of synchrony -where these guarantees hold. And if we think about internet, internet is best -described using such a model. Sometimes when we send a message to SF to -Belgrade, it takes 100 ms, sometimes it takes 300 ms, sometimes it takes 1 s. -But in most cases, it takes 100 ms or less than this. - -There is one thing which would be really nice if you understand it. In a global -wide area network, we can't make assumption on the communication unless we are -very conservative about this. If you want to be very fast, then we can't make -assumption and say we'll be for sure communicating with 1 ms communication -delay. Because of the complexity and various congestion issues on the network, -it might happen that during a short period of time, this doesn't hold. If this -doesn't hold and you depend on this for correctness of your protocol, you will -have a fork. So the partially synchronous protocol, most of them like -Tendermint, they don't depend on the timing assumption from the internet for -correctness. This is where we state: safety always. So we never make a fork no -matter how bad our estimates about the internet communication delays are. We'll -never make a fork, but we do make some assumptions, and these assumptions are -built-in our timeouts in our protocol which are actually adaptive. So we are -adapting to the current condition and this is where we're saying...We do assume -some properties, or some communication delays, to eventually hold on the -network. During this period, we guarantee that we will be deciding and -committing blocks. And we will be doing this very fast. We will be basically on -the speed of the current network. - -**CC**: We make liveness assumptions based on the integrity of the validator -businesses, assuming they're up and running fine. - -**ZM**: This is where we are saying, the protocol will be live if we have at -most 1/3, or a bit less than 1/3, of faulty validators. Which means that all -other guys should be online and available. This is also for liveness. 
This is -related to the condition that we are not able to make progress in rounds if we -don't receive enough messages. If half of our voting power, or half of our -validators are down, we don't have enough messages, so the protocol is -completely blocked. It doesn't make progress in a round, which means it's not -able to be signed. So it's completely critical for Tendermint that we make -progress in rounds. It's like breathing. Tendermint is breathing. If there is -no progress, it's dead; it's blocked, we're not able to breathe, that's why -we're not able to make progress. - -**CC**: How does Tendermint compare to other consensus algos? - -**ZM**: Tendermint is a very interesting protocol. From an academic point of -view, I'm convinced that there is value there. Hopefully, we prove it by -publishing it on some good conference. What is novel is, if we compare first -Tendermint to this existing BFT problem, it's a continuation of academic -research on BFT consensus. What is novel in Tendermint is that it somehow -merges consensus protocol with gossip. This is completely novel idea. -Originally, in BFT, people were assuming the single administration domain, -small number of nodes, local area network, 4-7 nodes max. If you look at the -research paper, 99% of them have this kind of setup. Wide area was studied but -there is significantly less work in wide area networks. No one studied how to -scale those protocols to hundreds or thousands of nodes before blockchain. It -was always a single administration domain. So in Tendermint now, you are able -to reach consensus among different administration domains which are potentially -hundreds of them in wide area network. The system model is potentially harder -because we have more nodes and wide area network. The second thing is that: -normally, in bft protocols, the protocol itself are normally designed in a way -that has two phases, or two parts. The one which is called normal case, which -is normally quite simple, in this normal case. In spite of some failures, which -are part of the normal execution of the protocol, like for example leader -crashes or leader being DDoS'd, they need to go through a quite complex -protocol, which is like being called view change or leader election or -whatever. These two parts of the same protocol are having quite different -complexity. And most of the people only understand this normal case. In -Tendermint, there is no this difference. We have only one protocol, there are -not two protocols. It's always the same steps and they are much closer to the -normal case than this complex view change protocol. - -_This is a bit too technical but this is on a high level things to remember, -that: The system it addresses it's harder than the others and the algorithm -complexity in Tendermint is simpler._ The initial goal of Jae and Bucky which -is inspired by Raft, is that it's simpler so normal engineers could understand. - -**CC**: Can you expand on the termination requirement? - -_Important point about Liveness in Tendermint_ - -**ZM**: In Tendermint, we are saying, for termination, we are making assumption -that the system is partially synchronous. And in a partially synchronous system -model, we are able to mathematically prove that the protocol will make -decisions; it will decide. - -**CC**: What is a persistent peer? - -**ZM**: It's a list of peer identities, which you will try to establish -connection to them, in case connection is broken, Tendermint will automatically -try to reestablish connection. 
These are important peers, you will really try -persistently to establish connection to them. For other peers, you just drop it -and try from your address book to connect to someone else. The address book is a -list of peers which you discover that they exist, because we are talking about a -very dynamic network—so the nodes are coming and going away—and the gossiping -protocol is discovering new nodes and gossiping them around. So every node will -keep the list of new nodes it discovers, and when you need to establish -connection to a peer, you'll look to address book and get some addresses from -there. There's categorization/ranking of nodes there. - -[1]: https://docs.tendermint.com/master/spec/reactors/consensus/proposer-selection.html diff --git a/docs/introduction/README.md b/docs/introduction/README.md index 01c8ecddf..abdf81333 100644 --- a/docs/introduction/README.md +++ b/docs/introduction/README.md @@ -1,5 +1,5 @@ --- -order: 1 +order: false parent: title: Introduction order: 1 @@ -15,6 +15,6 @@ Get Tendermint up-and-running quickly with the [quick-start guide](./quick-start Detailed [installation instructions](./install.md). -## What is Tendermint? +## What is Tendermint Dive into [what Tendermint is and why](./what-is-tendermint.md)! diff --git a/docs/introduction/architecture.md b/docs/introduction/architecture.md index 066814f23..3b70e7015 100644 --- a/docs/introduction/architecture.md +++ b/docs/introduction/architecture.md @@ -1,97 +1,115 @@ +--- +order: false +--- # Tendermint Architectural Overview -_November 2019_ + + +> **November 2019** Over the next few weeks, @brapse, @marbar3778 and I (@tessr) are having a series of meetings to go over the architecture of Tendermint Core. These are my notes from these meetings, which will either serve as an artifact for onboarding future engineers; or will provide the basis for such a document. ## Communication -There are three forms of communication (e.g., requests, responses, connections) that can happen in Tendermint Core: *internode communication*, *intranode communication*, and *client communication*. -- Internode communication: Happens between a node and other peers. This kind of communication happens over TCP or HTTP. More on this below. -- Intranode communication: Happens within the node itself (i.e., between reactors or other components). These are typically function or method calls, or occasionally happen through an event bus. -- Client communiation: Happens between a client (like a wallet or a browser) and a node on the network. +There are three forms of communication (e.g., requests, responses, connections) that can happen in Tendermint Core: *internode communication*, *intranode communication*, and *client communication*. + +- Internode communication: Happens between a node and other peers. This kind of communication happens over TCP or HTTP. More on this below. +- Intranode communication: Happens within the node itself (i.e., between reactors or other components). These are typically function or method calls, or occasionally happen through an event bus. + +- Client communication: Happens between a client (like a wallet or a browser) and a node on the network. ### Internode Communication Internode communication can happen in two ways: + 1. TCP connections through the p2p package - Most common form of internode communication - Connections between nodes are persisted and shared across reactors, facilitated by the switch. (More on the switch below.) -2. RPC over HTTP +2. 
RPC over HTTP - Reserved for short-lived, one-off requests - Example: reactor-specific state, like height - - Also possible: websocks connected to channels for notifications (like new transactions) + - Also possible: web-sockets connected to channels for notifications (like new transactions) ### P2P Business (the Switch, the PEX, and the Address Book) When writing a p2p service, there are two primary responsibilities: + 1. Routing: Who gets which messages? -2. Peer management: Who can you talk to? What is their state? And how can you do peer discovery? +2. Peer management: Who can you talk to? What is their state? And how can you do peer discovery? The first responsibility is handled by the Switch: + - Responsible for routing connections between peers - Notably _only handles TCP connections_; RPC/HTTP is separate - Is a dependency for every reactor; all reactors expose a function `setSwitch` - Holds onto channels (channels on the TCP connection--NOT Go channels) and uses them to route -- Is a global object, with a global namespace for messages +- Is a global object, with a global namespace for messages - Similar functionality to libp2p -TODO: More information (maybe) on the implementation of the Switch. +TODO: More information (maybe) on the implementation of the Switch. -The second responsibility is handled by a combination of the PEX and the Address Book. +The second responsibility is handled by a combination of the PEX and the Address Book. + + TODO: What is the PEX and the Address Book? - TODO: What is the PEX and the Address Book? - #### The Nature of TCP, and Introduction to the `mconnection` + Here are some relevant facts about TCP: + 1. All TCP connections have a "frame window size" which represents the packet size to the "confidence;" i.e., if you are sending packets along a new connection, you must start out with small packets. As the packets are received successfully, you can start to send larger and larger packets. (This curve is illustrated below.) This means that TCP connections are slow to spin up. -3. The syn/ack process also means that there's a high overhead for small, frequent messages -4. Sockets are represented by file descriptors. +2. The syn/ack process also means that there's a high overhead for small, frequent messages +3. Sockets are represented by file descriptors. -![tcp-window](tcp-window.png) +![tcp](../imgs/tcp-window.png) In order to have performant TCP connections under the conditions created in Tendermint, we've created the `mconnection`, or the multiplexing connection. It is our own protocol built on top of TCP. It lets us reuse TCP connections to minimize overhead, and it keeps the window size high by sending auxiliary messages when necessary. The `mconnection` is represented by a struct, which contains a batch of messages, read and write buffers, and a map of channel IDs to reactors. It communicates with TCP via file descriptors, which it can write to. There is one `mconnection` per peer connection. -The `mconnection` has two methods: `send`, which takes a raw handle to the socket and writes to it; and `trySend`, which writes to a different buffer. (TODO: which buffer?) +The `mconnection` has two methods: `send`, which takes a raw handle to the socket and writes to it; and `trySend`, which writes to a different buffer. (TODO: which buffer?) 
-The `mconnection` is owned by a peer, which is owned (potentially with many other peers) by a (global) transport, which is owned by the (global) switch: +The `mconnection` is owned by a peer, which is owned (potentially with many other peers) by a (global) transport, which is owned by the (global) switch: + ``` switch - transport - peer - mconnection - peer - mconnection - peer - mconnection + transport + peer + mconnection + peer + mconnection + peer + mconnection ``` + -## node.go +## node.go node.go is the entrypoint for running a node. It sets up reactors, sets up the switch, and registers all the RPC endpoints for a node. ## Types of Nodes -1. Validator Node: + + +1. Validator Node: 2. Full Node: 3. Seed Node: -TODO: Flesh out the differences between the types of nodes and how they're configured. +TODO: Flesh out the differences between the types of nodes and how they're configured. -## Reactors +## Reactors + +Here are some Reactor Facts: -Here are some Reactor Facts: - Every reactor holds a pointer to the global switch (set through `SetSwitch()`) - The switch holds a pointer to every reactor (`addReactor()`) - Every reactor gets set up in node.go (and if you are using custom reactors, this is where you specify that) -- `addReactor` is called by the switch; `addReactor` calls `setSwitch` for that reactor -- There's an assumption that all the reactors are added before +- `addReactor` is called by the switch; `addReactor` calls `setSwitch` for that reactor +- There's an assumption that all the reactors are added before - Sometimes reactors talk to each other by fetching references to one another via the switch (which maintains a pointer to each reactor). **Question: Can reactors talk to each other in any other way?** Furthermore, all reactors expose: + 1. A TCP channel -2. A `receive` method +2. A `receive` method 3. An `addReactor` call The `receive` method can be called many times by the mconnection. It has the same signature across all reactors. @@ -99,15 +117,19 @@ The `receive` method can be called many times by the mconnection. It has the sam The `addReactor` call does a for loop over all the channels on the reactor and creates a map of channel IDs->reactors. The switch holds onto this map, and passes it to the _transport_, a thin wrapper around TCP connections. The following is an exhaustive (?) list of reactors: + - Blockchain Reactor -- Consensus Reactor -- Evidence Reactor +- Consensus Reactor +- Evidence Reactor - Mempool Reactor - PEX Reactor Each of these will be discussed in more detail later. -### Blockchain Reactor -The blockchain reactor has two responsibilities: -1. Serve blocks at the request of peers -2. TODO: learn about the second responsibility of the blockchain reactor \ No newline at end of file + +### Blockchain Reactor + +The blockchain reactor has two responsibilities: + +1. Serve blocks at the request of peers +2. 
TODO: learn about the second responsibility of the blockchain reactor diff --git a/docs/introduction/install.md b/docs/introduction/install.md index b94230cc1..7c92c3b05 100644 --- a/docs/introduction/install.md +++ b/docs/introduction/install.md @@ -13,50 +13,47 @@ To download pre-built binaries, see the [releases page](https://github.com/tende You'll need `go` [installed](https://golang.org/doc/install) and the required environment variables set, which can be done with the following commands: -```bash +```sh echo export GOPATH=\"\$HOME/go\" >> ~/.bash_profile echo export PATH=\"\$PATH:\$GOPATH/bin\" >> ~/.bash_profile -echo export GO111MODULE=on >> ~/.bash_profile ``` ### Get Source Code -``` -mkdir -p $GOPATH/src/github.com/tendermint -cd $GOPATH/src/github.com/tendermint +```sh git clone https://github.com/tendermint/tendermint.git cd tendermint ``` ### Get Tools & Dependencies -``` +```sh make tools ``` ### Compile -``` +```sh make install ``` to put the binary in `$GOPATH/bin` or use: -``` +```sh make build ``` to put the binary in `./build`. -_DISCLAIMER_ The binary of tendermint is build/installed without the DWARF -symbol table. If you would like to build/install tendermint with the DWARF +_DISCLAIMER_ The binary of Tendermint is build/installed without the DWARF +symbol table. If you would like to build/install Tendermint with the DWARF symbol and debug information, remove `-s -w` from `BUILD_FLAGS` in the make file. -The latest tendermint is now installed. You can verify the installation by +The latest Tendermint is now installed. You can verify the installation by running: -``` +```sh tendermint version ``` @@ -64,7 +61,7 @@ tendermint version To start a one-node blockchain with a simple in-process application: -``` +```sh tendermint init tendermint node --proxy_app=kvstore ``` @@ -73,15 +70,13 @@ tendermint node --proxy_app=kvstore If you already have Tendermint installed, and you make updates, simply -``` -cd $GOPATH/src/github.com/tendermint/tendermint +```sh make install ``` To upgrade, run -``` -cd $GOPATH/src/github.com/tendermint/tendermint +```sh git pull origin master make install ``` @@ -92,7 +87,7 @@ Install [LevelDB](https://github.com/google/leveldb) (minimum version is 1.7). Install LevelDB with snappy (optionally). Below are commands for Ubuntu: -``` +```sh sudo apt-get update sudo apt install build-essential @@ -111,21 +106,21 @@ wget https://github.com/google/leveldb/archive/v1.20.tar.gz && \ Set a database backend to `cleveldb`: -``` +```toml # config/config.toml db_backend = "cleveldb" ``` To install Tendermint, run: -``` -CGO_LDFLAGS="-lsnappy" make install_c +```sh +CGO_LDFLAGS="-lsnappy" make install TENDERMINT_BUILD_OPTIONS=cleveldb ``` or run: -``` -CGO_LDFLAGS="-lsnappy" make build_c +```sh +CGO_LDFLAGS="-lsnappy" make build TENDERMINT_BUILD_OPTIONS=cleveldb ``` which puts the binary in `./build`. diff --git a/docs/introduction/quick-start.md b/docs/introduction/quick-start.md index 4602381b5..43ff975d9 100644 --- a/docs/introduction/quick-start.md +++ b/docs/introduction/quick-start.md @@ -6,7 +6,7 @@ order: 2 ## Overview -This is a quick start guide. If you have a vague idea about how Tendermint +This is a quick start guide. If you have a vague idea about how Tendermint works and want to get started right away, continue. ## Install @@ -16,9 +16,9 @@ works and want to get started right away, continue. To quickly get Tendermint installed on a fresh Ubuntu 16.04 machine, use [this script](https://git.io/fFfOR). 
-WARNING: do not run this on your local machine. +> :warning: Do not copy scripts to run on your machine without knowing what they do. -``` +```sh curl -L https://git.io/fFfOR | bash source ~/.profile ``` @@ -33,7 +33,7 @@ For manual installation, see the [install instructions](install.md) Running: -``` +```sh tendermint init ``` @@ -41,7 +41,7 @@ will create the required files for a single, local node. These files are found in `$HOME/.tendermint`: -``` +```sh $ ls $HOME/.tendermint config data @@ -56,48 +56,50 @@ Configuring a cluster is covered further below. ## Local Node -Start tendermint with a simple in-process application: +Start Tendermint with a simple in-process application: -``` +```sh tendermint node --proxy_app=kvstore ``` +> Note: `kvstore` is a non persistent app, if you would like to run an application with persistence run `--proxy_app=persistent_kvstore` + and blocks will start to stream in: -``` +```sh I[01-06|01:45:15.592] Executed block module=state height=1 validTxs=0 invalidTxs=0 I[01-06|01:45:15.624] Committed state module=state height=1 txs=0 appHash= ``` Check the status with: -``` +```sh curl -s localhost:26657/status ``` ### Sending Transactions -With the kvstore app running, we can send transactions: +With the KVstore app running, we can send transactions: -``` +```sh curl -s 'localhost:26657/broadcast_tx_commit?tx="abcd"' ``` and check that it worked with: -``` +```sh curl -s 'localhost:26657/abci_query?data="abcd"' ``` We can send transactions with a key and value too: -``` +```sh curl -s 'localhost:26657/broadcast_tx_commit?tx="name=satoshi"' ``` and query the key: -``` +```sh curl -s 'localhost:26657/abci_query?data="name"' ``` @@ -111,7 +113,7 @@ addresses below as IP1, IP2, IP3, IP4. Then, `ssh` into each machine, and execute [this script](https://git.io/fFfOR): -``` +```sh curl -L https://git.io/fFfOR | bash source ~/.profile ``` @@ -122,7 +124,7 @@ Next, use the `tendermint testnet` command to create four directories of config Before you can start the network, you'll need peers identifiers (IPs are not enough and can change). We'll refer to them as ID1, ID2, ID3, ID4. -``` +```sh tendermint show_node_id --home ./mytestnet/node0 tendermint show_node_id --home ./mytestnet/node1 tendermint show_node_id --home ./mytestnet/node2 @@ -131,7 +133,7 @@ tendermint show_node_id --home ./mytestnet/node3 Finally, from each machine, run: -``` +```sh tendermint node --home ./mytestnet/node0 --proxy_app=kvstore --p2p.persistent_peers="ID1@IP1:26656,ID2@IP2:26656,ID3@IP3:26656,ID4@IP4:26656" tendermint node --home ./mytestnet/node1 --proxy_app=kvstore --p2p.persistent_peers="ID1@IP1:26656,ID2@IP2:26656,ID3@IP3:26656,ID4@IP4:26656" tendermint node --home ./mytestnet/node2 --proxy_app=kvstore --p2p.persistent_peers="ID1@IP1:26656,ID2@IP2:26656,ID3@IP3:26656,ID4@IP4:26656" diff --git a/docs/introduction/what-is-tendermint.md b/docs/introduction/what-is-tendermint.md index ad808c460..9cff48a2a 100644 --- a/docs/introduction/what-is-tendermint.md +++ b/docs/introduction/what-is-tendermint.md @@ -2,7 +2,7 @@ order: 4 --- -# What is Tendermint? +# What is Tendermint Tendermint is software for securely and consistently replicating an application on many machines. By securely, we mean that Tendermint works @@ -74,7 +74,7 @@ Tendermint is in essence similar software, but with two key differences: the application logic that's right for them, from key-value store to cryptocurrency to e-voting platform and beyond. -### Bitcoin, Ethereum, etc. 
+### Bitcoin, Ethereum, etc Tendermint emerged in the tradition of cryptocurrencies like Bitcoin, Ethereum, etc. with the goal of providing a more efficient and secure @@ -227,7 +227,7 @@ design their message handlers to create a blockchain that does anything useful but this architecture provides a place to start. The diagram below illustrates the flow of messages via ABCI. -![](../imgs/abci.png) +![abci](../imgs/abci.png) ## A Note on Determinism @@ -263,7 +263,7 @@ Tendermint is an easy-to-understand, mostly asynchronous, BFT consensus protocol. The protocol follows a simple state machine that looks like this: -![](../imgs/consensus_logic.png) +![consensus-logic](../imgs/consensus_logic.png) Participants in the protocol are called **validators**; they take turns proposing blocks of transactions and voting on them. Blocks are @@ -299,8 +299,8 @@ introduces a few **locking** rules which modulate which paths can be followed in the flow diagram. Once a validator precommits a block, it is locked on that block. Then, -1. it must prevote for the block it is locked on -2. it can only unlock, and precommit for a new block, if there is a +1. it must prevote for the block it is locked on +2. it can only unlock, and precommit for a new block, if there is a polka for that block in a later round ## Stake @@ -325,8 +325,6 @@ The [Cosmos Network](https://cosmos.network) is designed to use this Proof-of-Stake mechanism across an array of cryptocurrencies implemented as ABCI applications. -The following diagram is Tendermint in a (technical) nutshell. [See here -for high resolution -version](https://github.com/mobfoundry/hackatom/blob/master/tminfo.pdf). +The following diagram is Tendermint in a (technical) nutshell. -![](../imgs/tm-transaction-flow.png) +![tx-flow](../imgs/tm-transaction-flow.png) diff --git a/docs/networks/docker-compose.md b/docs/networks/docker-compose.md index 1adb1c753..632ed5659 100644 --- a/docs/networks/docker-compose.md +++ b/docs/networks/docker-compose.md @@ -20,9 +20,7 @@ docker image. Note the binary will be mounted into the container so it can be updated without rebuilding the image. -``` -cd $GOPATH/src/github.com/tendermint/tendermint - +```sh # Build the linux binary in ./build make build-linux @@ -34,7 +32,7 @@ make build-docker-localnode To start a 4 node testnet run: -``` +```sh make localnet-start ``` @@ -48,9 +46,8 @@ on ports 26656-26657, 26659-26660, 26661-26662, and 26663-26664 respectively. To update the binary, just rebuild it and restart the nodes: -``` +```sh make build-linux -make localnet-stop make localnet-start ``` @@ -62,31 +59,46 @@ calling the `tendermint testnet` command. The `./build` directory is mounted to the `/tendermint` mount point to attach the binary and config files to the container. -To change the number of validators / non-validators change the `localnet-start` Makefile target: +To change the number of validators / non-validators change the `localnet-start` Makefile target [here](../../Makefile): -``` +```makefile localnet-start: localnet-stop - @if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 5 --n 3 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi - docker-compose up + @if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 5 --n 3 --o . 
--populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi + docker-compose up ``` The command now will generate config files for 5 validators and 3 -non-validators network. - -Before running it, don't forget to cleanup the old files: +non-validators. Along with generating new config files the docker-compose file needs to be edited. +Adding 4 more nodes is required in order to fully utilize the config files that were generated. +```yml + node3: # bump by 1 for every node + container_name: node3 # bump by 1 for every node + image: "tendermint/localnode" + environment: + - ID=3 + - LOG=${LOG:-tendermint.log} + ports: + - "26663-26664:26656-26657" # Bump 26663-26664 by one for every node + volumes: + - ./build:/tendermint:Z + networks: + localnet: + ipv4_address: 192.167.10.5 # bump the final digit by 1 for every node ``` -cd $GOPATH/src/github.com/tendermint/tendermint +Before running it, don't forget to cleanup the old files: + +```sh # Clear the build folder rm -rf ./build/node* ``` -## Configuring abci containers +## Configuring ABCI containers -To use your own abci applications with 4-node setup edit the [docker-compose.yaml](https://github.com/tendermint/tendermint/blob/master/docker-compose.yml) file and add image to your abci application. +To use your own ABCI applications with 4-node setup edit the [docker-compose.yaml](https://github.com/tendermint/tendermint/blob/master/docker-compose.yml) file and add image to your ABCI application. -``` +```yml abci0: container_name: abci0 image: "abci-image" @@ -133,9 +145,9 @@ To use your own abci applications with 4-node setup edit the [docker-compose.yam ``` -Override the [command](https://github.com/tendermint/tendermint/blob/master/networks/local/localnode/Dockerfile#L12) in each node to connect to it's abci. +Override the [command](https://github.com/tendermint/tendermint/blob/master/networks/local/localnode/Dockerfile#L12) in each node to connect to it's ABCI. -``` +```yml node0: container_name: node0 image: "tendermint/localnode" diff --git a/docs/networks/terraform-and-ansible.md b/docs/networks/terraform-and-ansible.md index ec6cee1ba..b11df3710 100644 --- a/docs/networks/terraform-and-ansible.md +++ b/docs/networks/terraform-and-ansible.md @@ -28,7 +28,7 @@ node testnet. The script more or less does everything described below. - Create SSH keys (`ssh-keygen`) - Set environment variables: -``` +```sh export DO_API_TOKEN="abcdef01234567890abcdef01234567890" export SSH_KEY_FILE="$HOME/.ssh/id_rsa.pub" ``` @@ -40,13 +40,13 @@ These will be used by both `terraform` and `ansible`. This step will create four Digital Ocean droplets. First, go to the correct directory: -``` +```sh cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/terraform ``` then: -``` +```sh terraform init terraform apply -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE" ``` @@ -72,13 +72,13 @@ number of droplets created). To create the node files run: -``` +```sh tendermint testnet ``` Then, to configure our droplets run: -``` +```sh ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible/mytestnet ``` @@ -87,7 +87,7 @@ configuration files to run a testnet. Next, we run the install role: -``` +```sh ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml ``` @@ -104,14 +104,14 @@ increasing). 
Next, open `roles/install/templates/systemd.service.j2` and look for the line `ExecStart` which should look something like: -``` +```sh ExecStart=/usr/bin/tendermint node --proxy_app=kvstore ``` and add the `--p2p.persistent_peers` flag with the relevant information for each node. The resulting file should look something like: -``` +```sh [Unit] Description={{service}} Requires=network-online.target @@ -132,13 +132,13 @@ WantedBy=multi-user.target Then, stop the nodes: -``` +```sh ansible-playbook -i inventory/digital_ocean.py -l sentrynet stop.yml ``` Finally, we run the install role again: -``` +```sh ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml ``` @@ -148,7 +148,7 @@ increasing. Your testnet is now up and running :) Peek at the logs with the status role: -``` +```sh ansible-playbook -i inventory/digital_ocean.py -l sentrynet status.yml ``` @@ -160,7 +160,7 @@ service provider. You can set up your nodes to log there automatically. Create an account and get your API key from the notes on [this page](https://app.logz.io/#/dashboard/data-sources/Filebeat), then: -``` +```sh yum install systemd-devel || echo "This will only work on RHEL-based systems." apt-get install libsystemd-dev || echo "This will only work on Debian-based systems." @@ -172,6 +172,6 @@ ansible-playbook -i inventory/digital_ocean.py -l sentrynet logzio.yml -e LOGZIO To remove your droplets, run: -``` +```sh terraform destroy -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE" ``` diff --git a/docs/package-lock.json b/docs/package-lock.json index c3f277d83..b65b61822 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -4,60 +4,163 @@ "lockfileVersion": 1, "requires": true, "dependencies": { - "@babel/code-frame": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.8.3.tgz", - "integrity": "sha512-a9gxpmdXtZEInkCSHUJDLHZVBgb1QS0jhss4cPP93EW7s+uC5bikET2twEF3KV+7rDblJcmNvTR7VJejqd2C2g==", + "@algolia/cache-browser-local-storage": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.6.0.tgz", + "integrity": "sha512-3ObeNwZ5gfDvKPp9NXdtbBrCtz/yR1oyDu/AReG73Oanua3y30Y11p7VQzzpLe2R/gDCLOGdRgr17h11lGy1Hg==", "requires": { - "@babel/highlight": "^7.8.3" + "@algolia/cache-common": "4.6.0" } }, - "@babel/compat-data": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.9.0.tgz", - "integrity": "sha512-zeFQrr+284Ekvd9e7KAX954LkapWiOmQtsfHirhxqfdlX6MEC32iRE+pqUGlYIBchdevaCwvzxWGSy/YBNI85g==", + "@algolia/cache-common": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.6.0.tgz", + "integrity": "sha512-mEedrPb2O3WwtiIHggFoIhTbHVCMNikxMiiN9kqmwZkdDfClfxm435OUGZfAl67rBZfc0DNs/jmPM2mUoefM9A==" + }, + "@algolia/cache-in-memory": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.6.0.tgz", + "integrity": "sha512-J7ayGokVWEFkuLxzgrIsPS4k1/ZndyGVpG/qPrG9RHVrs7ZogEhUSY1tbEyUlW3mGy7diIh+/52dtohDL/nbGQ==", "requires": { - "browserslist": "^4.9.1", - "invariant": "^2.2.4", - "semver": "^5.5.0" - }, - "dependencies": { - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - } + "@algolia/cache-common": "4.6.0" + } + }, + 
"@algolia/client-account": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.6.0.tgz", + "integrity": "sha512-0t2yU6wNBNJgAmrARHrM1llhANyPT4Q/1wu6yEzv2WfPXlfsHwMhtKYNti4/k8eswwUt9wAri10WFV6TJI48rg==", + "requires": { + "@algolia/client-common": "4.6.0", + "@algolia/client-search": "4.6.0", + "@algolia/transporter": "4.6.0" + } + }, + "@algolia/client-analytics": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.6.0.tgz", + "integrity": "sha512-7yfn9pabA21Uw2iZjW1MNN4IJUT5y/YSg+ZJ+3HqBB6SgzOOqY0N3fATsPeGuN9EqSfVnqvnIrJMS8mI0b5FzQ==", + "requires": { + "@algolia/client-common": "4.6.0", + "@algolia/client-search": "4.6.0", + "@algolia/requester-common": "4.6.0", + "@algolia/transporter": "4.6.0" + } + }, + "@algolia/client-common": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.6.0.tgz", + "integrity": "sha512-60jK0LK5H+6q6HyyMyoBBD0fIs8zZzJt6BiyJGQG90o3gUV/SnjiNxO9Bx0RRlqdkE5s0OYFu1L7P9Y5TX7oAw==", + "requires": { + "@algolia/requester-common": "4.6.0", + "@algolia/transporter": "4.6.0" + } + }, + "@algolia/client-recommendation": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@algolia/client-recommendation/-/client-recommendation-4.6.0.tgz", + "integrity": "sha512-j+Yb1z5QeIRDCCO+9hS9oZS3KNqRogPHDbJJsLTt6pkrs4CG2UVLVV67M977B1nzJ9OzaEki3VbpGQhRhPGNfQ==", + "requires": { + "@algolia/client-common": "4.6.0", + "@algolia/requester-common": "4.6.0", + "@algolia/transporter": "4.6.0" + } + }, + "@algolia/client-search": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.6.0.tgz", + "integrity": "sha512-+qA1NA88YnXvuCKifegfgts1RQs8IzcwccQqyurz8ins4hypZL1tXN2BkrOqqDIgvYIrUvFyhv+gLO6U9PpDUA==", + "requires": { + "@algolia/client-common": "4.6.0", + "@algolia/requester-common": "4.6.0", + "@algolia/transporter": "4.6.0" + } + }, + "@algolia/logger-common": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.6.0.tgz", + "integrity": "sha512-F+0HTGSQzJfWsX/cJq2l4eG2Y5JA6pqZ0YETyo5XJhZX4JaDrGszVKuOqp8kovZF/Ifebywxb8JdCiSUskmbig==" + }, + "@algolia/logger-console": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.6.0.tgz", + "integrity": "sha512-ers7OhfU6qBQl6s7MOe5gNUkcpa7LGrhEzDWnD0cUwLSd5BvWt7zEN69O2CZVbvAUZYlZ5zJTzMMa49s0VXrKQ==", + "requires": { + "@algolia/logger-common": "4.6.0" + } + }, + "@algolia/requester-browser-xhr": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.6.0.tgz", + "integrity": "sha512-ugrJT25VUkoKrl5vJVFclMdogbhTiDZ38Gss4xfTiSsP/SGE/0ei5VEOMEcj/bjkurJjPky1HfJZ3ykJhIsfCA==", + "requires": { + "@algolia/requester-common": "4.6.0" + } + }, + "@algolia/requester-common": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.6.0.tgz", + "integrity": "sha512-DJ5iIGBGrRudimaaFnpBFM19pv8SsXiMYuukn9q1GgQh2mPPBCBBJiezKc7+OzE1UyCVrAFBpR/hrJnflZnRdQ==" + }, + "@algolia/requester-node-http": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.6.0.tgz", + "integrity": "sha512-MPZK3oZz0jSBsqrGiPxv7LOKMUNknlaRNyRDy0v/ASIYG+GvLhGTdEzG5Eyw5tgSvBr8CWrWM5tDC31EH40Ndw==", + "requires": { + "@algolia/requester-common": "4.6.0" + } + }, + 
"@algolia/transporter": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.6.0.tgz", + "integrity": "sha512-xp+HI8sB8gLCvP00scaOVPQEk5H7nboWUxrwLKyVUvtUO4o003bOfFPsH86NRyu5Dv7fzX9b8EH3rVxcLOhjqg==", + "requires": { + "@algolia/cache-common": "4.6.0", + "@algolia/logger-common": "4.6.0", + "@algolia/requester-common": "4.6.0" + } + }, + "@babel/code-frame": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.4.tgz", + "integrity": "sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==", + "requires": { + "@babel/highlight": "^7.10.4" } }, + "@babel/compat-data": { + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.12.5.tgz", + "integrity": "sha512-DTsS7cxrsH3by8nqQSpFSyjSfSYl57D6Cf4q8dW3LK83tBKBDCkfcay1nYkXq1nIHXnpX8WMMb/O25HOy3h1zg==" + }, "@babel/core": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.9.0.tgz", - "integrity": "sha512-kWc7L0fw1xwvI0zi8OKVBuxRVefwGOrKSQMvrQ3dW+bIIavBY3/NpXmpjMy7bQnLgwgzWQZ8TlM57YHpHNHz4w==", - "requires": { - "@babel/code-frame": "^7.8.3", - "@babel/generator": "^7.9.0", - "@babel/helper-module-transforms": "^7.9.0", - "@babel/helpers": "^7.9.0", - "@babel/parser": "^7.9.0", - "@babel/template": "^7.8.6", - "@babel/traverse": "^7.9.0", - "@babel/types": "^7.9.0", + "version": "7.12.3", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.3.tgz", + "integrity": "sha512-0qXcZYKZp3/6N2jKYVxZv0aNCsxTSVCiK72DTiTYZAu7sjg73W0/aynWjMbiGd87EQL4WyA8reiJVh92AVla9g==", + "requires": { + "@babel/code-frame": "^7.10.4", + "@babel/generator": "^7.12.1", + "@babel/helper-module-transforms": "^7.12.1", + "@babel/helpers": "^7.12.1", + "@babel/parser": "^7.12.3", + "@babel/template": "^7.10.4", + "@babel/traverse": "^7.12.1", + "@babel/types": "^7.12.1", "convert-source-map": "^1.7.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.1", "json5": "^2.1.2", - "lodash": "^4.17.13", + "lodash": "^4.17.19", "resolve": "^1.3.2", "semver": "^5.4.1", "source-map": "^0.5.0" }, "dependencies": { "debug": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", - "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.2.0.tgz", + "integrity": "sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg==", "requires": { - "ms": "^2.1.1" + "ms": "2.1.2" } }, "json5": { @@ -86,13 +189,12 @@ } }, "@babel/generator": { - "version": "7.9.5", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.9.5.tgz", - "integrity": "sha512-GbNIxVB3ZJe3tLeDm1HSn2AhuD/mVcyLDpgtLXa5tplmWrJdF/elxB56XNqCuD6szyNkDi6wuoKXln3QeBmCHQ==", + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.12.5.tgz", + "integrity": "sha512-m16TQQJ8hPt7E+OS/XVQg/7U184MLXtvuGbCdA7na61vha+ImkyyNM/9DDA0unYCVZn3ZOhng+qz48/KBOT96A==", "requires": { - "@babel/types": "^7.9.5", + "@babel/types": "^7.12.5", "jsesc": "^2.5.1", - "lodash": "^4.17.13", "source-map": "^0.5.0" }, "dependencies": { @@ -104,31 +206,30 @@ } }, "@babel/helper-annotate-as-pure": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.8.3.tgz", - "integrity": 
"sha512-6o+mJrZBxOoEX77Ezv9zwW7WV8DdluouRKNY/IR5u/YTMuKHgugHOzYWlYvYLpLA9nPsQCAAASpCIbjI9Mv+Uw==", + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.10.4.tgz", + "integrity": "sha512-XQlqKQP4vXFB7BN8fEEerrmYvHp3fK/rBkRFz9jaJbzK0B1DSfej9Kc7ZzE8Z/OnId1jpJdNAZ3BFQjWG68rcA==", "requires": { - "@babel/types": "^7.8.3" + "@babel/types": "^7.10.4" } }, "@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.8.3.tgz", - "integrity": "sha512-5eFOm2SyFPK4Rh3XMMRDjN7lBH0orh3ss0g3rTYZnBQ+r6YPj7lgDyCvPphynHvUrobJmeMignBr6Acw9mAPlw==", + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.10.4.tgz", + "integrity": "sha512-L0zGlFrGWZK4PbT8AszSfLTM5sDU1+Az/En9VrdT8/LmEiJt4zXt+Jve9DCAnQcbqDhCI+29y/L93mrDzddCcg==", "requires": { - "@babel/helper-explode-assignable-expression": "^7.8.3", - "@babel/types": "^7.8.3" + "@babel/helper-explode-assignable-expression": "^7.10.4", + "@babel/types": "^7.10.4" } }, "@babel/helper-compilation-targets": { - "version": "7.8.7", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.8.7.tgz", - "integrity": "sha512-4mWm8DCK2LugIS+p1yArqvG1Pf162upsIsjE7cNBjez+NjliQpVhj20obE520nao0o14DaTnFJv+Fw5a0JpoUw==", - "requires": { - "@babel/compat-data": "^7.8.6", - "browserslist": "^4.9.1", - "invariant": "^2.2.4", - "levenary": "^1.1.1", + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.12.5.tgz", + "integrity": "sha512-+qH6NrscMolUlzOYngSBMIOQpKUGPPsc61Bu5W10mg84LxZ7cmvnBHzARKbDoFxVvqqAbj6Tg6N7bSrWSPXMyw==", + "requires": { + "@babel/compat-data": "^7.12.5", + "@babel/helper-validator-option": "^7.12.1", + "browserslist": "^4.14.5", "semver": "^5.5.0" }, "dependencies": { @@ -140,305 +241,343 @@ } }, "@babel/helper-create-class-features-plugin": { - "version": "7.9.5", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.9.5.tgz", - "integrity": "sha512-IipaxGaQmW4TfWoXdqjY0TzoXQ1HRS0kPpEgvjosb3u7Uedcq297xFqDQiCcQtRRwzIMif+N1MLVI8C5a4/PAA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.12.1.tgz", + "integrity": "sha512-hkL++rWeta/OVOBTRJc9a5Azh5mt5WgZUGAKMD8JM141YsE08K//bp1unBBieO6rUKkIPyUE0USQ30jAy3Sk1w==", "requires": { - "@babel/helper-function-name": "^7.9.5", - "@babel/helper-member-expression-to-functions": "^7.8.3", - "@babel/helper-optimise-call-expression": "^7.8.3", - "@babel/helper-plugin-utils": "^7.8.3", - "@babel/helper-replace-supers": "^7.8.6", - "@babel/helper-split-export-declaration": "^7.8.3" + "@babel/helper-function-name": "^7.10.4", + "@babel/helper-member-expression-to-functions": "^7.12.1", + "@babel/helper-optimise-call-expression": "^7.10.4", + "@babel/helper-replace-supers": "^7.12.1", + "@babel/helper-split-export-declaration": "^7.10.4" } }, "@babel/helper-create-regexp-features-plugin": { - "version": "7.8.8", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.8.8.tgz", - "integrity": 
"sha512-LYVPdwkrQEiX9+1R29Ld/wTrmQu1SSKYnuOk3g0CkcZMA1p0gsNxJFj/3gBdaJ7Cg0Fnek5z0DsMULePP7Lrqg==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.12.1.tgz", + "integrity": "sha512-rsZ4LGvFTZnzdNZR5HZdmJVuXK8834R5QkF3WvcnBhrlVtF0HSIUC6zbreL9MgjTywhKokn8RIYRiq99+DLAxA==", "requires": { - "@babel/helper-annotate-as-pure": "^7.8.3", - "@babel/helper-regex": "^7.8.3", - "regexpu-core": "^4.7.0" + "@babel/helper-annotate-as-pure": "^7.10.4", + "@babel/helper-regex": "^7.10.4", + "regexpu-core": "^4.7.1" } }, "@babel/helper-define-map": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-define-map/-/helper-define-map-7.8.3.tgz", - "integrity": "sha512-PoeBYtxoZGtct3md6xZOCWPcKuMuk3IHhgxsRRNtnNShebf4C8YonTSblsK4tvDbm+eJAw2HAPOfCr+Q/YRG/g==", + "version": "7.10.5", + "resolved": "https://registry.npmjs.org/@babel/helper-define-map/-/helper-define-map-7.10.5.tgz", + "integrity": "sha512-fMw4kgFB720aQFXSVaXr79pjjcW5puTCM16+rECJ/plGS+zByelE8l9nCpV1GibxTnFVmUuYG9U8wYfQHdzOEQ==", "requires": { - "@babel/helper-function-name": "^7.8.3", - "@babel/types": "^7.8.3", - "lodash": "^4.17.13" + "@babel/helper-function-name": "^7.10.4", + "@babel/types": "^7.10.5", + "lodash": "^4.17.19" } }, "@babel/helper-explode-assignable-expression": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.8.3.tgz", - "integrity": "sha512-N+8eW86/Kj147bO9G2uclsg5pwfs/fqqY5rwgIL7eTBklgXjcOJ3btzS5iM6AitJcftnY7pm2lGsrJVYLGjzIw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.12.1.tgz", + "integrity": "sha512-dmUwH8XmlrUpVqgtZ737tK88v07l840z9j3OEhCLwKTkjlvKpfqXVIZ0wpK3aeOxspwGrf/5AP5qLx4rO3w5rA==", "requires": { - "@babel/traverse": "^7.8.3", - "@babel/types": "^7.8.3" + "@babel/types": "^7.12.1" } }, "@babel/helper-function-name": { - "version": "7.9.5", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.9.5.tgz", - "integrity": "sha512-JVcQZeXM59Cd1qanDUxv9fgJpt3NeKUaqBqUEvfmQ+BCOKq2xUgaWZW2hr0dkbyJgezYuplEoh5knmrnS68efw==", + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.10.4.tgz", + "integrity": "sha512-YdaSyz1n8gY44EmN7x44zBn9zQ1Ry2Y+3GTA+3vH6Mizke1Vw0aWDM66FOYEPw8//qKkmqOckrGgTYa+6sceqQ==", "requires": { - "@babel/helper-get-function-arity": "^7.8.3", - "@babel/template": "^7.8.3", - "@babel/types": "^7.9.5" + "@babel/helper-get-function-arity": "^7.10.4", + "@babel/template": "^7.10.4", + "@babel/types": "^7.10.4" } }, "@babel/helper-get-function-arity": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.8.3.tgz", - "integrity": "sha512-FVDR+Gd9iLjUMY1fzE2SR0IuaJToR4RkCDARVfsBBPSP53GEqSFjD8gNyxg246VUyc/ALRxFaAK8rVG7UT7xRA==", + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.10.4.tgz", + "integrity": "sha512-EkN3YDB+SRDgiIUnNgcmiD361ti+AVbL3f3Henf6dqqUyr5dMsorno0lJWJuLhDhkI5sYEpgj6y9kB8AOU1I2A==", "requires": { - "@babel/types": "^7.8.3" + "@babel/types": "^7.10.4" } }, "@babel/helper-hoist-variables": { - "version": "7.8.3", - "resolved": 
"https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.8.3.tgz", - "integrity": "sha512-ky1JLOjcDUtSc+xkt0xhYff7Z6ILTAHKmZLHPxAhOP0Nd77O+3nCsd6uSVYur6nJnCI029CrNbYlc0LoPfAPQg==", + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.10.4.tgz", + "integrity": "sha512-wljroF5PgCk2juF69kanHVs6vrLwIPNp6DLD+Lrl3hoQ3PpPPikaDRNFA+0t81NOoMt2DL6WW/mdU8k4k6ZzuA==", "requires": { - "@babel/types": "^7.8.3" + "@babel/types": "^7.10.4" } }, "@babel/helper-member-expression-to-functions": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.8.3.tgz", - "integrity": "sha512-fO4Egq88utkQFjbPrSHGmGLFqmrshs11d46WI+WZDESt7Wu7wN2G2Iu+NMMZJFDOVRHAMIkB5SNh30NtwCA7RA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.12.1.tgz", + "integrity": "sha512-k0CIe3tXUKTRSoEx1LQEPFU9vRQfqHtl+kf8eNnDqb4AUJEy5pz6aIiog+YWtVm2jpggjS1laH68bPsR+KWWPQ==", "requires": { - "@babel/types": "^7.8.3" + "@babel/types": "^7.12.1" } }, "@babel/helper-module-imports": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.8.3.tgz", - "integrity": "sha512-R0Bx3jippsbAEtzkpZ/6FIiuzOURPcMjHp+Z6xPe6DtApDJx+w7UYyOLanZqO8+wKR9G10s/FmHXvxaMd9s6Kg==", + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.12.5.tgz", + "integrity": "sha512-SR713Ogqg6++uexFRORf/+nPXMmWIn80TALu0uaFb+iQIUoR7bOC7zBWyzBs5b3tBBJXuyD0cRu1F15GyzjOWA==", "requires": { - "@babel/types": "^7.8.3" + "@babel/types": "^7.12.5" } }, "@babel/helper-module-transforms": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.9.0.tgz", - "integrity": "sha512-0FvKyu0gpPfIQ8EkxlrAydOWROdHpBmiCiRwLkUiBGhCUPRRbVD2/tm3sFr/c/GWFrQ/ffutGUAnx7V0FzT2wA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.12.1.tgz", + "integrity": "sha512-QQzehgFAZ2bbISiCpmVGfiGux8YVFXQ0abBic2Envhej22DVXV9nCFaS5hIQbkyo1AdGb+gNME2TSh3hYJVV/w==", "requires": { - "@babel/helper-module-imports": "^7.8.3", - "@babel/helper-replace-supers": "^7.8.6", - "@babel/helper-simple-access": "^7.8.3", - "@babel/helper-split-export-declaration": "^7.8.3", - "@babel/template": "^7.8.6", - "@babel/types": "^7.9.0", - "lodash": "^4.17.13" + "@babel/helper-module-imports": "^7.12.1", + "@babel/helper-replace-supers": "^7.12.1", + "@babel/helper-simple-access": "^7.12.1", + "@babel/helper-split-export-declaration": "^7.11.0", + "@babel/helper-validator-identifier": "^7.10.4", + "@babel/template": "^7.10.4", + "@babel/traverse": "^7.12.1", + "@babel/types": "^7.12.1", + "lodash": "^4.17.19" } }, "@babel/helper-optimise-call-expression": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.8.3.tgz", - "integrity": "sha512-Kag20n86cbO2AvHca6EJsvqAd82gc6VMGule4HwebwMlwkpXuVqrNRj6CkCV2sKxgi9MyAUnZVnZ6lJ1/vKhHQ==", + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.10.4.tgz", + "integrity": "sha512-n3UGKY4VXwXThEiKrgRAoVPBMqeoPgHVqiHZOanAJCG9nQUL2pLRQirUzl0ioKclHGpGqRgIOkgcIJaIWLpygg==", 
"requires": { - "@babel/types": "^7.8.3" + "@babel/types": "^7.10.4" } }, "@babel/helper-plugin-utils": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.8.3.tgz", - "integrity": "sha512-j+fq49Xds2smCUNYmEHF9kGNkhbet6yVIBp4e6oeQpH1RUs/Ir06xUKzDjDkGcaaokPiTNs2JBWHjaE4csUkZQ==" + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz", + "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==" }, "@babel/helper-regex": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-regex/-/helper-regex-7.8.3.tgz", - "integrity": "sha512-BWt0QtYv/cg/NecOAZMdcn/waj/5P26DR4mVLXfFtDokSR6fyuG0Pj+e2FqtSME+MqED1khnSMulkmGl8qWiUQ==", + "version": "7.10.5", + "resolved": "https://registry.npmjs.org/@babel/helper-regex/-/helper-regex-7.10.5.tgz", + "integrity": "sha512-68kdUAzDrljqBrio7DYAEgCoJHxppJOERHOgOrDN7WjOzP0ZQ1LsSDRXcemzVZaLvjaJsJEESb6qt+znNuENDg==", "requires": { - "lodash": "^4.17.13" + "lodash": "^4.17.19" } }, "@babel/helper-remap-async-to-generator": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.8.3.tgz", - "integrity": "sha512-kgwDmw4fCg7AVgS4DukQR/roGp+jP+XluJE5hsRZwxCYGg+Rv9wSGErDWhlI90FODdYfd4xG4AQRiMDjjN0GzA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.12.1.tgz", + "integrity": "sha512-9d0KQCRM8clMPcDwo8SevNs+/9a8yWVVmaE80FGJcEP8N1qToREmWEGnBn8BUlJhYRFz6fqxeRL1sl5Ogsed7A==", "requires": { - "@babel/helper-annotate-as-pure": "^7.8.3", - "@babel/helper-wrap-function": "^7.8.3", - "@babel/template": "^7.8.3", - "@babel/traverse": "^7.8.3", - "@babel/types": "^7.8.3" + "@babel/helper-annotate-as-pure": "^7.10.4", + "@babel/helper-wrap-function": "^7.10.4", + "@babel/types": "^7.12.1" } }, "@babel/helper-replace-supers": { - "version": "7.8.6", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.8.6.tgz", - "integrity": "sha512-PeMArdA4Sv/Wf4zXwBKPqVj7n9UF/xg6slNRtZW84FM7JpE1CbG8B612FyM4cxrf4fMAMGO0kR7voy1ForHHFA==", + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.12.5.tgz", + "integrity": "sha512-5YILoed0ZyIpF4gKcpZitEnXEJ9UoDRki1Ey6xz46rxOzfNMAhVIJMoune1hmPVxh40LRv1+oafz7UsWX+vyWA==", "requires": { - "@babel/helper-member-expression-to-functions": "^7.8.3", - "@babel/helper-optimise-call-expression": "^7.8.3", - "@babel/traverse": "^7.8.6", - "@babel/types": "^7.8.6" + "@babel/helper-member-expression-to-functions": "^7.12.1", + "@babel/helper-optimise-call-expression": "^7.10.4", + "@babel/traverse": "^7.12.5", + "@babel/types": "^7.12.5" } }, "@babel/helper-simple-access": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.8.3.tgz", - "integrity": "sha512-VNGUDjx5cCWg4vvCTR8qQ7YJYZ+HBjxOgXEl7ounz+4Sn7+LMD3CFrCTEU6/qXKbA2nKg21CwhhBzO0RpRbdCw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.12.1.tgz", + "integrity": "sha512-OxBp7pMrjVewSSC8fXDFrHrBcJATOOFssZwv16F3/6Xtc138GHybBfPbm9kfiqQHKhYQrlamWILwlDCeyMFEaA==", "requires": { - "@babel/template": "^7.8.3", - "@babel/types": "^7.8.3" + "@babel/types": "^7.12.1" + } + }, + 
"@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.12.1.tgz", + "integrity": "sha512-Mf5AUuhG1/OCChOJ/HcADmvcHM42WJockombn8ATJG3OnyiSxBK/Mm5x78BQWvmtXZKHgbjdGL2kin/HOLlZGA==", + "requires": { + "@babel/types": "^7.12.1" } }, "@babel/helper-split-export-declaration": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.8.3.tgz", - "integrity": "sha512-3x3yOeyBhW851hroze7ElzdkeRXQYQbFIb7gLK1WQYsw2GWDay5gAJNw1sWJ0VFP6z5J1whqeXH/WCdCjZv6dA==", + "version": "7.11.0", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.11.0.tgz", + "integrity": "sha512-74Vejvp6mHkGE+m+k5vHY93FX2cAtrw1zXrZXRlG4l410Nm9PxfEiVTn1PjDPV5SnmieiueY4AFg2xqhNFuuZg==", "requires": { - "@babel/types": "^7.8.3" + "@babel/types": "^7.11.0" } }, "@babel/helper-validator-identifier": { - "version": "7.9.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.9.5.tgz", - "integrity": "sha512-/8arLKUFq882w4tWGj9JYzRpAlZgiWUJ+dtteNTDqrRBz9Iguck9Rn3ykuBDoUwh2TO4tSAJlrxDUOXWklJe4g==" + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.4.tgz", + "integrity": "sha512-3U9y+43hz7ZM+rzG24Qe2mufW5KhvFg/NhnNph+i9mgCtdTCtMJuI1TMkrIUiK7Ix4PYlRF9I5dhqaLYA/ADXw==" + }, + "@babel/helper-validator-option": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.12.1.tgz", + "integrity": "sha512-YpJabsXlJVWP0USHjnC/AQDTLlZERbON577YUVO/wLpqyj6HAtVYnWaQaN0iUN+1/tWn3c+uKKXjRut5115Y2A==" }, "@babel/helper-wrap-function": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.8.3.tgz", - "integrity": "sha512-LACJrbUET9cQDzb6kG7EeD7+7doC3JNvUgTEQOx2qaO1fKlzE/Bf05qs9w1oXQMmXlPO65lC3Tq9S6gZpTErEQ==", + "version": "7.12.3", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.12.3.tgz", + "integrity": "sha512-Cvb8IuJDln3rs6tzjW3Y8UeelAOdnpB8xtQ4sme2MSZ9wOxrbThporC0y/EtE16VAtoyEfLM404Xr1e0OOp+ow==", "requires": { - "@babel/helper-function-name": "^7.8.3", - "@babel/template": "^7.8.3", - "@babel/traverse": "^7.8.3", - "@babel/types": "^7.8.3" + "@babel/helper-function-name": "^7.10.4", + "@babel/template": "^7.10.4", + "@babel/traverse": "^7.10.4", + "@babel/types": "^7.10.4" } }, "@babel/helpers": { - "version": "7.9.2", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.9.2.tgz", - "integrity": "sha512-JwLvzlXVPjO8eU9c/wF9/zOIN7X6h8DYf7mG4CiFRZRvZNKEF5dQ3H3V+ASkHoIB3mWhatgl5ONhyqHRI6MppA==", + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.12.5.tgz", + "integrity": "sha512-lgKGMQlKqA8meJqKsW6rUnc4MdUk35Ln0ATDqdM1a/UpARODdI4j5Y5lVfUScnSNkJcdCRAaWkspykNoFg9sJA==", "requires": { - "@babel/template": "^7.8.3", - "@babel/traverse": "^7.9.0", - "@babel/types": "^7.9.0" + "@babel/template": "^7.10.4", + "@babel/traverse": "^7.12.5", + "@babel/types": "^7.12.5" } }, "@babel/highlight": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.9.0.tgz", - "integrity": 
"sha512-lJZPilxX7Op3Nv/2cvFdnlepPXDxi29wxteT57Q965oc5R9v86ztx0jfxVrTcBk8C2kcPkkDa2Z4T3ZsPPVWsQ==", + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.10.4.tgz", + "integrity": "sha512-i6rgnR/YgPEQzZZnbTHHuZdlE8qyoBNalD6F+q4vAFlcMEcqmkoG+mPqJYJCo63qPf74+Y1UZsl3l6f7/RIkmA==", "requires": { - "@babel/helper-validator-identifier": "^7.9.0", + "@babel/helper-validator-identifier": "^7.10.4", "chalk": "^2.0.0", "js-tokens": "^4.0.0" } }, "@babel/parser": { - "version": "7.9.4", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.9.4.tgz", - "integrity": "sha512-bC49otXX6N0/VYhgOMh4gnP26E9xnDZK3TmbNpxYzzz9BQLBosQwfyOe9/cXUU3txYhTzLCbcqd5c8y/OmCjHA==" + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.12.5.tgz", + "integrity": "sha512-FVM6RZQ0mn2KCf1VUED7KepYeUWoVShczewOCfm3nzoBybaih51h+sYVVGthW9M6lPByEPTQf+xm27PBdlpwmQ==" }, "@babel/plugin-proposal-async-generator-functions": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.8.3.tgz", - "integrity": "sha512-NZ9zLv848JsV3hs8ryEh7Uaz/0KsmPLqv0+PdkDJL1cJy0K4kOCFa8zc1E3mp+RHPQcpdfb/6GovEsW4VDrOMw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.12.1.tgz", + "integrity": "sha512-d+/o30tJxFxrA1lhzJqiUcEJdI6jKlNregCv5bASeGf2Q4MXmnwH7viDo7nhx1/ohf09oaH8j1GVYG/e3Yqk6A==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3", - "@babel/helper-remap-async-to-generator": "^7.8.3", + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/helper-remap-async-to-generator": "^7.12.1", "@babel/plugin-syntax-async-generators": "^7.8.0" } }, "@babel/plugin-proposal-class-properties": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.8.3.tgz", - "integrity": "sha512-EqFhbo7IosdgPgZggHaNObkmO1kNUe3slaKu54d5OWvy+p9QIKOzK1GAEpAIsZtWVtPXUHSMcT4smvDrCfY4AA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.12.1.tgz", + "integrity": "sha512-cKp3dlQsFsEs5CWKnN7BnSHOd0EOW8EKpEjkoz1pO2E5KzIDNV9Ros1b0CnmbVgAGXJubOYVBOGCT1OmJwOI7w==", "requires": { - "@babel/helper-create-class-features-plugin": "^7.8.3", - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-create-class-features-plugin": "^7.12.1", + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-proposal-decorators": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.8.3.tgz", - "integrity": "sha512-e3RvdvS4qPJVTe288DlXjwKflpfy1hr0j5dz5WpIYYeP7vQZg2WfAEIp8k5/Lwis/m5REXEteIz6rrcDtXXG7w==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.12.1.tgz", + "integrity": "sha512-knNIuusychgYN8fGJHONL0RbFxLGawhXOJNLBk75TniTsZZeA+wdkDuv6wp4lGwzQEKjZi6/WYtnb3udNPmQmQ==", "requires": { - "@babel/helper-create-class-features-plugin": "^7.8.3", - "@babel/helper-plugin-utils": "^7.8.3", - "@babel/plugin-syntax-decorators": "^7.8.3" + "@babel/helper-create-class-features-plugin": "^7.12.1", + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/plugin-syntax-decorators": "^7.12.1" } }, "@babel/plugin-proposal-dynamic-import": { - "version": "7.8.3", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.8.3.tgz", - "integrity": "sha512-NyaBbyLFXFLT9FP+zk0kYlUlA8XtCUbehs67F0nnEg7KICgMc2mNkIeu9TYhKzyXMkrapZFwAhXLdnt4IYHy1w==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.12.1.tgz", + "integrity": "sha512-a4rhUSZFuq5W8/OO8H7BL5zspjnc1FLd9hlOxIK/f7qG4a0qsqk8uvF/ywgBA8/OmjsapjpvaEOYItfGG1qIvQ==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3", + "@babel/helper-plugin-utils": "^7.10.4", "@babel/plugin-syntax-dynamic-import": "^7.8.0" } }, + "@babel/plugin-proposal-export-namespace-from": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.12.1.tgz", + "integrity": "sha512-6CThGf0irEkzujYS5LQcjBx8j/4aQGiVv7J9+2f7pGfxqyKh3WnmVJYW3hdrQjyksErMGBPQrCnHfOtna+WLbw==", + "requires": { + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3" + } + }, "@babel/plugin-proposal-json-strings": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.8.3.tgz", - "integrity": "sha512-KGhQNZ3TVCQG/MjRbAUwuH+14y9q0tpxs1nWWs3pbSleRdDro9SAMMDyye8HhY1gqZ7/NqIc8SKhya0wRDgP1Q==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.12.1.tgz", + "integrity": "sha512-GoLDUi6U9ZLzlSda2Df++VSqDJg3CG+dR0+iWsv6XRw1rEq+zwt4DirM9yrxW6XWaTpmai1cWJLMfM8qQJf+yw==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3", + "@babel/helper-plugin-utils": "^7.10.4", "@babel/plugin-syntax-json-strings": "^7.8.0" } }, + "@babel/plugin-proposal-logical-assignment-operators": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.12.1.tgz", + "integrity": "sha512-k8ZmVv0JU+4gcUGeCDZOGd0lCIamU/sMtIiX3UWnUc5yzgq6YUGyEolNYD+MLYKfSzgECPcqetVcJP9Afe/aCA==", + "requires": { + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" + } + }, "@babel/plugin-proposal-nullish-coalescing-operator": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.8.3.tgz", - "integrity": "sha512-TS9MlfzXpXKt6YYomudb/KU7nQI6/xnapG6in1uZxoxDghuSMZsPb6D2fyUwNYSAp4l1iR7QtFOjkqcRYcUsfw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.12.1.tgz", + "integrity": "sha512-nZY0ESiaQDI1y96+jk6VxMOaL4LPo/QDHBqL+SF3/vl6dHkTwHlOI8L4ZwuRBHgakRBw5zsVylel7QPbbGuYgg==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3", + "@babel/helper-plugin-utils": "^7.10.4", "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.0" } }, "@babel/plugin-proposal-numeric-separator": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.8.3.tgz", - "integrity": "sha512-jWioO1s6R/R+wEHizfaScNsAx+xKgwTLNXSh7tTC4Usj3ItsPEhYkEpU4h+lpnBwq7NBVOJXfO6cRFYcX69JUQ==", + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.12.5.tgz", + 
"integrity": "sha512-UiAnkKuOrCyjZ3sYNHlRlfuZJbBHknMQ9VMwVeX97Ofwx7RpD6gS2HfqTCh8KNUQgcOm8IKt103oR4KIjh7Q8g==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/plugin-syntax-numeric-separator": "^7.10.4" } }, "@babel/plugin-proposal-object-rest-spread": { - "version": "7.9.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.9.5.tgz", - "integrity": "sha512-VP2oXvAf7KCYTthbUHwBlewbl1Iq059f6seJGsxMizaCdgHIeczOr7FBqELhSqfkIl04Fi8okzWzl63UKbQmmg==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz", + "integrity": "sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3", + "@babel/helper-plugin-utils": "^7.10.4", "@babel/plugin-syntax-object-rest-spread": "^7.8.0", - "@babel/plugin-transform-parameters": "^7.9.5" + "@babel/plugin-transform-parameters": "^7.12.1" } }, "@babel/plugin-proposal-optional-catch-binding": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.8.3.tgz", - "integrity": "sha512-0gkX7J7E+AtAw9fcwlVQj8peP61qhdg/89D5swOkjYbkboA2CVckn3kiyum1DE0wskGb7KJJxBdyEBApDLLVdw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.12.1.tgz", + "integrity": "sha512-hFvIjgprh9mMw5v42sJWLI1lzU5L2sznP805zeT6rySVRA0Y18StRhDqhSxlap0oVgItRsB6WSROp4YnJTJz0g==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3", + "@babel/helper-plugin-utils": "^7.10.4", "@babel/plugin-syntax-optional-catch-binding": "^7.8.0" } }, "@babel/plugin-proposal-optional-chaining": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.9.0.tgz", - "integrity": "sha512-NDn5tu3tcv4W30jNhmc2hyD5c56G6cXx4TesJubhxrJeCvuuMpttxr0OnNCqbZGhFjLrg+NIhxxC+BK5F6yS3w==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.12.1.tgz", + "integrity": "sha512-c2uRpY6WzaVDzynVY9liyykS+kVU+WRZPMPYpkelXH8KBt1oXoI89kPbZKKG/jDT5UK92FTW2fZkZaJhdiBabw==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3", + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1", "@babel/plugin-syntax-optional-chaining": "^7.8.0" } }, + "@babel/plugin-proposal-private-methods": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.12.1.tgz", + "integrity": "sha512-mwZ1phvH7/NHK6Kf8LP7MYDogGV+DKB1mryFOEwx5EBNQrosvIczzZFTUmWaeujd5xT6G1ELYWUz3CutMhjE1w==", + "requires": { + "@babel/helper-create-class-features-plugin": "^7.12.1", + "@babel/helper-plugin-utils": "^7.10.4" + } + }, "@babel/plugin-proposal-unicode-property-regex": { - "version": "7.8.8", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.8.8.tgz", - "integrity": "sha512-EVhjVsMpbhLw9ZfHWSx2iy13Q8Z/eg8e8ccVWt23sWQK5l1UdkoLJPN5w69UA4uITGBnEZD2JOe4QOHycYKv8A==", + "version": "7.12.1", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.12.1.tgz", + "integrity": "sha512-MYq+l+PvHuw/rKUz1at/vb6nCnQ2gmJBNaM62z0OgH7B2W1D9pvkpYtlti9bGtizNIU1K3zm4bZF9F91efVY0w==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.8.8", - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-create-regexp-features-plugin": "^7.12.1", + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-syntax-async-generators": { @@ -449,12 +588,20 @@ "@babel/helper-plugin-utils": "^7.8.0" } }, + "@babel/plugin-syntax-class-properties": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.1.tgz", + "integrity": "sha512-U40A76x5gTwmESz+qiqssqmeEsKvcSyvtgktrm0uzcARAmM9I1jR221f6Oq+GmHrcD+LvZDag1UTOTe2fL3TeA==", + "requires": { + "@babel/helper-plugin-utils": "^7.10.4" + } + }, "@babel/plugin-syntax-decorators": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.8.3.tgz", - "integrity": "sha512-8Hg4dNNT9/LcA1zQlfwuKR8BUc/if7Q7NkTam9sGTcJphLwpf2g4S42uhspQrIrR+dpzE0dtTqBVFoHl8GtnnQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.12.1.tgz", + "integrity": "sha512-ir9YW5daRrTYiy9UJ2TzdNIJEZu8KclVzDcfSt4iEmOtwQ4llPtWInNKJyKnVXp1vE4bbVd5S31M/im3mYMO1w==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-syntax-dynamic-import": { @@ -465,6 +612,14 @@ "@babel/helper-plugin-utils": "^7.8.0" } }, + "@babel/plugin-syntax-export-namespace-from": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", + "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, "@babel/plugin-syntax-json-strings": { "version": "7.8.3", "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", @@ -474,11 +629,19 @@ } }, "@babel/plugin-syntax-jsx": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.8.3.tgz", - "integrity": "sha512-WxdW9xyLgBdefoo0Ynn3MRSkhe5tFVxxKNVdnZSh318WrG2e2jH+E9wd/++JsqcLJZPfz87njQJ8j2Upjm0M0A==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz", + "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" + } + }, + "@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "requires": { + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-syntax-nullish-coalescing-operator": { @@ -490,11 +653,11 @@ } }, "@babel/plugin-syntax-numeric-separator": { - "version": "7.8.3", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.8.3.tgz", - "integrity": "sha512-H7dCMAdN83PcCmqmkHB5dtp+Xa9a6LKSvA2hiFBC/5alSHxM5VgWZXFqDi0YFe8XNGT6iCa+z4V4zSt/PdZ7Dw==", + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-syntax-object-rest-spread": { @@ -522,244 +685,243 @@ } }, "@babel/plugin-syntax-top-level-await": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.8.3.tgz", - "integrity": "sha512-kwj1j9lL/6Wd0hROD3b/OZZ7MSrZLqqn9RAZ5+cYYsflQ9HZBIKCUkr3+uL1MEJ1NePiUbf98jjiMQSv0NMR9g==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.12.1.tgz", + "integrity": "sha512-i7ooMZFS+a/Om0crxZodrTzNEPJHZrlMVGMTEpFAj6rYY/bKCddB0Dk/YxfPuYXOopuhKk/e1jV6h+WUU9XN3A==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-arrow-functions": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.8.3.tgz", - "integrity": "sha512-0MRF+KC8EqH4dbuITCWwPSzsyO3HIWWlm30v8BbbpOrS1B++isGxPnnuq/IZvOX5J2D/p7DQalQm+/2PnlKGxg==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.12.1.tgz", + "integrity": "sha512-5QB50qyN44fzzz4/qxDPQMBCTHgxg3n0xRBLJUmBlLoU/sFvxVWGZF/ZUfMVDQuJUKXaBhbupxIzIfZ6Fwk/0A==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-async-to-generator": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.8.3.tgz", - "integrity": "sha512-imt9tFLD9ogt56Dd5CI/6XgpukMwd/fLGSrix2httihVe7LOGVPhyhMh1BU5kDM7iHD08i8uUtmV2sWaBFlHVQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.12.1.tgz", + "integrity": "sha512-SDtqoEcarK1DFlRJ1hHRY5HvJUj5kX4qmtpMAm2QnhOlyuMC4TMdCRgW6WXpv93rZeYNeLP22y8Aq2dbcDRM1A==", "requires": { - "@babel/helper-module-imports": "^7.8.3", - "@babel/helper-plugin-utils": "^7.8.3", - "@babel/helper-remap-async-to-generator": "^7.8.3" + "@babel/helper-module-imports": "^7.12.1", + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/helper-remap-async-to-generator": "^7.12.1" } }, "@babel/plugin-transform-block-scoped-functions": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.8.3.tgz", - "integrity": "sha512-vo4F2OewqjbB1+yaJ7k2EJFHlTP3jR634Z9Cj9itpqNjuLXvhlVxgnjsHsdRgASR8xYDrx6onw4vW5H6We0Jmg==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.12.1.tgz", + "integrity": "sha512-5OpxfuYnSgPalRpo8EWGPzIYf0lHBWORCkj5M0oLBwHdlux9Ri36QqGW3/LR13RSVOAoUUMzoPI/jpE4ABcHoA==", "requires": { - 
"@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-block-scoping": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.8.3.tgz", - "integrity": "sha512-pGnYfm7RNRgYRi7bids5bHluENHqJhrV4bCZRwc5GamaWIIs07N4rZECcmJL6ZClwjDz1GbdMZFtPs27hTB06w==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.12.1.tgz", + "integrity": "sha512-zJyAC9sZdE60r1nVQHblcfCj29Dh2Y0DOvlMkcqSo0ckqjiCwNiUezUKw+RjOCwGfpLRwnAeQ2XlLpsnGkvv9w==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3", - "lodash": "^4.17.13" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-classes": { - "version": "7.9.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.9.5.tgz", - "integrity": "sha512-x2kZoIuLC//O5iA7PEvecB105o7TLzZo8ofBVhP79N+DO3jaX+KYfww9TQcfBEZD0nikNyYcGB1IKtRq36rdmg==", - "requires": { - "@babel/helper-annotate-as-pure": "^7.8.3", - "@babel/helper-define-map": "^7.8.3", - "@babel/helper-function-name": "^7.9.5", - "@babel/helper-optimise-call-expression": "^7.8.3", - "@babel/helper-plugin-utils": "^7.8.3", - "@babel/helper-replace-supers": "^7.8.6", - "@babel/helper-split-export-declaration": "^7.8.3", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.12.1.tgz", + "integrity": "sha512-/74xkA7bVdzQTBeSUhLLJgYIcxw/dpEpCdRDiHgPJ3Mv6uC11UhjpOhl72CgqbBCmt1qtssCyB2xnJm1+PFjog==", + "requires": { + "@babel/helper-annotate-as-pure": "^7.10.4", + "@babel/helper-define-map": "^7.10.4", + "@babel/helper-function-name": "^7.10.4", + "@babel/helper-optimise-call-expression": "^7.10.4", + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/helper-replace-supers": "^7.12.1", + "@babel/helper-split-export-declaration": "^7.10.4", "globals": "^11.1.0" } }, "@babel/plugin-transform-computed-properties": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.8.3.tgz", - "integrity": "sha512-O5hiIpSyOGdrQZRQ2ccwtTVkgUDBBiCuK//4RJ6UfePllUTCENOzKxfh6ulckXKc0DixTFLCfb2HVkNA7aDpzA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.12.1.tgz", + "integrity": "sha512-vVUOYpPWB7BkgUWPo4C44mUQHpTZXakEqFjbv8rQMg7TC6S6ZhGZ3otQcRH6u7+adSlE5i0sp63eMC/XGffrzg==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-destructuring": { - "version": "7.9.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.9.5.tgz", - "integrity": "sha512-j3OEsGel8nHL/iusv/mRd5fYZ3DrOxWC82x0ogmdN/vHfAP4MYw+AFKYanzWlktNwikKvlzUV//afBW5FTp17Q==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.12.1.tgz", + "integrity": "sha512-fRMYFKuzi/rSiYb2uRLiUENJOKq4Gnl+6qOv5f8z0TZXg3llUwUhsNNwrwaT/6dUhJTzNpBr+CUvEWBtfNY1cw==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-dotall-regex": { - "version": "7.8.3", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.8.3.tgz", - "integrity": "sha512-kLs1j9Nn4MQoBYdRXH6AeaXMbEJFaFu/v1nQkvib6QzTj8MZI5OQzqmD83/2jEM1z0DLilra5aWO5YpyC0ALIw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.12.1.tgz", + "integrity": "sha512-B2pXeRKoLszfEW7J4Hg9LoFaWEbr/kzo3teWHmtFCszjRNa/b40f9mfeqZsIDLLt/FjwQ6pz/Gdlwy85xNckBA==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.8.3", - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-create-regexp-features-plugin": "^7.12.1", + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-duplicate-keys": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.8.3.tgz", - "integrity": "sha512-s8dHiBUbcbSgipS4SMFuWGqCvyge5V2ZeAWzR6INTVC3Ltjig/Vw1G2Gztv0vU/hRG9X8IvKvYdoksnUfgXOEQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.12.1.tgz", + "integrity": "sha512-iRght0T0HztAb/CazveUpUQrZY+aGKKaWXMJ4uf9YJtqxSUe09j3wteztCUDRHs+SRAL7yMuFqUsLoAKKzgXjw==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-exponentiation-operator": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.8.3.tgz", - "integrity": "sha512-zwIpuIymb3ACcInbksHaNcR12S++0MDLKkiqXHl3AzpgdKlFNhog+z/K0+TGW+b0w5pgTq4H6IwV/WhxbGYSjQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.12.1.tgz", + "integrity": "sha512-7tqwy2bv48q+c1EHbXK0Zx3KXd2RVQp6OC7PbwFNt/dPTAV3Lu5sWtWuAj8owr5wqtWnqHfl2/mJlUmqkChKug==", "requires": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.8.3", - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.10.4", + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-for-of": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.9.0.tgz", - "integrity": "sha512-lTAnWOpMwOXpyDx06N+ywmF3jNbafZEqZ96CGYabxHrxNX8l5ny7dt4bK/rGwAh9utyP2b2Hv7PlZh1AAS54FQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.12.1.tgz", + "integrity": "sha512-Zaeq10naAsuHo7heQvyV0ptj4dlZJwZgNAtBYBnu5nNKJoW62m0zKcIEyVECrUKErkUkg6ajMy4ZfnVZciSBhg==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-function-name": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.8.3.tgz", - "integrity": "sha512-rO/OnDS78Eifbjn5Py9v8y0aR+aSYhDhqAwVfsTl0ERuMZyr05L1aFSCJnbv2mmsLkit/4ReeQ9N2BgLnOcPCQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.12.1.tgz", + "integrity": "sha512-JF3UgJUILoFrFMEnOJLJkRHSk6LUSXLmEFsA23aR2O5CSLUxbeUX1IZ1YQ7Sn0aXb601Ncwjx73a+FVqgcljVw==", "requires": { - "@babel/helper-function-name": "^7.8.3", - "@babel/helper-plugin-utils": "^7.8.3" + 
"@babel/helper-function-name": "^7.10.4", + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-literals": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.8.3.tgz", - "integrity": "sha512-3Tqf8JJ/qB7TeldGl+TT55+uQei9JfYaregDcEAyBZ7akutriFrt6C/wLYIer6OYhleVQvH/ntEhjE/xMmy10A==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.12.1.tgz", + "integrity": "sha512-+PxVGA+2Ag6uGgL0A5f+9rklOnnMccwEBzwYFL3EUaKuiyVnUipyXncFcfjSkbimLrODoqki1U9XxZzTvfN7IQ==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-member-expression-literals": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.8.3.tgz", - "integrity": "sha512-3Wk2EXhnw+rP+IDkK6BdtPKsUE5IeZ6QOGrPYvw52NwBStw9V1ZVzxgK6fSKSxqUvH9eQPR3tm3cOq79HlsKYA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.12.1.tgz", + "integrity": "sha512-1sxePl6z9ad0gFMB9KqmYofk34flq62aqMt9NqliS/7hPEpURUCMbyHXrMPlo282iY7nAvUB1aQd5mg79UD9Jg==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-modules-amd": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.9.0.tgz", - "integrity": "sha512-vZgDDF003B14O8zJy0XXLnPH4sg+9X5hFBBGN1V+B2rgrB+J2xIypSN6Rk9imB2hSTHQi5OHLrFWsZab1GMk+Q==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.12.1.tgz", + "integrity": "sha512-tDW8hMkzad5oDtzsB70HIQQRBiTKrhfgwC/KkJeGsaNFTdWhKNt/BiE8c5yj19XiGyrxpbkOfH87qkNg1YGlOQ==", "requires": { - "@babel/helper-module-transforms": "^7.9.0", - "@babel/helper-plugin-utils": "^7.8.3", - "babel-plugin-dynamic-import-node": "^2.3.0" + "@babel/helper-module-transforms": "^7.12.1", + "@babel/helper-plugin-utils": "^7.10.4", + "babel-plugin-dynamic-import-node": "^2.3.3" } }, "@babel/plugin-transform-modules-commonjs": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.9.0.tgz", - "integrity": "sha512-qzlCrLnKqio4SlgJ6FMMLBe4bySNis8DFn1VkGmOcxG9gqEyPIOzeQrA//u0HAKrWpJlpZbZMPB1n/OPa4+n8g==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.12.1.tgz", + "integrity": "sha512-dY789wq6l0uLY8py9c1B48V8mVL5gZh/+PQ5ZPrylPYsnAvnEMjqsUXkuoDVPeVK+0VyGar+D08107LzDQ6pag==", "requires": { - "@babel/helper-module-transforms": "^7.9.0", - "@babel/helper-plugin-utils": "^7.8.3", - "@babel/helper-simple-access": "^7.8.3", - "babel-plugin-dynamic-import-node": "^2.3.0" + "@babel/helper-module-transforms": "^7.12.1", + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/helper-simple-access": "^7.12.1", + "babel-plugin-dynamic-import-node": "^2.3.3" } }, "@babel/plugin-transform-modules-systemjs": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.9.0.tgz", - "integrity": 
"sha512-FsiAv/nao/ud2ZWy4wFacoLOm5uxl0ExSQ7ErvP7jpoihLR6Cq90ilOFyX9UXct3rbtKsAiZ9kFt5XGfPe/5SQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.12.1.tgz", + "integrity": "sha512-Hn7cVvOavVh8yvW6fLwveFqSnd7rbQN3zJvoPNyNaQSvgfKmDBO9U1YL9+PCXGRlZD9tNdWTy5ACKqMuzyn32Q==", "requires": { - "@babel/helper-hoist-variables": "^7.8.3", - "@babel/helper-module-transforms": "^7.9.0", - "@babel/helper-plugin-utils": "^7.8.3", - "babel-plugin-dynamic-import-node": "^2.3.0" + "@babel/helper-hoist-variables": "^7.10.4", + "@babel/helper-module-transforms": "^7.12.1", + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/helper-validator-identifier": "^7.10.4", + "babel-plugin-dynamic-import-node": "^2.3.3" } }, "@babel/plugin-transform-modules-umd": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.9.0.tgz", - "integrity": "sha512-uTWkXkIVtg/JGRSIABdBoMsoIeoHQHPTL0Y2E7xf5Oj7sLqwVsNXOkNk0VJc7vF0IMBsPeikHxFjGe+qmwPtTQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.12.1.tgz", + "integrity": "sha512-aEIubCS0KHKM0zUos5fIoQm+AZUMt1ZvMpqz0/H5qAQ7vWylr9+PLYurT+Ic7ID/bKLd4q8hDovaG3Zch2uz5Q==", "requires": { - "@babel/helper-module-transforms": "^7.9.0", - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-module-transforms": "^7.12.1", + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.8.3.tgz", - "integrity": "sha512-f+tF/8UVPU86TrCb06JoPWIdDpTNSGGcAtaD9mLP0aYGA0OS0j7j7DHJR0GTFrUZPUU6loZhbsVZgTh0N+Qdnw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.12.1.tgz", + "integrity": "sha512-tB43uQ62RHcoDp9v2Nsf+dSM8sbNodbEicbQNA53zHz8pWUhsgHSJCGpt7daXxRydjb0KnfmB+ChXOv3oADp1Q==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.8.3" + "@babel/helper-create-regexp-features-plugin": "^7.12.1" } }, "@babel/plugin-transform-new-target": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.8.3.tgz", - "integrity": "sha512-QuSGysibQpyxexRyui2vca+Cmbljo8bcRckgzYV4kRIsHpVeyeC3JDO63pY+xFZ6bWOBn7pfKZTqV4o/ix9sFw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.12.1.tgz", + "integrity": "sha512-+eW/VLcUL5L9IvJH7rT1sT0CzkdUTvPrXC2PXTn/7z7tXLBuKvezYbGdxD5WMRoyvyaujOq2fWoKl869heKjhw==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-object-super": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.8.3.tgz", - "integrity": "sha512-57FXk+gItG/GejofIyLIgBKTas4+pEU47IXKDBWFTxdPd7F80H8zybyAY7UoblVfBhBGs2EKM+bJUu2+iUYPDQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.12.1.tgz", + "integrity": "sha512-AvypiGJH9hsquNUn+RXVcBdeE3KHPZexWRdimhuV59cSoOt5kFBmqlByorAeUlGG2CJWd0U+4ZtNKga/TB0cAw==", 
"requires": { - "@babel/helper-plugin-utils": "^7.8.3", - "@babel/helper-replace-supers": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/helper-replace-supers": "^7.12.1" } }, "@babel/plugin-transform-parameters": { - "version": "7.9.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.9.5.tgz", - "integrity": "sha512-0+1FhHnMfj6lIIhVvS4KGQJeuhe1GI//h5uptK4PvLt+BGBxsoUJbd3/IW002yk//6sZPlFgsG1hY6OHLcy6kA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.12.1.tgz", + "integrity": "sha512-xq9C5EQhdPK23ZeCdMxl8bbRnAgHFrw5EOC3KJUsSylZqdkCaFEXxGSBuTSObOpiiHHNyb82es8M1QYgfQGfNg==", "requires": { - "@babel/helper-get-function-arity": "^7.8.3", - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-property-literals": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.8.3.tgz", - "integrity": "sha512-uGiiXAZMqEoQhRWMK17VospMZh5sXWg+dlh2soffpkAl96KAm+WZuJfa6lcELotSRmooLqg0MWdH6UUq85nmmg==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.12.1.tgz", + "integrity": "sha512-6MTCR/mZ1MQS+AwZLplX4cEySjCpnIF26ToWo942nqn8hXSm7McaHQNeGx/pt7suI1TWOWMfa/NgBhiqSnX0cQ==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-regenerator": { - "version": "7.8.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.8.7.tgz", - "integrity": "sha512-TIg+gAl4Z0a3WmD3mbYSk+J9ZUH6n/Yc57rtKRnlA/7rcCvpekHXe0CMZHP1gYp7/KLe9GHTuIba0vXmls6drA==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.12.1.tgz", + "integrity": "sha512-gYrHqs5itw6i4PflFX3OdBPMQdPbF4bj2REIUxlMRUFk0/ZOAIpDFuViuxPjUL7YC8UPnf+XG7/utJvqXdPKng==", "requires": { "regenerator-transform": "^0.14.2" } }, "@babel/plugin-transform-reserved-words": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.8.3.tgz", - "integrity": "sha512-mwMxcycN3omKFDjDQUl+8zyMsBfjRFr0Zn/64I41pmjv4NJuqcYlEtezwYtw9TFd9WR1vN5kiM+O0gMZzO6L0A==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.12.1.tgz", + "integrity": "sha512-pOnUfhyPKvZpVyBHhSBoX8vfA09b7r00Pmm1sH+29ae2hMTKVmSp4Ztsr8KBKjLjx17H0eJqaRC3bR2iThM54A==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-runtime": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.9.0.tgz", - "integrity": "sha512-pUu9VSf3kI1OqbWINQ7MaugnitRss1z533436waNXp+0N3ur3zfut37sXiQMxkuCF4VUjwZucen/quskCh7NHw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.12.1.tgz", + "integrity": "sha512-Ac/H6G9FEIkS2tXsZjL4RAdS3L3WHxci0usAnz7laPWUmFiGtj7tIASChqKZMHTSQTQY6xDbOq+V1/vIq3QrWg==", "requires": { - "@babel/helper-module-imports": "^7.8.3", - "@babel/helper-plugin-utils": "^7.8.3", + "@babel/helper-module-imports": "^7.12.1", + 
"@babel/helper-plugin-utils": "^7.10.4", "resolve": "^1.8.1", "semver": "^5.5.1" }, @@ -772,120 +934,134 @@ } }, "@babel/plugin-transform-shorthand-properties": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.8.3.tgz", - "integrity": "sha512-I9DI6Odg0JJwxCHzbzW08ggMdCezoWcuQRz3ptdudgwaHxTjxw5HgdFJmZIkIMlRymL6YiZcped4TTCB0JcC8w==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.12.1.tgz", + "integrity": "sha512-GFZS3c/MhX1OusqB1MZ1ct2xRzX5ppQh2JU1h2Pnfk88HtFTM+TWQqJNfwkmxtPQtb/s1tk87oENfXJlx7rSDw==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-spread": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.8.3.tgz", - "integrity": "sha512-CkuTU9mbmAoFOI1tklFWYYbzX5qCIZVXPVy0jpXgGwkplCndQAa58s2jr66fTeQnA64bDox0HL4U56CFYoyC7g==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.12.1.tgz", + "integrity": "sha512-vuLp8CP0BE18zVYjsEBZ5xoCecMK6LBMMxYzJnh01rxQRvhNhH1csMMmBfNo5tGpGO+NhdSNW2mzIvBu3K1fng==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/helper-skip-transparent-expression-wrappers": "^7.12.1" } }, "@babel/plugin-transform-sticky-regex": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.8.3.tgz", - "integrity": "sha512-9Spq0vGCD5Bb4Z/ZXXSK5wbbLFMG085qd2vhL1JYu1WcQ5bXqZBAYRzU1d+p79GcHs2szYv5pVQCX13QgldaWw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.12.1.tgz", + "integrity": "sha512-CiUgKQ3AGVk7kveIaPEET1jNDhZZEl1RPMWdTBE1799bdz++SwqDHStmxfCtDfBhQgCl38YRiSnrMuUMZIWSUQ==", "requires": { - "@babel/helper-plugin-utils": "^7.8.3", - "@babel/helper-regex": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/helper-regex": "^7.10.4" } }, "@babel/plugin-transform-template-literals": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.8.3.tgz", - "integrity": "sha512-820QBtykIQOLFT8NZOcTRJ1UNuztIELe4p9DCgvj4NK+PwluSJ49we7s9FB1HIGNIYT7wFUJ0ar2QpCDj0escQ==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.12.1.tgz", + "integrity": "sha512-b4Zx3KHi+taXB1dVRBhVJtEPi9h1THCeKmae2qP0YdUHIFhVjtpqqNfxeVAa1xeHVhAy4SbHxEwx5cltAu5apw==", "requires": { - "@babel/helper-annotate-as-pure": "^7.8.3", - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-typeof-symbol": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.8.4.tgz", - "integrity": "sha512-2QKyfjGdvuNfHsb7qnBBlKclbD4CfshH2KvDabiijLMGXPHJXGxtDzwIF7bQP+T0ysw8fYTtxPafgfs/c1Lrqg==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.12.1.tgz", + "integrity": "sha512-EPGgpGy+O5Kg5pJFNDKuxt9RdmTgj5sgrus2XVeMp/ZIbOESadgILUbm50SNpghOh3/6yrbsH+NB5+WJTmsA7Q==", 
"requires": { - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-plugin-utils": "^7.10.4" + } + }, + "@babel/plugin-transform-unicode-escapes": { + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.12.1.tgz", + "integrity": "sha512-I8gNHJLIc7GdApm7wkVnStWssPNbSRMPtgHdmH3sRM1zopz09UWPS4x5V4n1yz/MIWTVnJ9sp6IkuXdWM4w+2Q==", + "requires": { + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/plugin-transform-unicode-regex": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.8.3.tgz", - "integrity": "sha512-+ufgJjYdmWfSQ+6NS9VGUR2ns8cjJjYbrbi11mZBTaWm+Fui/ncTLFF28Ei1okavY+xkojGr1eJxNsWYeA5aZw==", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.12.1.tgz", + "integrity": "sha512-SqH4ClNngh/zGwHZOOQMTD+e8FGWexILV+ePMyiDJttAWRh5dhDL8rcl5lSgU3Huiq6Zn6pWTMvdPAb21Dwdyg==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.8.3", - "@babel/helper-plugin-utils": "^7.8.3" + "@babel/helper-create-regexp-features-plugin": "^7.12.1", + "@babel/helper-plugin-utils": "^7.10.4" } }, "@babel/preset-env": { - "version": "7.9.5", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.9.5.tgz", - "integrity": "sha512-eWGYeADTlPJH+wq1F0wNfPbVS1w1wtmMJiYk55Td5Yu28AsdR9AsC97sZ0Qq8fHqQuslVSIYSGJMcblr345GfQ==", - "requires": { - "@babel/compat-data": "^7.9.0", - "@babel/helper-compilation-targets": "^7.8.7", - "@babel/helper-module-imports": "^7.8.3", - "@babel/helper-plugin-utils": "^7.8.3", - "@babel/plugin-proposal-async-generator-functions": "^7.8.3", - "@babel/plugin-proposal-dynamic-import": "^7.8.3", - "@babel/plugin-proposal-json-strings": "^7.8.3", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-proposal-numeric-separator": "^7.8.3", - "@babel/plugin-proposal-object-rest-spread": "^7.9.5", - "@babel/plugin-proposal-optional-catch-binding": "^7.8.3", - "@babel/plugin-proposal-optional-chaining": "^7.9.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.8.3", + "version": "7.12.1", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.12.1.tgz", + "integrity": "sha512-H8kxXmtPaAGT7TyBvSSkoSTUK6RHh61So05SyEbpmr0MCZrsNYn7mGMzzeYoOUCdHzww61k8XBft2TaES+xPLg==", + "requires": { + "@babel/compat-data": "^7.12.1", + "@babel/helper-compilation-targets": "^7.12.1", + "@babel/helper-module-imports": "^7.12.1", + "@babel/helper-plugin-utils": "^7.10.4", + "@babel/helper-validator-option": "^7.12.1", + "@babel/plugin-proposal-async-generator-functions": "^7.12.1", + "@babel/plugin-proposal-class-properties": "^7.12.1", + "@babel/plugin-proposal-dynamic-import": "^7.12.1", + "@babel/plugin-proposal-export-namespace-from": "^7.12.1", + "@babel/plugin-proposal-json-strings": "^7.12.1", + "@babel/plugin-proposal-logical-assignment-operators": "^7.12.1", + "@babel/plugin-proposal-nullish-coalescing-operator": "^7.12.1", + "@babel/plugin-proposal-numeric-separator": "^7.12.1", + "@babel/plugin-proposal-object-rest-spread": "^7.12.1", + "@babel/plugin-proposal-optional-catch-binding": "^7.12.1", + "@babel/plugin-proposal-optional-chaining": "^7.12.1", + "@babel/plugin-proposal-private-methods": "^7.12.1", + "@babel/plugin-proposal-unicode-property-regex": "^7.12.1", "@babel/plugin-syntax-async-generators": "^7.8.0", + "@babel/plugin-syntax-class-properties": 
"^7.12.1", "@babel/plugin-syntax-dynamic-import": "^7.8.0", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3", "@babel/plugin-syntax-json-strings": "^7.8.0", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.0", - "@babel/plugin-syntax-numeric-separator": "^7.8.0", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", "@babel/plugin-syntax-object-rest-spread": "^7.8.0", "@babel/plugin-syntax-optional-catch-binding": "^7.8.0", "@babel/plugin-syntax-optional-chaining": "^7.8.0", - "@babel/plugin-syntax-top-level-await": "^7.8.3", - "@babel/plugin-transform-arrow-functions": "^7.8.3", - "@babel/plugin-transform-async-to-generator": "^7.8.3", - "@babel/plugin-transform-block-scoped-functions": "^7.8.3", - "@babel/plugin-transform-block-scoping": "^7.8.3", - "@babel/plugin-transform-classes": "^7.9.5", - "@babel/plugin-transform-computed-properties": "^7.8.3", - "@babel/plugin-transform-destructuring": "^7.9.5", - "@babel/plugin-transform-dotall-regex": "^7.8.3", - "@babel/plugin-transform-duplicate-keys": "^7.8.3", - "@babel/plugin-transform-exponentiation-operator": "^7.8.3", - "@babel/plugin-transform-for-of": "^7.9.0", - "@babel/plugin-transform-function-name": "^7.8.3", - "@babel/plugin-transform-literals": "^7.8.3", - "@babel/plugin-transform-member-expression-literals": "^7.8.3", - "@babel/plugin-transform-modules-amd": "^7.9.0", - "@babel/plugin-transform-modules-commonjs": "^7.9.0", - "@babel/plugin-transform-modules-systemjs": "^7.9.0", - "@babel/plugin-transform-modules-umd": "^7.9.0", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.8.3", - "@babel/plugin-transform-new-target": "^7.8.3", - "@babel/plugin-transform-object-super": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.9.5", - "@babel/plugin-transform-property-literals": "^7.8.3", - "@babel/plugin-transform-regenerator": "^7.8.7", - "@babel/plugin-transform-reserved-words": "^7.8.3", - "@babel/plugin-transform-shorthand-properties": "^7.8.3", - "@babel/plugin-transform-spread": "^7.8.3", - "@babel/plugin-transform-sticky-regex": "^7.8.3", - "@babel/plugin-transform-template-literals": "^7.8.3", - "@babel/plugin-transform-typeof-symbol": "^7.8.4", - "@babel/plugin-transform-unicode-regex": "^7.8.3", + "@babel/plugin-syntax-top-level-await": "^7.12.1", + "@babel/plugin-transform-arrow-functions": "^7.12.1", + "@babel/plugin-transform-async-to-generator": "^7.12.1", + "@babel/plugin-transform-block-scoped-functions": "^7.12.1", + "@babel/plugin-transform-block-scoping": "^7.12.1", + "@babel/plugin-transform-classes": "^7.12.1", + "@babel/plugin-transform-computed-properties": "^7.12.1", + "@babel/plugin-transform-destructuring": "^7.12.1", + "@babel/plugin-transform-dotall-regex": "^7.12.1", + "@babel/plugin-transform-duplicate-keys": "^7.12.1", + "@babel/plugin-transform-exponentiation-operator": "^7.12.1", + "@babel/plugin-transform-for-of": "^7.12.1", + "@babel/plugin-transform-function-name": "^7.12.1", + "@babel/plugin-transform-literals": "^7.12.1", + "@babel/plugin-transform-member-expression-literals": "^7.12.1", + "@babel/plugin-transform-modules-amd": "^7.12.1", + "@babel/plugin-transform-modules-commonjs": "^7.12.1", + "@babel/plugin-transform-modules-systemjs": "^7.12.1", + "@babel/plugin-transform-modules-umd": "^7.12.1", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.12.1", + "@babel/plugin-transform-new-target": "^7.12.1", + "@babel/plugin-transform-object-super": "^7.12.1", + 
"@babel/plugin-transform-parameters": "^7.12.1", + "@babel/plugin-transform-property-literals": "^7.12.1", + "@babel/plugin-transform-regenerator": "^7.12.1", + "@babel/plugin-transform-reserved-words": "^7.12.1", + "@babel/plugin-transform-shorthand-properties": "^7.12.1", + "@babel/plugin-transform-spread": "^7.12.1", + "@babel/plugin-transform-sticky-regex": "^7.12.1", + "@babel/plugin-transform-template-literals": "^7.12.1", + "@babel/plugin-transform-typeof-symbol": "^7.12.1", + "@babel/plugin-transform-unicode-escapes": "^7.12.1", + "@babel/plugin-transform-unicode-regex": "^7.12.1", "@babel/preset-modules": "^0.1.3", - "@babel/types": "^7.9.5", - "browserslist": "^4.9.1", + "@babel/types": "^7.12.1", "core-js-compat": "^3.6.2", - "invariant": "^2.2.2", - "levenary": "^1.1.1", "semver": "^5.5.0" }, "dependencies": { @@ -897,9 +1073,9 @@ } }, "@babel/preset-modules": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.3.tgz", - "integrity": "sha512-Ra3JXOHBq2xd56xSF7lMKXdjBn3T772Y1Wet3yWnkDly9zHvJki029tAFzvAAK5cf4YV3yoxuP61crYRol6SVg==", + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.4.tgz", + "integrity": "sha512-J36NhwnfdzpmH41M1DrnkkgAqhZaqr/NBdPfQ677mLzlaXo+oDiv1deyCDtgAhz8p328otdob0Du7+xgHGZbKg==", "requires": { "@babel/helper-plugin-utils": "^7.0.0", "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", @@ -909,52 +1085,52 @@ } }, "@babel/runtime": { - "version": "7.9.2", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.9.2.tgz", - "integrity": "sha512-NE2DtOdufG7R5vnfQUTehdTfNycfUANEtCa9PssN9O/xmTzP4E08UI797ixaei6hBEVL9BI/PsdJS5x7mWoB9Q==", + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.12.5.tgz", + "integrity": "sha512-plcc+hbExy3McchJCEQG3knOsuh3HH+Prx1P6cLIkET/0dLuQDEnrT+s27Axgc9bqfsmNUNHfscgMUdBpC9xfg==", "requires": { "regenerator-runtime": "^0.13.4" }, "dependencies": { "regenerator-runtime": { - "version": "0.13.5", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.5.tgz", - "integrity": "sha512-ZS5w8CpKFinUzOwW3c83oPeVXoNsrLsaCoLtJvAClH135j/R77RuymhiSErhm2lKcwSCIpmvIWSbDkIfAqKQlA==" + "version": "0.13.7", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.7.tgz", + "integrity": "sha512-a54FxoJDIr27pgf7IgeQGxmqUNYrcV338lf/6gH456HZ/PhX+5BcwHXG9ajESmwe6WRO0tAzRUrRmNONWgkrew==" } } }, "@babel/template": { - "version": "7.8.6", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.8.6.tgz", - "integrity": "sha512-zbMsPMy/v0PWFZEhQJ66bqjhH+z0JgMoBWuikXybgG3Gkd/3t5oQ1Rw2WQhnSrsOmsKXnZOx15tkC4qON/+JPg==", + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.10.4.tgz", + "integrity": "sha512-ZCjD27cGJFUB6nmCB1Enki3r+L5kJveX9pq1SvAUKoICy6CZ9yD8xO086YXdYhvNjBdnekm4ZnaP5yC8Cs/1tA==", "requires": { - "@babel/code-frame": "^7.8.3", - "@babel/parser": "^7.8.6", - "@babel/types": "^7.8.6" + "@babel/code-frame": "^7.10.4", + "@babel/parser": "^7.10.4", + "@babel/types": "^7.10.4" } }, "@babel/traverse": { - "version": "7.9.5", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.9.5.tgz", - "integrity": "sha512-c4gH3jsvSuGUezlP6rzSJ6jf8fYjLj3hsMZRx/nX0h+fmHN0w+ekubRrHPqnMec0meycA2nwCsJ7dC8IPem2FQ==", - "requires": { - "@babel/code-frame": "^7.8.3", - "@babel/generator": "^7.9.5", - "@babel/helper-function-name": "^7.9.5", - 
"@babel/helper-split-export-declaration": "^7.8.3", - "@babel/parser": "^7.9.0", - "@babel/types": "^7.9.5", + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.12.5.tgz", + "integrity": "sha512-xa15FbQnias7z9a62LwYAA5SZZPkHIXpd42C6uW68o8uTuua96FHZy1y61Va5P/i83FAAcMpW8+A/QayntzuqA==", + "requires": { + "@babel/code-frame": "^7.10.4", + "@babel/generator": "^7.12.5", + "@babel/helper-function-name": "^7.10.4", + "@babel/helper-split-export-declaration": "^7.11.0", + "@babel/parser": "^7.12.5", + "@babel/types": "^7.12.5", "debug": "^4.1.0", "globals": "^11.1.0", - "lodash": "^4.17.13" + "lodash": "^4.17.19" }, "dependencies": { "debug": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", - "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.2.0.tgz", + "integrity": "sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg==", "requires": { - "ms": "^2.1.1" + "ms": "2.1.2" } }, "ms": { @@ -965,12 +1141,12 @@ } }, "@babel/types": { - "version": "7.9.5", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.9.5.tgz", - "integrity": "sha512-XjnvNqenk818r5zMaba+sLQjnbda31UfUURv3ei0qPQw4u+j2jMyJ5b11y8ZHYTRSI3NnInQkkkRT4fLqqPdHg==", + "version": "7.12.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.12.5.tgz", + "integrity": "sha512-gyTcvz7JFa4V45C0Zklv//GmFOAal5fL23OWpBLqc4nZ4Yrz67s4kCNwSK1Gu0MXGTU8mRY3zJYtacLdKXlzig==", "requires": { - "@babel/helper-validator-identifier": "^7.9.5", - "lodash": "^4.17.13", + "@babel/helper-validator-identifier": "^7.10.4", + "lodash": "^4.17.19", "to-fast-properties": "^2.0.0" }, "dependencies": { @@ -982,17 +1158,49 @@ } }, "@cosmos-ui/vue": { - "version": "0.22.0", - "resolved": "https://registry.npmjs.org/@cosmos-ui/vue/-/vue-0.22.0.tgz", - "integrity": "sha512-+1A6SNohzHKI64EsPP3N4spcWalGsnwRUB4y6ySBHkHlQ5X4KjsSkHOQ95xODMlwtKELiDSVjS8PsgdEyk+4Vg==", + "version": "0.35.0", + "resolved": "https://registry.npmjs.org/@cosmos-ui/vue/-/vue-0.35.0.tgz", + "integrity": "sha512-WTCJBWSoiDckgvXWPByKkQ7ZVSf9LSMsizIAHBnsi0Zp3GOaEqPNBpgjGt2JEhpDPr7+YwyIgmqQ0S3D+Hq5iQ==", "requires": { + "algoliasearch": "^4.1.0", "axios": "^0.19.2", "clipboard-copy": "^3.1.0", + "fuse.js": "^3.4.6", + "hotkeys-js": "^3.7.3", "js-base64": "^2.5.2", + "lodash": "^4.17.15", + "markdown-it": "^10.0.0", "prismjs": "^1.19.0", "querystring": "^0.2.0", "tiny-cookie": "^2.3.1", "vue": "^2.6.10" + }, + "dependencies": { + "axios": { + "version": "0.19.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-0.19.2.tgz", + "integrity": "sha512-fjgm5MvRHLhx+osE2xoekY70AhARk3a6hkN+3Io1jc00jtquGvxYlKlsFUhmUET0V5te6CcZI7lcv2Ym61mjHA==", + "requires": { + "follow-redirects": "1.5.10" + } + }, + "entities": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.0.3.tgz", + "integrity": "sha512-MyoZ0jgnLvB2X3Lg5HqpFmn1kybDiIfEQmKzTb5apr51Rb+T3KdmMiqa70T+bhGnyv7bQ6WMj2QMHpGMmlrUYQ==" + }, + "markdown-it": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-10.0.0.tgz", + "integrity": "sha512-YWOP1j7UbDNz+TumYP1kpwnP0aEa711cJjrAQrzd0UXlbJfc5aAq0F/PZHjiioqDC1NKgvIMX+o+9Bk7yuM2dg==", + "requires": { + "argparse": "^1.0.7", + "entities": "~2.0.0", + "linkify-it": "^2.0.0", + "mdurl": "^1.0.1", + "uc.micro": "^1.0.5" + } + } } }, 
"@mrmlnc/readdir-enhanced": { @@ -1023,9 +1231,9 @@ } }, "@types/babel-types": { - "version": "7.0.7", - "resolved": "https://registry.npmjs.org/@types/babel-types/-/babel-types-7.0.7.tgz", - "integrity": "sha512-dBtBbrc+qTHy1WdfHYjBwRln4+LWqASWakLHsWHR2NWHIFkv4W3O070IGoGLEBrJBvct3r0L1BUPuvURi7kYUQ==" + "version": "7.0.9", + "resolved": "https://registry.npmjs.org/@types/babel-types/-/babel-types-7.0.9.tgz", + "integrity": "sha512-qZLoYeXSTgQuK1h7QQS16hqLGdmqtRmN8w/rl3Au/l5x/zkHx+a4VHrHyBsi1I1vtK2oBHxSzKIu0R5p6spdOA==" }, "@types/babylon": { "version": "6.16.5", @@ -1035,124 +1243,172 @@ "@types/babel-types": "*" } }, - "@types/color-name": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@types/color-name/-/color-name-1.1.1.tgz", - "integrity": "sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ==" - }, - "@types/events": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/events/-/events-3.0.0.tgz", - "integrity": "sha512-EaObqwIvayI5a8dCzhFrjKzVwKLxjoG9T6Ppd5CEo07LRKfQ8Yokw54r5+Wq7FaBQ+yXRvQAYPrHwya1/UFt9g==" - }, "@types/glob": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.1.1.tgz", - "integrity": "sha512-1Bh06cbWJUHMC97acuD6UMG29nMt0Aqz1vF3guLfG+kHHJhy3AyohZFFxYk2f7Q1SQIrNwvncxAE0N/9s70F2w==", + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.1.3.tgz", + "integrity": "sha512-SEYeGAIQIQX8NN6LDKprLjbrd5dARM5EXsd8GI/A5l0apYI1fGMWgPHSe4ZKL4eozlAyI+doUE9XbYS4xCkQ1w==", "requires": { - "@types/events": "*", "@types/minimatch": "*", "@types/node": "*" } }, + "@types/json-schema": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.6.tgz", + "integrity": "sha512-3c+yGKvVP5Y9TYBEibGNR+kLtijnj7mYrXRg+WpFb2X9xm04g/DXYkfg4hmzJQosc9snFNUPkbYIhu+KAm6jJw==" + }, "@types/minimatch": { "version": "3.0.3", "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.3.tgz", "integrity": "sha512-tHq6qdbT9U1IRSGf14CL0pUlULksvY9OZ+5eEgl1N7t+OA3tGvNpxJCzuKQlsNgCVwbAs670L1vcVQi8j9HjnA==" }, "@types/node": { - "version": "13.11.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-13.11.0.tgz", - "integrity": "sha512-uM4mnmsIIPK/yeO+42F2RQhGUIs39K2RFmugcJANppXe6J1nvH87PvzPZYpza7Xhhs8Yn9yIAVdLZ84z61+0xQ==" + "version": "14.14.6", + "resolved": "https://registry.npmjs.org/@types/node/-/node-14.14.6.tgz", + "integrity": "sha512-6QlRuqsQ/Ox/aJEQWBEJG7A9+u7oSYl3mem/K8IzxXG/kAGbV1YPD9Bg9Zw3vyxC/YP+zONKwy8hGkSt1jxFMw==" }, "@types/q": { - "version": "1.5.2", - "resolved": "https://registry.npmjs.org/@types/q/-/q-1.5.2.tgz", - "integrity": "sha512-ce5d3q03Ex0sy4R14722Rmt6MT07Ua+k4FwDfdcToYJcMKNtRVQvJ6JCAPdAmAnbRb6CsX6aYb9m96NGod9uTw==" + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/@types/q/-/q-1.5.4.tgz", + "integrity": "sha512-1HcDas8SEj4z1Wc696tH56G8OlRaH/sqZOynNNB+HF0WOeXPaxTtbYzJY2oEfiUxjSKjhCKr+MvR7dCHcEelug==" }, "@vue/babel-helper-vue-jsx-merge-props": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@vue/babel-helper-vue-jsx-merge-props/-/babel-helper-vue-jsx-merge-props-1.0.0.tgz", - "integrity": "sha512-6tyf5Cqm4m6v7buITuwS+jHzPlIPxbFzEhXR5JGZpbrvOcp1hiQKckd305/3C7C36wFekNTQSxAtgeM0j0yoUw==" + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@vue/babel-helper-vue-jsx-merge-props/-/babel-helper-vue-jsx-merge-props-1.2.1.tgz", + "integrity": 
"sha512-QOi5OW45e2R20VygMSNhyQHvpdUwQZqGPc748JLGCYEy+yp8fNFNdbNIGAgZmi9e+2JHPd6i6idRuqivyicIkA==" + }, + "@vue/babel-helper-vue-transform-on": { + "version": "1.0.0-rc.2", + "resolved": "https://registry.npmjs.org/@vue/babel-helper-vue-transform-on/-/babel-helper-vue-transform-on-1.0.0-rc.2.tgz", + "integrity": "sha512-1+7CwjQ0Kasml6rHoNQUmbISwqLNNfFVBUcZl6QBremUl296ZmLrVQPqJP5pyAAWjZke5bpI1hlj+LVVuT7Jcg==" + }, + "@vue/babel-plugin-jsx": { + "version": "1.0.0-rc.3", + "resolved": "https://registry.npmjs.org/@vue/babel-plugin-jsx/-/babel-plugin-jsx-1.0.0-rc.3.tgz", + "integrity": "sha512-/Ibq0hoKsidnHWPhgRpjcjYhYcHpqEm2fiKVAPO88OXZNHGwaGgS4yXkC6TDEvlZep4mBDo+2S5T81wpbVh90Q==", + "requires": { + "@babel/helper-module-imports": "^7.0.0", + "@babel/plugin-syntax-jsx": "^7.0.0", + "@babel/traverse": "^7.0.0", + "@babel/types": "^7.0.0", + "@vue/babel-helper-vue-transform-on": "^1.0.0-rc.2", + "camelcase": "^6.0.0", + "html-tags": "^3.1.0", + "svg-tags": "^1.0.0" + }, + "dependencies": { + "camelcase": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz", + "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==" + } + } }, "@vue/babel-plugin-transform-vue-jsx": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@vue/babel-plugin-transform-vue-jsx/-/babel-plugin-transform-vue-jsx-1.1.2.tgz", - "integrity": "sha512-YfdaoSMvD1nj7+DsrwfTvTnhDXI7bsuh+Y5qWwvQXlD24uLgnsoww3qbiZvWf/EoviZMrvqkqN4CBw0W3BWUTQ==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@vue/babel-plugin-transform-vue-jsx/-/babel-plugin-transform-vue-jsx-1.2.1.tgz", + "integrity": "sha512-HJuqwACYehQwh1fNT8f4kyzqlNMpBuUK4rSiSES5D4QsYncv5fxFsLyrxFPG2ksO7t5WP+Vgix6tt6yKClwPzA==", "requires": { "@babel/helper-module-imports": "^7.0.0", "@babel/plugin-syntax-jsx": "^7.2.0", - "@vue/babel-helper-vue-jsx-merge-props": "^1.0.0", + "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1", "html-tags": "^2.0.0", "lodash.kebabcase": "^4.1.1", "svg-tags": "^1.0.0" + }, + "dependencies": { + "html-tags": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-2.0.0.tgz", + "integrity": "sha1-ELMKOGCF9Dzt41PMj6fLDe7qZos=" + } } }, "@vue/babel-preset-app": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/@vue/babel-preset-app/-/babel-preset-app-4.3.1.tgz", - "integrity": "sha512-iNkySkbRWXGUA+Cvzj+/gEP0Y0uVAwwzfn21S7hkggSeIg9LJyZ+QzdxgKO0wgi01yTdb2mYWgeLQAfHZ65aew==", + "version": "4.5.8", + "resolved": "https://registry.npmjs.org/@vue/babel-preset-app/-/babel-preset-app-4.5.8.tgz", + "integrity": "sha512-efCBo2HY8Jcs6+SyCnvWl8jGeF1Fl38reFL35AjO4SBcro0ol/qjPkeeJLjzvXUxrHAsM9DMfL/DvPa/hBmZwQ==", "requires": { - "@babel/core": "^7.9.0", - "@babel/helper-compilation-targets": "^7.8.7", + "@babel/core": "^7.11.0", + "@babel/helper-compilation-targets": "^7.9.6", "@babel/helper-module-imports": "^7.8.3", "@babel/plugin-proposal-class-properties": "^7.8.3", "@babel/plugin-proposal-decorators": "^7.8.3", "@babel/plugin-syntax-dynamic-import": "^7.8.3", "@babel/plugin-syntax-jsx": "^7.8.3", - "@babel/plugin-transform-runtime": "^7.9.0", - "@babel/preset-env": "^7.9.0", - "@babel/runtime": "^7.9.2", + "@babel/plugin-transform-runtime": "^7.11.0", + "@babel/preset-env": "^7.11.0", + "@babel/runtime": "^7.11.0", + "@vue/babel-plugin-jsx": "^1.0.0-0", "@vue/babel-preset-jsx": "^1.1.2", - "babel-plugin-dynamic-import-node": "^2.3.0", - "core-js": "^3.6.4", - "core-js-compat": "^3.6.4" + 
"babel-plugin-dynamic-import-node": "^2.3.3", + "core-js": "^3.6.5", + "core-js-compat": "^3.6.5", + "semver": "^6.1.0" }, "dependencies": { "core-js": { - "version": "3.6.4", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.6.4.tgz", - "integrity": "sha512-4paDGScNgZP2IXXilaffL9X7968RuvwlkK3xWtZRVqgd8SYNiVKRJvkFd1aqqEuPfN7E68ZHEp9hDj6lHj4Hyw==" + "version": "3.6.5", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.6.5.tgz", + "integrity": "sha512-vZVEEwZoIsI+vPEuoF9Iqf5H7/M3eeQqWlQnYa8FSKKePuYTf5MWnxb5SDAzCa60b3JBRS5g9b+Dq7b1y/RCrA==" } } }, "@vue/babel-preset-jsx": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@vue/babel-preset-jsx/-/babel-preset-jsx-1.1.2.tgz", - "integrity": "sha512-zDpVnFpeC9YXmvGIDSsKNdL7qCG2rA3gjywLYHPCKDT10erjxF4U+6ay9X6TW5fl4GsDlJp9bVfAVQAAVzxxvQ==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@vue/babel-preset-jsx/-/babel-preset-jsx-1.2.4.tgz", + "integrity": "sha512-oRVnmN2a77bYDJzeGSt92AuHXbkIxbf/XXSE3klINnh9AXBmVS1DGa1f0d+dDYpLfsAKElMnqKTQfKn7obcL4w==", + "requires": { + "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1", + "@vue/babel-plugin-transform-vue-jsx": "^1.2.1", + "@vue/babel-sugar-composition-api-inject-h": "^1.2.1", + "@vue/babel-sugar-composition-api-render-instance": "^1.2.4", + "@vue/babel-sugar-functional-vue": "^1.2.2", + "@vue/babel-sugar-inject-h": "^1.2.2", + "@vue/babel-sugar-v-model": "^1.2.3", + "@vue/babel-sugar-v-on": "^1.2.3" + } + }, + "@vue/babel-sugar-composition-api-inject-h": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@vue/babel-sugar-composition-api-inject-h/-/babel-sugar-composition-api-inject-h-1.2.1.tgz", + "integrity": "sha512-4B3L5Z2G+7s+9Bwbf+zPIifkFNcKth7fQwekVbnOA3cr3Pq71q71goWr97sk4/yyzH8phfe5ODVzEjX7HU7ItQ==", + "requires": { + "@babel/plugin-syntax-jsx": "^7.2.0" + } + }, + "@vue/babel-sugar-composition-api-render-instance": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@vue/babel-sugar-composition-api-render-instance/-/babel-sugar-composition-api-render-instance-1.2.4.tgz", + "integrity": "sha512-joha4PZznQMsxQYXtR3MnTgCASC9u3zt9KfBxIeuI5g2gscpTsSKRDzWQt4aqNIpx6cv8On7/m6zmmovlNsG7Q==", "requires": { - "@vue/babel-helper-vue-jsx-merge-props": "^1.0.0", - "@vue/babel-plugin-transform-vue-jsx": "^1.1.2", - "@vue/babel-sugar-functional-vue": "^1.1.2", - "@vue/babel-sugar-inject-h": "^1.1.2", - "@vue/babel-sugar-v-model": "^1.1.2", - "@vue/babel-sugar-v-on": "^1.1.2" + "@babel/plugin-syntax-jsx": "^7.2.0" } }, "@vue/babel-sugar-functional-vue": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-functional-vue/-/babel-sugar-functional-vue-1.1.2.tgz", - "integrity": "sha512-YhmdJQSVEFF5ETJXzrMpj0nkCXEa39TvVxJTuVjzvP2rgKhdMmQzlJuMv/HpadhZaRVMCCF3AEjjJcK5q/cYzQ==", + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@vue/babel-sugar-functional-vue/-/babel-sugar-functional-vue-1.2.2.tgz", + "integrity": "sha512-JvbgGn1bjCLByIAU1VOoepHQ1vFsroSA/QkzdiSs657V79q6OwEWLCQtQnEXD/rLTA8rRit4rMOhFpbjRFm82w==", "requires": { "@babel/plugin-syntax-jsx": "^7.2.0" } }, "@vue/babel-sugar-inject-h": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-inject-h/-/babel-sugar-inject-h-1.1.2.tgz", - "integrity": "sha512-VRSENdTvD5htpnVp7i7DNuChR5rVMcORdXjvv5HVvpdKHzDZAYiLSD+GhnhxLm3/dMuk8pSzV+k28ECkiN5m8w==", + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@vue/babel-sugar-inject-h/-/babel-sugar-inject-h-1.2.2.tgz", + "integrity": 
"sha512-y8vTo00oRkzQTgufeotjCLPAvlhnpSkcHFEp60+LJUwygGcd5Chrpn5480AQp/thrxVm8m2ifAk0LyFel9oCnw==", "requires": { "@babel/plugin-syntax-jsx": "^7.2.0" } }, "@vue/babel-sugar-v-model": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-model/-/babel-sugar-v-model-1.1.2.tgz", - "integrity": "sha512-vLXPvNq8vDtt0u9LqFdpGM9W9IWDmCmCyJXuozlq4F4UYVleXJ2Fa+3JsnTZNJcG+pLjjfnEGHci2339Kj5sGg==", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-model/-/babel-sugar-v-model-1.2.3.tgz", + "integrity": "sha512-A2jxx87mySr/ulAsSSyYE8un6SIH0NWHiLaCWpodPCVOlQVODCaSpiR4+IMsmBr73haG+oeCuSvMOM+ttWUqRQ==", "requires": { "@babel/plugin-syntax-jsx": "^7.2.0", - "@vue/babel-helper-vue-jsx-merge-props": "^1.0.0", - "@vue/babel-plugin-transform-vue-jsx": "^1.1.2", + "@vue/babel-helper-vue-jsx-merge-props": "^1.2.1", + "@vue/babel-plugin-transform-vue-jsx": "^1.2.1", "camelcase": "^5.0.0", "html-tags": "^2.0.0", "svg-tags": "^1.0.0" @@ -1162,16 +1418,21 @@ "version": "5.3.1", "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==" + }, + "html-tags": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-2.0.0.tgz", + "integrity": "sha1-ELMKOGCF9Dzt41PMj6fLDe7qZos=" } } }, "@vue/babel-sugar-v-on": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-on/-/babel-sugar-v-on-1.1.2.tgz", - "integrity": "sha512-T8ZCwC8Jp2uRtcZ88YwZtZXe7eQrJcfRq0uTFy6ShbwYJyz5qWskRFoVsdTi9o0WEhmQXxhQUewodOSCUPVmsQ==", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@vue/babel-sugar-v-on/-/babel-sugar-v-on-1.2.3.tgz", + "integrity": "sha512-kt12VJdz/37D3N3eglBywV8GStKNUhNrsxChXIV+o0MwVXORYuhDTHJRKPgLJRb/EY3vM2aRFQdxJBp9CLikjw==", "requires": { "@babel/plugin-syntax-jsx": "^7.2.0", - "@vue/babel-plugin-transform-vue-jsx": "^1.1.2", + "@vue/babel-plugin-transform-vue-jsx": "^1.2.1", "camelcase": "^5.0.0" }, "dependencies": { @@ -1183,9 +1444,9 @@ } }, "@vue/component-compiler-utils": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/@vue/component-compiler-utils/-/component-compiler-utils-3.1.2.tgz", - "integrity": "sha512-QLq9z8m79mCinpaEeSURhnNCN6djxpHw0lpP/bodMlt5kALfONpryMthvnrQOlTcIKoF+VoPi+lPHUYeDFPXug==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@vue/component-compiler-utils/-/component-compiler-utils-3.2.0.tgz", + "integrity": "sha512-lejBLa7xAMsfiZfNp7Kv51zOzifnb29FwdnMLa96z26kXErPFioSf9BMcePVIQ6/Gc6/mC0UrPpxAWIHyae0vw==", "requires": { "consolidate": "^0.15.1", "hash-sum": "^1.0.2", @@ -1215,17 +1476,17 @@ } }, "@vuepress/core": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@vuepress/core/-/core-1.4.0.tgz", - "integrity": "sha512-xWiLG6MEzZdXGvr7/ickSr/plxPESC8c3prMOUDxROkFnyOiKmVvIyn4vAmRkFX3Xw4mfOLxucIOpQg0K6hEjw==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/core/-/core-1.7.1.tgz", + "integrity": "sha512-M5sxZq30Ke1vXa4ZZjk6185fwtpiJOqzXNnzcIe0GxtvtaF8Yij6b+KqQKlUJnnUXm+CKxiLCr8PTzDY26N7yw==", "requires": { "@babel/core": "^7.8.4", "@vue/babel-preset-app": "^4.1.2", - "@vuepress/markdown": "^1.4.0", - "@vuepress/markdown-loader": "^1.4.0", - "@vuepress/plugin-last-updated": "^1.4.0", - "@vuepress/plugin-register-components": "^1.4.0", - "@vuepress/shared-utils": "^1.4.0", + "@vuepress/markdown": "1.7.1", + "@vuepress/markdown-loader": "1.7.1", + 
"@vuepress/plugin-last-updated": "1.7.1", + "@vuepress/plugin-register-components": "1.7.1", + "@vuepress/shared-utils": "1.7.1", "autoprefixer": "^9.5.1", "babel-loader": "^8.0.4", "cache-loader": "^3.0.0", @@ -1247,7 +1508,7 @@ "url-loader": "^1.0.1", "vue": "^2.6.10", "vue-loader": "^15.7.1", - "vue-router": "^3.1.3", + "vue-router": "^3.4.5", "vue-server-renderer": "^2.6.10", "vue-template-compiler": "^2.6.10", "vuepress-html-webpack-plugin": "^3.2.0", @@ -1260,18 +1521,18 @@ }, "dependencies": { "core-js": { - "version": "3.6.4", - "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.6.4.tgz", - "integrity": "sha512-4paDGScNgZP2IXXilaffL9X7968RuvwlkK3xWtZRVqgd8SYNiVKRJvkFd1aqqEuPfN7E68ZHEp9hDj6lHj4Hyw==" + "version": "3.6.5", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.6.5.tgz", + "integrity": "sha512-vZVEEwZoIsI+vPEuoF9Iqf5H7/M3eeQqWlQnYa8FSKKePuYTf5MWnxb5SDAzCa60b3JBRS5g9b+Dq7b1y/RCrA==" } } }, "@vuepress/markdown": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@vuepress/markdown/-/markdown-1.4.0.tgz", - "integrity": "sha512-H3uojkiO5/uWKpwBEPdk5fsSj+ZGgNR7xi6oYhUxaUak9nC6mhMZ3KzeNA67QmevG3XHEoYx4d9oeAC1Au1frg==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/markdown/-/markdown-1.7.1.tgz", + "integrity": "sha512-Ava9vJECHG1+RC53ut1dXSze35IH5tc3qesC06Ny37WS93iDSQy09j8y+a0Lugy12j1369+QQeRFWa40tdHczA==", "requires": { - "@vuepress/shared-utils": "^1.4.0", + "@vuepress/shared-utils": "1.7.1", "markdown-it": "^8.4.1", "markdown-it-anchor": "^5.0.2", "markdown-it-chain": "^1.3.0", @@ -1300,84 +1561,84 @@ } }, "@vuepress/markdown-loader": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@vuepress/markdown-loader/-/markdown-loader-1.4.0.tgz", - "integrity": "sha512-oEHB6EzCeIxyQxg1HSGX3snRL25V6XZ3O0Zx/sWd5hl0sneEsRLHRMflPGhKu4c6cfsyTck7aTbt7Z71vVy0FQ==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/markdown-loader/-/markdown-loader-1.7.1.tgz", + "integrity": "sha512-GM1F/tRhP9qZydTC89FXJPlLH+BmZijMKom5BYLAMEXsU20A9kABTRoatPjOUbZuKT+gn03JgG97qVd8xa/ETw==", "requires": { - "@vuepress/markdown": "^1.4.0", + "@vuepress/markdown": "1.7.1", "loader-utils": "^1.1.0", "lru-cache": "^5.1.1" } }, "@vuepress/plugin-active-header-links": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.4.0.tgz", - "integrity": "sha512-UWnRcqJZnX1LaPHxESx4XkRVJCleWvdGlSVivRGNLZuV1xrxJzB6LC86SNMur+imoyzeQL/oIgKY1QFx710g8w==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.7.1.tgz", + "integrity": "sha512-Wgf/oB9oPZLnYoLjQ/xbQc4Qa3RU5tXAo2dB4Xl/7bUL6SqBxO866kX3wPxKdSOIL58tq8iH9XbUe3Sxi8/ISQ==", "requires": { "lodash.debounce": "^4.0.8" } }, "@vuepress/plugin-google-analytics": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-google-analytics/-/plugin-google-analytics-1.4.1.tgz", - "integrity": "sha512-s43V5QHdTz0ayfy5vZrfMPpZzJBsj9L79TaxyMux1jOviS7oeWqkvNSblaHwP4Y8BxISehsKte8qsblQEN3zvQ==" + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-google-analytics/-/plugin-google-analytics-1.7.1.tgz", + "integrity": "sha512-27fQzRMsqGYpMf+ruyhsdfLv/n6z6b6LutFLE/pH66Itlh6ox9ew31x0pqYBbWIC/a4lBfXYUwFvi+DEvlb1EQ==" }, "@vuepress/plugin-last-updated": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-last-updated/-/plugin-last-updated-1.4.0.tgz", - 
"integrity": "sha512-sNxCXDz7AO4yIAZTEGt9TaLpJ2E0dgJGWx79nDFKfvpITn+Q2p7dUzkyVVxXs3TWXffoElGdNj/xIL5AUkg2qg==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-last-updated/-/plugin-last-updated-1.7.1.tgz", + "integrity": "sha512-VW5jhBuO0WRHDsBmFsKC6QtEyBLCgyhuH9nQ65aairCn3tdoJPz0uQ4g3lr/boVbgsPexO677Sn3dRPgYqnMug==", "requires": { "cross-spawn": "^6.0.5" } }, "@vuepress/plugin-nprogress": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-nprogress/-/plugin-nprogress-1.4.0.tgz", - "integrity": "sha512-hJ9phJHONWWZqcWztbVtmmRjZduHQHIOBifUBvAfAGcuOBLVHqRnv3i7XD5UB3MIWPM1/bAoTA2TVs4sb9Wg4Q==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-nprogress/-/plugin-nprogress-1.7.1.tgz", + "integrity": "sha512-KtqfI3RitbsEbm22EhbooTvhjfMf6zttKlbND7LcyJwP3MEPVYyzQJuET03hk9z4SgCfNV2r/W3sYyejzzTMog==", "requires": { "nprogress": "^0.2.0" } }, "@vuepress/plugin-register-components": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-register-components/-/plugin-register-components-1.4.0.tgz", - "integrity": "sha512-HmSzCTPVrlJJ8PSIXAvh4RkPy9bGmdrQuAXAtjiiq5rzBjL3uIg2VwzTrKDqf7FkCKs4lcRAEuNxB70bH6tddA==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-register-components/-/plugin-register-components-1.7.1.tgz", + "integrity": "sha512-MlFdH6l3rTCJlGMvyssXVG998cq5LSMzxCuQLYcRdtHQT4HbikIcV4HSPGarWInD1mP12+qX/PvKUawGwp1eVg==", "requires": { - "@vuepress/shared-utils": "^1.4.0" + "@vuepress/shared-utils": "1.7.1" } }, "@vuepress/plugin-search": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-search/-/plugin-search-1.4.0.tgz", - "integrity": "sha512-5K02DL9Wqlfy/aNiYXdbXBOGzR9zMNKz/P8lfHDU+ZOjtfNf6ImAdUkHS4pi70YkkTuemdYM8JjG/j5UYn6Rjw==" + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-search/-/plugin-search-1.7.1.tgz", + "integrity": "sha512-OmiGM5eYg9c+uC50b6/cSxAhqxfD7AIui6JEztFGeECrlP33RLHmteXK9YBBZjp5wTNmoYs+NXI/cWggYUPW8Q==" }, "@vuepress/shared-utils": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@vuepress/shared-utils/-/shared-utils-1.4.0.tgz", - "integrity": "sha512-6QTv7zMRXAojCuPRIm4aosYfrQO4OREhyxvbFeg/ZMWkVX+xZZQTdE7ZyK/4NAvEgkpjtPTRC1TQYhLJUqC5mQ==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/shared-utils/-/shared-utils-1.7.1.tgz", + "integrity": "sha512-ydB2ZKsFZE6hFRb9FWqzZksxAPIMJjtBawk50RP6F+YX5HbID/HlyYYZM9aDSbk6RTkjgB5UzJjggA2xM8POlw==", "requires": { "chalk": "^2.3.2", - "diacritics": "^1.3.0", "escape-html": "^1.0.3", "fs-extra": "^7.0.1", "globby": "^9.2.0", "gray-matter": "^4.0.1", "hash-sum": "^1.0.2", "semver": "^6.0.0", + "toml": "^3.0.0", "upath": "^1.1.0" } }, "@vuepress/theme-default": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/@vuepress/theme-default/-/theme-default-1.4.0.tgz", - "integrity": "sha512-4ywWVfXZTBha+yuvWoa1HRg0vMpT2wZF3zuW0PDXkDzxqP4DkLljJk8mPpepyuPYlSThn+gHNC8kmnNBbGp3Tw==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@vuepress/theme-default/-/theme-default-1.7.1.tgz", + "integrity": "sha512-a9HeTrlcWQj3ONHiABmlN2z9TyIxKfQtLsA8AL+WgjN3PikhFuZFIJGzfr+NLt67Y9oiI+S9ZfiaVyvWM+7bWQ==", "requires": { - "@vuepress/plugin-active-header-links": "^1.4.0", - "@vuepress/plugin-nprogress": "^1.4.0", - "@vuepress/plugin-search": "^1.4.0", + "@vuepress/plugin-active-header-links": "1.7.1", + "@vuepress/plugin-nprogress": "1.7.1", + "@vuepress/plugin-search": 
"1.7.1", "docsearch.js": "^2.5.2", "lodash": "^4.17.15", - "stylus": "^0.54.5", + "stylus": "^0.54.8", "stylus-loader": "^3.0.2", "vuepress-plugin-container": "^2.0.2", "vuepress-plugin-smooth-scroll": "^0.0.3" @@ -1590,9 +1851,9 @@ "integrity": "sha1-xdG9SxKQCPEWPyNvhuX66iAm4u8=" }, "ajv": { - "version": "6.12.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.0.tgz", - "integrity": "sha512-D6gFiFA0RRLyUbvijN74DWAjXSFxWKaWP7mldxkVhyhAV3+SWA9HEJPHQ2c9soIeTFJqcSdFDGFgdqs1iUU2Hw==", + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "requires": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -1606,55 +1867,29 @@ "integrity": "sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ==" }, "ajv-keywords": { - "version": "3.4.1", - "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.4.1.tgz", - "integrity": "sha512-RO1ibKvd27e6FEShVFfPALuHI3WjSVNeK5FIsmme/LYRNxjKuNj+Dt7bucLa6NdSv3JcVTyMlm9kGR84z1XpaQ==" + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==" }, "algoliasearch": { - "version": "3.35.1", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-3.35.1.tgz", - "integrity": "sha512-K4yKVhaHkXfJ/xcUnil04xiSrB8B8yHZoFEhWNpXg23eiCnqvTZw1tn/SqvdsANlYHLJlKl0qi3I/Q2Sqo7LwQ==", - "requires": { - "agentkeepalive": "^2.2.0", - "debug": "^2.6.9", - "envify": "^4.0.0", - "es6-promise": "^4.1.0", - "events": "^1.1.0", - "foreach": "^2.0.5", - "global": "^4.3.2", - "inherits": "^2.0.1", - "isarray": "^2.0.1", - "load-script": "^1.0.0", - "object-keys": "^1.0.11", - "querystring-es3": "^0.2.1", - "reduce": "^1.0.1", - "semver": "^5.1.0", - "tunnel-agent": "^0.6.0" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "events": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", - "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=" - }, - "isarray": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", - "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==" - }, - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" - } + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.6.0.tgz", + "integrity": "sha512-f4QVfUYnWIGZwOupZh0RAqW8zEfpZAcZG6ZT0p6wDMztEyKBrjjbTXBk9p9uEaJqoIhFUm6TtApOxodTdHbqvw==", + "requires": { + "@algolia/cache-browser-local-storage": "4.6.0", + "@algolia/cache-common": "4.6.0", + "@algolia/cache-in-memory": "4.6.0", + "@algolia/client-account": "4.6.0", + "@algolia/client-analytics": "4.6.0", + "@algolia/client-common": "4.6.0", + "@algolia/client-recommendation": "4.6.0", + "@algolia/client-search": "4.6.0", + "@algolia/logger-common": "4.6.0", + "@algolia/logger-console": "4.6.0", + 
"@algolia/requester-browser-xhr": "4.6.0", + "@algolia/requester-common": "4.6.0", + "@algolia/requester-node-http": "4.6.0", + "@algolia/transporter": "4.6.0" } }, "align-text": { @@ -1688,31 +1923,6 @@ "integrity": "sha512-ZpClVKqXN3RGBmKibdfWzqCY4lnjEuoNzU5T0oEFpfd/z5qJHVarukridD4juLO2FXMiwUQxr9WqQtaYa8XRYw==", "requires": { "string-width": "^3.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - }, - "strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "requires": { - "ansi-regex": "^4.1.0" - } - } } }, "ansi-colors": { @@ -1830,13 +2040,21 @@ } }, "asn1.js": { - "version": "4.10.1", - "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-4.10.1.tgz", - "integrity": "sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==", + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-5.4.1.tgz", + "integrity": "sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==", "requires": { "bn.js": "^4.0.0", "inherits": "^2.0.1", - "minimalistic-assert": "^1.0.0" + "minimalistic-assert": "^1.0.0", + "safer-buffer": "^2.1.0" + }, + "dependencies": { + "bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" + } } }, "assert": { @@ -1910,17 +2128,17 @@ } }, "autoprefixer": { - "version": "9.7.6", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.7.6.tgz", - "integrity": "sha512-F7cYpbN7uVVhACZTeeIeealwdGM6wMtfWARVLTy5xmKtgVdBNJvbDRoCK3YO1orcs7gv/KwYlb3iXwu9Ug9BkQ==", + "version": "9.8.6", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.8.6.tgz", + "integrity": "sha512-XrvP4VVHdRBCdX1S3WXVD8+RyG9qeb1D5Sn1DeLiG2xfSpzellk5k54xbUERJ3M5DggQxes39UGOTP8CFrEGbg==", "requires": { - "browserslist": "^4.11.1", - "caniuse-lite": "^1.0.30001039", - "chalk": "^2.4.2", + "browserslist": "^4.12.0", + "caniuse-lite": "^1.0.30001109", + "colorette": "^1.2.1", "normalize-range": "^0.1.2", "num2fraction": "^1.2.2", - "postcss": "^7.0.27", - "postcss-value-parser": "^4.0.3" + "postcss": "^7.0.32", + "postcss-value-parser": "^4.1.0" } }, "aws-sign2": { @@ -1929,16 +2147,23 @@ "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=" }, "aws4": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.9.1.tgz", - "integrity": "sha512-wMHVg2EOHaMRxbzgFJ9gtjOOCrI80OHLG14rxi28XwOW8ux6IiEbRCGGGqCtdAIg4FQCbW20k9RsT4y3gJlFug==" + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", + "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==" }, "axios": { - "version": "0.19.2", - "resolved": 
"https://registry.npmjs.org/axios/-/axios-0.19.2.tgz", - "integrity": "sha512-fjgm5MvRHLhx+osE2xoekY70AhARk3a6hkN+3Io1jc00jtquGvxYlKlsFUhmUET0V5te6CcZI7lcv2Ym61mjHA==", + "version": "0.21.0", + "resolved": "https://registry.npmjs.org/axios/-/axios-0.21.0.tgz", + "integrity": "sha512-fmkJBknJKoZwem3/IKSSLpkdNXZeBu5Q7GA/aRsr2btgrptmSCxi2oFjZHqGdK9DoTil9PIHlPIZw2EcRJXRvw==", "requires": { - "follow-redirects": "1.5.10" + "follow-redirects": "^1.10.0" + }, + "dependencies": { + "follow-redirects": { + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.13.0.tgz", + "integrity": "sha512-aq6gF1BEKje4a9i9+5jimNFIpq4Q1WiwBToeRK5NvZBd/TRsmW8BsJfOEGkr76TbOyPVD3OVDN910EcUNtRYEA==" + } } }, "babel-loader": { @@ -1951,12 +2176,22 @@ "mkdirp": "^0.5.3", "pify": "^4.0.1", "schema-utils": "^2.6.5" + }, + "dependencies": { + "mkdirp": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "requires": { + "minimist": "^1.2.5" + } + } } }, "babel-plugin-dynamic-import-node": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.0.tgz", - "integrity": "sha512-o6qFkpeQEBxcqt0XYlWzAVxNCSCZdUgcR8IRlhD/8DylxjjO4foPcvTW0GGKa/cVt3rvxZ7o5ippJ+/0nvLhlQ==", + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz", + "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==", "requires": { "object.assign": "^4.1.0" } @@ -2084,9 +2319,9 @@ "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==" }, "bn.js": { - "version": "4.11.8", - "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.8.tgz", - "integrity": "sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA==" + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.1.3.tgz", + "integrity": "sha512-GkTiFpjFtUzU9CbMeJ5iazkCzGL3jrhzerzZIuqLABjbwRaFt33I9tUdSNryIptM+RxDet6OKm2WnLXzW51KsQ==" }, "body-parser": { "version": "1.19.0", @@ -2159,11 +2394,10 @@ "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==" }, "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "requires": { - "@types/color-name": "^1.1.1", "color-convert": "^2.0.1" } }, @@ -2228,9 +2462,9 @@ } }, "supports-color": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", - "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "requires": { "has-flag": "^4.0.0" } @@ -2314,20 +2548,29 @@ 
"requires": { "bn.js": "^4.1.0", "randombytes": "^2.0.1" + }, + "dependencies": { + "bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" + } } }, "browserify-sign": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.0.4.tgz", - "integrity": "sha1-qk62jl17ZYuqa/alfmMMvXqT0pg=", + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/browserify-sign/-/browserify-sign-4.2.1.tgz", + "integrity": "sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg==", "requires": { - "bn.js": "^4.1.1", - "browserify-rsa": "^4.0.0", - "create-hash": "^1.1.0", - "create-hmac": "^1.1.2", - "elliptic": "^6.0.0", - "inherits": "^2.0.1", - "parse-asn1": "^5.0.0" + "bn.js": "^5.1.1", + "browserify-rsa": "^4.0.1", + "create-hash": "^1.2.0", + "create-hmac": "^1.1.7", + "elliptic": "^6.5.3", + "inherits": "^2.0.4", + "parse-asn1": "^5.1.5", + "readable-stream": "^3.6.0", + "safe-buffer": "^5.2.0" } }, "browserify-zlib": { @@ -2339,14 +2582,14 @@ } }, "browserslist": { - "version": "4.11.1", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.11.1.tgz", - "integrity": "sha512-DCTr3kDrKEYNw6Jb9HFxVLQNaue8z+0ZfRBRjmCunKDEXEBajKDj2Y+Uelg+Pi29OnvaSGwjOsnRyNEkXzHg5g==", + "version": "4.14.6", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.14.6.tgz", + "integrity": "sha512-zeFYcUo85ENhc/zxHbiIp0LGzzTrE2Pv2JhxvS7kpUb9Q9D38kUX6Bie7pGutJ/5iF5rOxE7CepAuWD56xJ33A==", "requires": { - "caniuse-lite": "^1.0.30001038", - "electron-to-chromium": "^1.3.390", - "node-releases": "^1.1.53", - "pkg-up": "^2.0.0" + "caniuse-lite": "^1.0.30001154", + "electron-to-chromium": "^1.3.585", + "escalade": "^3.1.1", + "node-releases": "^1.1.65" } }, "buffer": { @@ -2390,9 +2633,9 @@ "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=" }, "cac": { - "version": "6.5.8", - "resolved": "https://registry.npmjs.org/cac/-/cac-6.5.8.tgz", - "integrity": "sha512-jLv2+ps4T2HRVR1k4UlQZoAFvliAhf5LVR0yjPjIaIr/Cw99p/I7CXIEkXtw5q+AkYk4NCFJcF5ErmELSyrZnw==" + "version": "6.6.1", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.6.1.tgz", + "integrity": "sha512-uhki4T3Ax68hw7Dufi0bATVAF8ayBSwOKUEJHjObPrUN4tlQ8Lf7oljpTje/mArLxYN0D743c2zJt4C1bVTCqg==" }, "cacache": { "version": "12.0.4", @@ -2414,6 +2657,16 @@ "ssri": "^6.0.1", "unique-filename": "^1.1.1", "y18n": "^4.0.0" + }, + "dependencies": { + "mkdirp": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "requires": { + "minimist": "^1.2.5" + } + } } }, "cache-base": { @@ -2445,6 +2698,14 @@ "schema-utils": "^1.0.0" }, "dependencies": { + "mkdirp": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "requires": { + "minimist": "^1.2.5" + } + }, "schema-utils": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", @@ -2472,9 +2733,9 @@ }, "dependencies": { "get-stream": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.1.0.tgz", - "integrity": 
"sha512-EXr1FOzrzTfGeL0gQdeFEvOMm2mzMOglyiOXSTpPC+iAjAKftbr3jpCMWynogwYnM+eSj9sHGc6wjIcDvYiygw==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", + "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", "requires": { "pump": "^3.0.0" } @@ -2491,6 +2752,15 @@ } } }, + "call-bind": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.0.tgz", + "integrity": "sha512-AEXsYIyyDY3MCzbwdhzG3Jx1R0J2wetQyUynn6dYHAO+bg8l1k7jwZtRv4ryryFs7EP+NDlikJlVe59jr0cM2w==", + "requires": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.0" + } + }, "call-me-maybe": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.1.tgz", @@ -2543,9 +2813,9 @@ } }, "caniuse-lite": { - "version": "1.0.30001039", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001039.tgz", - "integrity": "sha512-SezbWCTT34eyFoWHgx8UWso7YtvtM7oosmFoXbCkdC6qJzRfBTeTgE9REtKtiuKXuMwWTZEvdnFNGAyVMorv8Q==" + "version": "1.0.30001154", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001154.tgz", + "integrity": "sha512-y9DvdSti8NnYB9Be92ddMZQrcOe04kcQtcxtBx4NkB04+qZ+JUWotnXBJTmxlKudhxNTQ3RRknMwNU2YQl/Org==" }, "caseless": { "version": "0.12.0", @@ -2675,9 +2945,9 @@ } }, "cli-boxes": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.0.tgz", - "integrity": "sha512-gpaBrMAizVEANOpfZp/EEUixTXDyGt7DFzdK5hU+UbWt/J0lB0w20ncZj59Z9a93xHb9u12zF5BS6i9RKbtg4w==" + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", + "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==" }, "clipboard": { "version": "2.0.6", @@ -2691,9 +2961,9 @@ } }, "clipboard-copy": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/clipboard-copy/-/clipboard-copy-3.1.0.tgz", - "integrity": "sha512-Xsu1NddBXB89IUauda5BIq3Zq73UWkjkaQlPQbLNvNsd5WBMnTWPNKYR6HGaySOxGYZ+BKxP2E9X4ElnI3yiPA==" + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/clipboard-copy/-/clipboard-copy-3.2.0.tgz", + "integrity": "sha512-vooFaGFL6ulEP1liiaWFBmmfuPm3cY3y7T9eB83ZTnYc/oFeAKsq3NcDrOkBC8XaauEE8zHQwI7k0+JSYiVQSQ==" }, "cliui": { "version": "2.1.0", @@ -2723,11 +2993,6 @@ "q": "^1.1.2" } }, - "code-point-at": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", - "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=" - }, "collection-visit": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz", @@ -2738,12 +3003,12 @@ } }, "color": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/color/-/color-3.1.2.tgz", - "integrity": "sha512-vXTJhHebByxZn3lDvDJYw4lR5+uB3vuoHsuYA5AKuxRVn5wzzIfQKGLBmgdVRHKTJYeK5rvJcHnrd0Li49CFpg==", + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/color/-/color-3.1.3.tgz", + "integrity": "sha512-xgXAcTHa2HeFCGLE9Xs/R82hujGtu9Jd9x4NW3T34+OMs7VoPsjwzRczKHvTAHeJwWFwX5j15+MgAppE8ztObQ==", "requires": { "color-convert": "^1.9.1", - "color-string": "^1.5.2" + "color-string": "^1.5.4" } }, "color-convert": { @@ -2760,14 +3025,19 @@ "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" }, "color-string": { - "version": "1.5.3", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.3.tgz", - "integrity": 
"sha512-dC2C5qeWoYkxki5UAXapdjqO672AM4vZuPGRQfO8b5HKuKGBbKWpITyDYN7TOFKvRW7kOgAn3746clDBMDJyQw==", + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.4.tgz", + "integrity": "sha512-57yF5yt8Xa3czSEW1jfQDE79Idk0+AkN/4KWad6tbdxUmAs3MvjxlWSWD4deYytcRfoZ9nhKyFl1kj5tBvidbw==", "requires": { "color-name": "^1.0.0", "simple-swizzle": "^0.2.2" } }, + "colorette": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.2.1.tgz", + "integrity": "sha512-puCDz0CzydiSYOrnXpz/PKd69zRrribezjtE9yd4zvytoRc8+RY/KJPvtPFKZS3E3wP6neGyMe0vOTlHO5L3Pw==" + }, "combined-stream": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", @@ -2887,9 +3157,9 @@ }, "dependencies": { "make-dir": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.0.2.tgz", - "integrity": "sha512-rYKABKutXa6vXTXhoV18cBE7PaewPXHe/Bdq4v+ZLMhxbWApkFFplT0LcbMW+6BbjnQXzZ/sAvSE/JdguApG5w==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", + "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", "requires": { "semver": "^6.0.0" } @@ -2902,9 +3172,9 @@ "integrity": "sha512-e54B99q/OUoH64zYYRf3HBP5z24G38h5D3qXu23JGRoigpX5Ss4r9ZnDk3g0Z8uQC2x2lPaJ+UlWBc1ZWBWdLg==" }, "consola": { - "version": "2.11.3", - "resolved": "https://registry.npmjs.org/consola/-/consola-2.11.3.tgz", - "integrity": "sha512-aoW0YIIAmeftGR8GSpw6CGQluNdkWMWh3yEFjH/hmynTYnMtibXszii3lxCXmk8YxJtI3FAK5aTiquA5VH68Gw==" + "version": "2.15.0", + "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.0.tgz", + "integrity": "sha512-vlcSGgdYS26mPf7qNi+dCisbhiyDnrN1zaRbw3CSuc2wGOMEGGPsp46PdRG5gqXwgtJfjxDkxRNAgRPr1B77vQ==" }, "console-browserify": { "version": "1.2.0", @@ -2991,6 +3261,16 @@ "mkdirp": "^0.5.1", "rimraf": "^2.5.4", "run-queue": "^1.0.0" + }, + "dependencies": { + "mkdirp": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "requires": { + "minimist": "^1.2.5" + } + } } }, "copy-descriptor": { @@ -2999,9 +3279,9 @@ "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=" }, "copy-webpack-plugin": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-5.1.1.tgz", - "integrity": "sha512-P15M5ZC8dyCjQHWwd4Ia/dm0SgVvZJMYeykVIVYXbGyqO4dWB5oyPHp9i7wjwo5LhtlhKbiBCdS2NvM07Wlybg==", + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-5.1.2.tgz", + "integrity": "sha512-Uh7crJAco3AjBvgAy9Z75CjK8IG+gxaErro71THQ+vv/bl4HaQcpkexAY8KVW/T6D2W2IRr+couF/knIRkZMIQ==", "requires": { "cacache": "^12.0.3", "find-cache-dir": "^2.1.0", @@ -3013,7 +3293,7 @@ "normalize-path": "^3.0.0", "p-limit": "^2.2.1", "schema-utils": "^1.0.0", - "serialize-javascript": "^2.1.2", + "serialize-javascript": "^4.0.0", "webpack-log": "^2.0.0" }, "dependencies": { @@ -3035,19 +3315,6 @@ "resolved": "https://registry.npmjs.org/ignore/-/ignore-3.3.10.tgz", "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug==" }, - "p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - 
"requires": { - "p-try": "^2.0.0" - } - }, - "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" - }, "pify": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", @@ -3076,11 +3343,11 @@ "integrity": "sha512-5wjnpaT/3dV+XB4borEsnAYQchn00XSgTAWKDkEqv+K8KevjbzmofK6hfJ9TZIlpj2N0xQpazy7PiRQiWHqzWg==" }, "core-js-compat": { - "version": "3.6.4", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.6.4.tgz", - "integrity": "sha512-zAa3IZPvsJ0slViBQ2z+vgyyTuhd3MFn1rBQjZSKVEgB0UMYhUkCj9jJUVPgGTGqWvsBVmfnruXgTcNyTlEiSA==", + "version": "3.6.5", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.6.5.tgz", + "integrity": "sha512-7ItTKOhOZbznhXAQ2g/slGg1PJV5zDO/WdkTwi7UEOJmkvsE32PWvx6mKtDjiMpjnR2CNf6BAD6sSxIlv7ptng==", "requires": { - "browserslist": "^4.8.3", + "browserslist": "^4.8.5", "semver": "7.0.0" }, "dependencies": { @@ -3108,12 +3375,19 @@ } }, "create-ecdh": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.3.tgz", - "integrity": "sha512-GbEHQPMOswGpKXM9kCWVrremUcBmjteUaQ01T9rkKCPDXfUHX0IoP9LpHYo2NPFampa4e+/pFDc3jQdxrxQLaw==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/create-ecdh/-/create-ecdh-4.0.4.tgz", + "integrity": "sha512-mf+TCx8wWc9VpuxfP2ht0iSISLZnt0JgWlrOKZiNqyUZWnjIaCIVNQArMHnCZKfEYRg6IM7A+NeJoN8gf/Ws0A==", "requires": { "bn.js": "^4.1.0", - "elliptic": "^6.0.0" + "elliptic": "^6.5.3" + }, + "dependencies": { + "bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" + } } }, "create-hash": { @@ -3363,26 +3637,26 @@ "integrity": "sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q==" }, "csso": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/csso/-/csso-4.0.3.tgz", - "integrity": "sha512-NL3spysxUkcrOgnpsT4Xdl2aiEiBG6bXswAABQVHcMrfjjBisFOKwLDOmf4wf32aPdcJws1zds2B0Rg+jqMyHQ==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/csso/-/csso-4.1.0.tgz", + "integrity": "sha512-h+6w/W1WqXaJA4tb1dk7r5tVbOm97MsKxzwnvOR04UQ6GILroryjMWu3pmCCtL2mLaEStQ0fZgeGiy99mo7iyg==", "requires": { - "css-tree": "1.0.0-alpha.39" + "css-tree": "^1.0.0" }, "dependencies": { "css-tree": { - "version": "1.0.0-alpha.39", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.39.tgz", - "integrity": "sha512-7UvkEYgBAHRG9Nt980lYxjsTrCyHFN53ky3wVsDkiMdVqylqRt+Zc+jm5qw7/qyOvN2dHSYtX0e4MbCCExSvnA==", + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0.tgz", + "integrity": "sha512-CdVYz/Yuqw0VdKhXPBIgi8DO3NicJVYZNWeX9XcIuSp9ZoFT5IcleVRW07O5rMjdcx1mb+MEJPknTTEW7DdsYw==", "requires": { - "mdn-data": "2.0.6", + "mdn-data": "2.0.12", "source-map": "^0.6.1" } }, "mdn-data": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.6.tgz", - "integrity": "sha512-rQvjv71olwNHgiTbfPZFkJtjNMciWgswYeciZhtvWLO8bmX3TnhyA62I6sTWOyZssWHJJjY6/KiWwqQsWWsqOA==" + "version": "2.0.12", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.12.tgz", + "integrity": "sha512-ULbAlgzVb8IqZ0Hsxm6hHSlQl3Jckst2YEQS7fODu9ilNWy2LvcoSY7TRFIktABP2mdppBioc66va90T+NUs8Q==" } } }, @@ -3582,11 +3856,6 @@ "resolved": 
"https://registry.npmjs.org/detect-node/-/detect-node-2.0.4.tgz", "integrity": "sha512-ZIzRpLJrOj7jjP2miAtgqIfmzbxa4ZOr5jJc601zklsfEx9oTzmmj2nVpIPRpNlRTIh8lc1kyViIY7BWSGNmKw==" }, - "diacritics": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/diacritics/-/diacritics-1.3.0.tgz", - "integrity": "sha1-PvqHMj67hj5mls67AILUj/PW96E=" - }, "diffie-hellman": { "version": "5.0.3", "resolved": "https://registry.npmjs.org/diffie-hellman/-/diffie-hellman-5.0.3.tgz", @@ -3595,6 +3864,13 @@ "bn.js": "^4.1.0", "miller-rabin": "^4.0.0", "randombytes": "^2.0.0" + }, + "dependencies": { + "bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" + } } }, "dir-glob": { @@ -3639,6 +3915,53 @@ "stack-utils": "^1.0.1", "to-factory": "^1.0.0", "zepto": "^1.2.0" + }, + "dependencies": { + "algoliasearch": { + "version": "3.35.1", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-3.35.1.tgz", + "integrity": "sha512-K4yKVhaHkXfJ/xcUnil04xiSrB8B8yHZoFEhWNpXg23eiCnqvTZw1tn/SqvdsANlYHLJlKl0qi3I/Q2Sqo7LwQ==", + "requires": { + "agentkeepalive": "^2.2.0", + "debug": "^2.6.9", + "envify": "^4.0.0", + "es6-promise": "^4.1.0", + "events": "^1.1.0", + "foreach": "^2.0.5", + "global": "^4.3.2", + "inherits": "^2.0.1", + "isarray": "^2.0.1", + "load-script": "^1.0.0", + "object-keys": "^1.0.11", + "querystring-es3": "^0.2.1", + "reduce": "^1.0.1", + "semver": "^5.1.0", + "tunnel-agent": "^0.6.0" + } + }, + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "requires": { + "ms": "2.0.0" + } + }, + "events": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", + "integrity": "sha1-nr23Y1rQmccNzEwqH1AEKI6L2SQ=" + }, + "isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==" + }, + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" + } } }, "doctypes": { @@ -3703,9 +4026,9 @@ } }, "dot-prop": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.2.0.tgz", - "integrity": "sha512-uEUyaDKoSQ1M4Oq8l45hSE26SnTxL6snNnqvK/VWx5wJhmff5z0FUVJDKDanor/6w3kzE3i7XZOk+7wC0EXr1A==", + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz", + "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==", "requires": { "is-obj": "^2.0.0" } @@ -3770,14 +4093,14 @@ "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" }, "electron-to-chromium": { - "version": "1.3.398", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.398.tgz", - "integrity": "sha512-BJjxuWLKFbM5axH3vES7HKMQgAknq9PZHBkMK/rEXUQG9i1Iw5R+6hGkm6GtsQSANjSUrh/a6m32nzCNDNo/+w==" + "version": "1.3.586", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.586.tgz", + "integrity": "sha512-or8FCbQCRlPZHkOoqBULOI9hzTiStVIQqDLgAPt8pzY+swTrW+89vsqd24Zn+Iv4guAJLxRBD6OR5AmbpabGDA==" }, 
"elliptic": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.2.tgz", - "integrity": "sha512-f4x70okzZbIQl/NSRLkI/+tteV/9WqL98zx+SQ69KbXxmVrmjwsNUPn/gYJJ0sHvEak24cZgHIPegRePAtA/xw==", + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/elliptic/-/elliptic-6.5.3.tgz", + "integrity": "sha512-IMqzv5wNQf+E6aHeIqATs0tOLeOTwj1QKbRcS3jBbYkl5oLAserA8yJTT7/VyHUYG91PRmPyeQDObKLPpeS4dw==", "requires": { "bn.js": "^4.4.0", "brorand": "^1.0.1", @@ -3786,6 +4109,13 @@ "inherits": "^2.0.1", "minimalistic-assert": "^1.0.0", "minimalistic-crypto-utils": "^1.0.0" + }, + "dependencies": { + "bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" + } } }, "emoji-regex": { @@ -3812,9 +4142,9 @@ } }, "enhanced-resolve": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.1.1.tgz", - "integrity": "sha512-98p2zE+rL7/g/DzMHMTF4zZlCgeVdJ7yr6xzEpJRYwFYrGi9ANdn5DnJURg6RpBkyk60XYDnWIv51VfIhfNGuA==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.3.0.tgz", + "integrity": "sha512-3e87LvavsdxyoCfGusJnrZ5G8SLPOFeHSNpZI/ATL9a5leXo2k0w6MKnbqhdBad9qTobSfB20Ld7UmgoNbAZkQ==", "requires": { "graceful-fs": "^4.1.2", "memory-fs": "^0.5.0", @@ -3860,9 +4190,9 @@ } }, "entities": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.0.0.tgz", - "integrity": "sha512-D9f7V0JSRwIxlRI2mjMqufDrRDnx8p+eEOz7aUM9SuvF8gsBzra0/6tbjl1m8eQHrZlYj6PxqE00hZ1SAIKPLw==" + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz", + "integrity": "sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w==" }, "envify": { "version": "4.1.0", @@ -3874,9 +4204,9 @@ } }, "envinfo": { - "version": "7.5.0", - "resolved": "https://registry.npmjs.org/envinfo/-/envinfo-7.5.0.tgz", - "integrity": "sha512-jDgnJaF/Btomk+m3PZDTTCb5XIIIX3zYItnCRfF73zVgvinLoRomuhi75Y4su0PtQxWz4v66XnLLckyvyJTOIQ==" + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/envinfo/-/envinfo-7.7.3.tgz", + "integrity": "sha512-46+j5QxbPWza0PB1i15nZx0xQ4I/EfQxg9J8Had3b408SV63nEtor2e+oiY63amTo9KTuh2a3XLObNwduxYwwA==" }, "errno": { "version": "0.1.7", @@ -3895,21 +4225,21 @@ } }, "es-abstract": { - "version": "1.17.5", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.5.tgz", - "integrity": "sha512-BR9auzDbySxOcfog0tLECW8l28eRGpDpU3Dm3Hp4q/N+VtLTmyj4EUN088XZWQDW/hzj6sYRDXeOFsaAODKvpg==", + "version": "1.17.7", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.7.tgz", + "integrity": "sha512-VBl/gnfcJ7OercKA9MVaegWsBHFjV492syMudcnQZvt/Dw8ezpcOHYZXa/J96O8vx+g4x65YKhxOwDUh63aS5g==", "requires": { "es-to-primitive": "^1.2.1", "function-bind": "^1.1.1", "has": "^1.0.3", "has-symbols": "^1.0.1", - "is-callable": "^1.1.5", - "is-regex": "^1.0.5", - "object-inspect": "^1.7.0", + "is-callable": "^1.2.2", + "is-regex": "^1.1.1", + "object-inspect": "^1.8.0", "object-keys": "^1.1.1", - "object.assign": "^4.1.0", - "string.prototype.trimleft": "^2.1.1", - "string.prototype.trimright": "^2.1.1" + "object.assign": "^4.1.1", + "string.prototype.trimend": "^1.0.1", + "string.prototype.trimstart": "^1.0.1" } }, "es-to-primitive": { @@ -3927,6 +4257,11 @@ "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", 
"integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" }, + "escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==" + }, "escape-goat": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz", @@ -3951,17 +4286,29 @@ "estraverse": "^4.1.1" } }, + "esm": { + "version": "3.2.25", + "resolved": "https://registry.npmjs.org/esm/-/esm-3.2.25.tgz", + "integrity": "sha512-U1suiZ2oDVWv4zPO56S0NcR5QriEahGtdN2OR6FiOG4WJvcjBVFB0qI4+eKoWFH483PKGuLuu6V8Z4T5g63UVA==" + }, "esprima": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" }, "esrecurse": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.2.1.tgz", - "integrity": "sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "requires": { - "estraverse": "^4.1.0" + "estraverse": "^5.2.0" + }, + "dependencies": { + "estraverse": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.2.0.tgz", + "integrity": "sha512-BxbNGGNm0RyRYvUdHpIwv9IWzeM9XClbOxwoATuFdOE7ZE6wHL+HQ5T8hoPM+zHvmKzzsEqhgy0GrQ5X13afiQ==" + } } }, "estraverse": { @@ -3980,14 +4327,14 @@ "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=" }, "eventemitter3": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.0.tgz", - "integrity": "sha512-qerSRB0p+UDEssxTtm6EDKcE7W4OaoisfIMl4CngyEhjpYglocpNg6UEqCvemdGhosAsg4sO2dXJOdyBifPGCg==" + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" }, "events": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/events/-/events-3.1.0.tgz", - "integrity": "sha512-Rv+u8MLHNOdMjTAFeT3nCjHn2aGlx435FP/sDHNaRhDEMwyI/aB22Kj2qIN8R0cw3z28psEQLYwxVKLsKrMgWg==" + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.2.0.tgz", + "integrity": "sha512-/46HWwbfCX2xTawVfkKLGxMifJYQBWMwY1mjywRtb4c9x8l5NP3KoJtnIOiL1hfdRkIuYhETxQlo62IF8tcnlg==" }, "eventsource": { "version": "1.0.7", @@ -4179,9 +4526,9 @@ "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=" }, "fast-deep-equal": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.1.tgz", - "integrity": "sha512-8UEa58QDLauDNfpbrX55Q9jrGHThw2ZMdOky5Gl1CDtVeJDPVrG4Jxx1N8jw2gkWaff5UUuX1KJd+9zGe2B+ZA==" + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" }, "fast-glob": { "version": "2.2.7", @@ -4295,11 +4642,11 @@ } }, "find-up": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", - "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", "requires": { - "locate-path": "^2.0.0" + "locate-path": "^3.0.0" } }, "flush-write-stream": { @@ -4444,412 +4791,16 @@ "resolved": "https://registry.npmjs.org/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz", "integrity": "sha1-tH31NJPvkR33VzHnCp3tAYnbQMk=", "requires": { - "graceful-fs": "^4.1.2", - "iferr": "^0.1.5", - "imurmurhash": "^0.1.4", - "readable-stream": "1 || 2" - }, - "dependencies": { - "readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" - }, - "fsevents": { - "version": "1.2.12", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.12.tgz", - "integrity": "sha512-Ggd/Ktt7E7I8pxZRbGIs7vwqAPscSESMrCSkx2FtWeqmheJgCo2R74fTsZFCifr0VTPwqRpPv17+6b8Zp7th0Q==", - "optional": true, - "requires": { - "bindings": "^1.5.0", - "nan": "^2.12.1", - "node-pre-gyp": "*" - }, - "dependencies": { - "abbrev": { - "version": "1.1.1", - "bundled": true, - "optional": true - }, - "ansi-regex": { - "version": "2.1.1", - "bundled": true, - "optional": true - }, - "aproba": { - "version": "1.2.0", - "bundled": true, - "optional": true - }, - "are-we-there-yet": { - "version": "1.1.5", - "bundled": true, - "optional": true, - "requires": { - "delegates": "^1.0.0", - "readable-stream": "^2.0.6" - } - }, - "balanced-match": { - "version": "1.0.0", - "bundled": true, - "optional": true - }, - "brace-expansion": { - "version": "1.1.11", - "bundled": true, - "optional": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "chownr": { - "version": "1.1.4", - "bundled": true, - "optional": true - }, - "code-point-at": { - "version": "1.1.0", - "bundled": true, - "optional": true - }, - "concat-map": { - "version": "0.0.1", - "bundled": true, - "optional": true - }, - "console-control-strings": { - "version": "1.1.0", - "bundled": true, - "optional": true - }, - "core-util-is": { - "version": "1.0.2", - "bundled": true, - "optional": true - }, - "debug": { - "version": "3.2.6", - "bundled": true, - "optional": true, - "requires": { - "ms": "^2.1.1" - } - }, - "deep-extend": { - "version": "0.6.0", - "bundled": true, - "optional": true - }, - "delegates": { - "version": "1.0.0", - "bundled": true, - "optional": true - }, - "detect-libc": { - "version": "1.0.3", - "bundled": 
true, - "optional": true - }, - "fs-minipass": { - "version": "1.2.7", - "bundled": true, - "optional": true, - "requires": { - "minipass": "^2.6.0" - } - }, - "fs.realpath": { - "version": "1.0.0", - "bundled": true, - "optional": true - }, - "gauge": { - "version": "2.7.4", - "bundled": true, - "optional": true, - "requires": { - "aproba": "^1.0.3", - "console-control-strings": "^1.0.0", - "has-unicode": "^2.0.0", - "object-assign": "^4.1.0", - "signal-exit": "^3.0.0", - "string-width": "^1.0.1", - "strip-ansi": "^3.0.1", - "wide-align": "^1.1.0" - } - }, - "glob": { - "version": "7.1.6", - "bundled": true, - "optional": true, - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "has-unicode": { - "version": "2.0.1", - "bundled": true, - "optional": true - }, - "iconv-lite": { - "version": "0.4.24", - "bundled": true, - "optional": true, - "requires": { - "safer-buffer": ">= 2.1.2 < 3" - } - }, - "ignore-walk": { - "version": "3.0.3", - "bundled": true, - "optional": true, - "requires": { - "minimatch": "^3.0.4" - } - }, - "inflight": { - "version": "1.0.6", - "bundled": true, - "optional": true, - "requires": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "inherits": { - "version": "2.0.4", - "bundled": true, - "optional": true - }, - "ini": { - "version": "1.3.5", - "bundled": true, - "optional": true - }, - "is-fullwidth-code-point": { - "version": "1.0.0", - "bundled": true, - "optional": true, - "requires": { - "number-is-nan": "^1.0.0" - } - }, - "isarray": { - "version": "1.0.0", - "bundled": true, - "optional": true - }, - "minimatch": { - "version": "3.0.4", - "bundled": true, - "optional": true, - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "minimist": { - "version": "1.2.5", - "bundled": true, - "optional": true - }, - "minipass": { - "version": "2.9.0", - "bundled": true, - "optional": true, - "requires": { - "safe-buffer": "^5.1.2", - "yallist": "^3.0.0" - } - }, - "minizlib": { - "version": "1.3.3", - "bundled": true, - "optional": true, - "requires": { - "minipass": "^2.9.0" - } - }, - "mkdirp": { - "version": "0.5.3", - "bundled": true, - "optional": true, - "requires": { - "minimist": "^1.2.5" - } - }, - "ms": { - "version": "2.1.2", - "bundled": true, - "optional": true - }, - "needle": { - "version": "2.3.3", - "bundled": true, - "optional": true, - "requires": { - "debug": "^3.2.6", - "iconv-lite": "^0.4.4", - "sax": "^1.2.4" - } - }, - "node-pre-gyp": { - "version": "0.14.0", - "bundled": true, - "optional": true, - "requires": { - "detect-libc": "^1.0.2", - "mkdirp": "^0.5.1", - "needle": "^2.2.1", - "nopt": "^4.0.1", - "npm-packlist": "^1.1.6", - "npmlog": "^4.0.2", - "rc": "^1.2.7", - "rimraf": "^2.6.1", - "semver": "^5.3.0", - "tar": "^4.4.2" - } - }, - "nopt": { - "version": "4.0.3", - "bundled": true, - "optional": true, - "requires": { - "abbrev": "1", - "osenv": "^0.1.4" - } - }, - "npm-bundled": { - "version": "1.1.1", - "bundled": true, - "optional": true, - "requires": { - "npm-normalize-package-bin": "^1.0.1" - } - }, - "npm-normalize-package-bin": { - "version": "1.0.1", - "bundled": true, - "optional": true - }, - "npm-packlist": { - "version": "1.4.8", - "bundled": true, - "optional": true, - "requires": { - "ignore-walk": "^3.0.1", - "npm-bundled": "^1.0.1", - "npm-normalize-package-bin": "^1.0.1" - } - }, - "npmlog": { - "version": "4.1.2", - "bundled": true, - "optional": true, - "requires": { - "are-we-there-yet": 
"~1.1.2", - "console-control-strings": "~1.1.0", - "gauge": "~2.7.3", - "set-blocking": "~2.0.0" - } - }, - "number-is-nan": { - "version": "1.0.1", - "bundled": true, - "optional": true - }, - "object-assign": { - "version": "4.1.1", - "bundled": true, - "optional": true - }, - "once": { - "version": "1.4.0", - "bundled": true, - "optional": true, - "requires": { - "wrappy": "1" - } - }, - "os-homedir": { - "version": "1.0.2", - "bundled": true, - "optional": true - }, - "os-tmpdir": { - "version": "1.0.2", - "bundled": true, - "optional": true - }, - "osenv": { - "version": "0.1.5", - "bundled": true, - "optional": true, - "requires": { - "os-homedir": "^1.0.0", - "os-tmpdir": "^1.0.0" - } - }, - "path-is-absolute": { - "version": "1.0.1", - "bundled": true, - "optional": true - }, - "process-nextick-args": { - "version": "2.0.1", - "bundled": true, - "optional": true - }, - "rc": { - "version": "1.2.8", - "bundled": true, - "optional": true, - "requires": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - } - }, + "graceful-fs": "^4.1.2", + "iferr": "^0.1.5", + "imurmurhash": "^0.1.4", + "readable-stream": "1 || 2" + }, + "dependencies": { "readable-stream": { "version": "2.3.7", - "bundled": true, - "optional": true, + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", "requires": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", @@ -4860,114 +4811,36 @@ "util-deprecate": "~1.0.1" } }, - "rimraf": { - "version": "2.7.1", - "bundled": true, - "optional": true, - "requires": { - "glob": "^7.1.3" - } - }, "safe-buffer": { "version": "5.1.2", - "bundled": true, - "optional": true - }, - "safer-buffer": { - "version": "2.1.2", - "bundled": true, - "optional": true - }, - "sax": { - "version": "1.2.4", - "bundled": true, - "optional": true - }, - "semver": { - "version": "5.7.1", - "bundled": true, - "optional": true - }, - "set-blocking": { - "version": "2.0.0", - "bundled": true, - "optional": true - }, - "signal-exit": { - "version": "3.0.2", - "bundled": true, - "optional": true - }, - "string-width": { - "version": "1.0.2", - "bundled": true, - "optional": true, - "requires": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" - } + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" }, "string_decoder": { "version": "1.1.1", - "bundled": true, - "optional": true, + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", "requires": { "safe-buffer": "~5.1.0" } - }, - "strip-ansi": { - "version": "3.0.1", - "bundled": true, - "optional": true, - "requires": { - "ansi-regex": "^2.0.0" - } - }, - "strip-json-comments": { - "version": "2.0.1", - "bundled": true, - "optional": true - }, - "tar": { - "version": "4.4.13", - "bundled": true, - "optional": true, - "requires": { - "chownr": "^1.1.1", - "fs-minipass": "^1.2.5", - "minipass": "^2.8.6", - "minizlib": "^1.2.1", - "mkdirp": "^0.5.0", - "safe-buffer": "^5.1.2", - "yallist": "^3.0.3" - } - }, - "util-deprecate": { - "version": "1.0.2", - "bundled": true, - "optional": true - }, - "wide-align": { - "version": "1.1.3", 
- "bundled": true, - "optional": true, - "requires": { - "string-width": "^1.0.2 || 2" - } - }, - "wrappy": { - "version": "1.0.2", - "bundled": true, - "optional": true - }, - "yallist": { - "version": "3.1.1", - "bundled": true, - "optional": true } } }, + "fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" + }, + "fsevents": { + "version": "1.2.13", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.13.tgz", + "integrity": "sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==", + "optional": true, + "requires": { + "bindings": "^1.5.0", + "nan": "^2.12.1" + } + }, "function-bind": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", @@ -4979,14 +4852,24 @@ "integrity": "sha512-hT9yh/tiinkmirKrlv4KWOjztdoZo1mx9Qh4KvWqC7isoXwdUY3PNWUxceF4/qO9R6riA2C29jdTOeQOIROjgw==" }, "gensync": { - "version": "1.0.0-beta.1", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.1.tgz", - "integrity": "sha512-r8EC6NO1sngH/zdD9fiRDLdcgnbayXah+mLgManTaIZJqEC1MZstmnox8KpnI2/fxQwrp5OpCOYWLp4rBl4Jcg==" + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==" }, "get-caller-file": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-1.0.3.tgz", - "integrity": "sha512-3t6rVToeoZfYSGd8YoLFR2DJkiQrIiUrGcjvFX2mDw3bn6k2OtwHN0TNCLbBO+w8qTvimhDkv+LSscbJY1vE6w==" + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==" + }, + "get-intrinsic": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.0.1.tgz", + "integrity": "sha512-ZnWP+AmS1VUaLgTRy47+zKtjTxz+0xMpx3I52i+aalBK1QP19ggLF3Db89KJX7kjfOfP2eoa01qc++GwPgufPg==", + "requires": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1" + } }, "get-stream": { "version": "4.1.0", @@ -5111,9 +4994,9 @@ } }, "graceful-fs": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz", - "integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ==" + "version": "4.2.4", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.4.tgz", + "integrity": "sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw==" }, "gray-matter": { "version": "4.0.2", @@ -5137,11 +5020,11 @@ "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=" }, "har-validator": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.3.tgz", - "integrity": "sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g==", + "version": "5.1.5", + "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", + "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", "requires": { - "ajv": "^6.5.5", + "ajv": "^6.12.3", "har-schema": "^2.0.0" } }, @@ -5206,12 +5089,13 @@ "integrity": 
"sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==" }, "hash-base": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.0.4.tgz", - "integrity": "sha1-X8hoaEfs1zSZQDMZprCj8/auSRg=", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz", + "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==", "requires": { - "inherits": "^2.0.1", - "safe-buffer": "^5.0.1" + "inherits": "^2.0.4", + "readable-stream": "^3.6.0", + "safe-buffer": "^5.2.0" } }, "hash-sum": { @@ -5265,9 +5149,9 @@ } }, "hotkeys-js": { - "version": "3.7.6", - "resolved": "https://registry.npmjs.org/hotkeys-js/-/hotkeys-js-3.7.6.tgz", - "integrity": "sha512-X5d16trjp79o+OaCn7syXu0cs+TkLYlK/teE5FhpD1Cj9ROcEIhfIQ7Mhrk761ynF3NQLbLn5xRojP2UuSqDAw==" + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/hotkeys-js/-/hotkeys-js-3.8.1.tgz", + "integrity": "sha512-YlhVQtyG9f1b7GhtzdhR0Pl+cImD1ZrKI6zYUa7QLd0zuThiL7RzZ+ANJyy7z+kmcCpNYBf5PjBa3CjiQ5PFpw==" }, "hpack.js": { "version": "2.1.6", @@ -5325,9 +5209,9 @@ "integrity": "sha512-P+M65QY2JQ5Y0G9KKdlDpo0zK+/OHptU5AaBwUfAIDJZk1MYf32Frm84EcOytfJE0t5JvkAnKlmjsXDnWzCJmQ==" }, "html-entities": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-1.2.1.tgz", - "integrity": "sha1-DfKTUfByEWNRXfueVUPl9u7VFi8=" + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-1.3.1.tgz", + "integrity": "sha512-rhE/4Z3hIhzHAUKbW8jVcCyuT5oJCXXqhN/6mXXVCpzTmvJnoH2HL/bt3EZ6p55jbFJBeAe1ZNpL5BugLujxNA==" }, "html-minifier": { "version": "3.5.21", @@ -5362,9 +5246,9 @@ } }, "html-tags": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-2.0.0.tgz", - "integrity": "sha1-ELMKOGCF9Dzt41PMj6fLDe7qZos=" + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.1.0.tgz", + "integrity": "sha512-1qYz89hW3lFDEazhjW0yVAV87lw8lVkrJocr72XmBkMKsoSVJCQx3W8BXsC7hO2qAt8BoVjYjtAcZ9perqGnNg==" }, "htmlparser2": { "version": "3.10.1", @@ -5415,15 +5299,10 @@ } } }, - "http-parser-js": { - "version": "0.4.10", - "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.4.10.tgz", - "integrity": "sha1-ksnBN0w1CF912zWexWzCV8u5P6Q=" - }, "http-proxy": { - "version": "1.18.0", - "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.0.tgz", - "integrity": "sha512-84I2iJM/n1d4Hdgc6y2+qY5mDaz2PUVjlg9znE9byl+q0uC3DeByqBGReQu5tpLK0TAqTIXScRUV+dg7+bUPpQ==", + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz", + "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==", "requires": { "eventemitter3": "^4.0.0", "follow-redirects": "^1.0.0", @@ -5478,9 +5357,9 @@ } }, "ieee754": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", - "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==" + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==" }, "iferr": { "version": "0.1.5", @@ -5493,9 +5372,9 @@ "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==" }, "immediate": { - 
"version": "3.2.3", - "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.2.3.tgz", - "integrity": "sha1-0UD6j2FGWb1lQSMwl92qwlzdmRw=" + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz", + "integrity": "sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q==" }, "import-cwd": { "version": "2.1.0", @@ -5579,19 +5458,6 @@ "ipaddr.js": "^1.9.0" } }, - "invariant": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz", - "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==", - "requires": { - "loose-envify": "^1.0.0" - } - }, - "invert-kv": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-2.0.0.tgz", - "integrity": "sha512-wPVv/y/QQ/Uiirj/vh3oP+1Ww+AWehmi1g5fFWGPF6IpCBCDVrhgHRMvrLfdYcwDh3QJbGXDW4JAuzxElLSqKA==" - }, "ip": { "version": "1.1.5", "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz", @@ -5654,9 +5520,9 @@ "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" }, "is-callable": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.1.5.tgz", - "integrity": "sha512-ESKv5sMCJB2jnHTWZ3O5itG+O128Hsus4K4Qh1h2/cgn2vbgnLSVqfV46AeJA9D5EeeLa9w81KUXMtn34zhX+Q==" + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.2.tgz", + "integrity": "sha512-dnMqspv5nU3LoewK2N/y7KLtxtakvTuaCsU9FU50/QDmdbHNy/4/JuRtMHqRU22o3q+W89YQndQEeCVwK+3qrA==" }, "is-ci": { "version": "2.0.0", @@ -5686,6 +5552,14 @@ "rgba-regex": "^1.0.0" } }, + "is-core-module": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.0.0.tgz", + "integrity": "sha512-jq1AH6C8MuteOoBPwkxHafmByhL9j5q4OaPGdbuD+ZtQJVzH+i6E3BJDQcBA09k57i2Hh2yQbEG8yObZ0jdlWw==", + "requires": { + "has": "^1.0.3" + } + }, "is-data-descriptor": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", @@ -5786,6 +5660,11 @@ } } }, + "is-negative-zero": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.0.tgz", + "integrity": "sha1-lVOxIbD6wohp2p7UWeIMdUN4hGE=" + }, "is-npm": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-4.0.0.tgz", @@ -5849,16 +5728,16 @@ } }, "is-promise": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.1.0.tgz", - "integrity": "sha1-eaKp7OfwlugPNtKy87wWwf9L8/o=" + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.2.2.tgz", + "integrity": "sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==" }, "is-regex": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.0.5.tgz", - "integrity": "sha512-vlKW17SNq44owv5AQR3Cq0bQPEb8+kF3UKZ2fiZNOWtztYE5i0CzCZxFDwO58qAOWtxdBRVO/V5Qin1wjCqFYQ==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.1.tgz", + "integrity": "sha512-1+QkEcxiLlB7VEyFtyBg94e08OAsvq7FUBgApTq/w2ymCLyKJgDPsybBENVtA7XCQEgEXxKPonG+mvYRxh/LIg==", "requires": { - "has": "^1.0.3" + "has-symbols": "^1.0.1" } }, "is-resolvable": { @@ -5933,9 +5812,9 @@ "integrity": "sha1-FC0RHzpuPa6PSpr9d9RYVbWpzOM=" }, "js-base64": { - "version": "2.5.2", - "resolved": 
"https://registry.npmjs.org/js-base64/-/js-base64-2.5.2.tgz", - "integrity": "sha512-Vg8czh0Q7sFBSUMWWArX/miJeBWYBPpdU/3M/DKSaekLMqrqVPaedp+5mZhie/r0lgrcaYBfwXatEew6gwgiQQ==" + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-2.6.4.tgz", + "integrity": "sha512-pZe//GGmwJndub7ZghVHz7vjb2LgC1m8B07Au3eYqeqv9emhESByMXxaEgkUkEqJe87oBbSniGYoQNIBklc7IQ==" }, "js-stringify": { "version": "1.0.2", @@ -5948,9 +5827,9 @@ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" }, "js-yaml": { - "version": "3.13.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz", - "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==", + "version": "3.14.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.0.tgz", + "integrity": "sha512-/4IbIeHcD9VMHFqDR/gQ7EdZdLimOvW2DdcxFjdyyZ9NsbS+ccrXqVWDtab/lRl5AlUqmpBx8EhPaWR+OtY17A==", "requires": { "argparse": "^1.0.7", "esprima": "^4.0.0" @@ -6090,27 +5969,6 @@ "resolved": "https://registry.npmjs.org/lazy-cache/-/lazy-cache-1.0.4.tgz", "integrity": "sha1-odePw6UEdMuAhF07O24dpJpEbo4=" }, - "lcid": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lcid/-/lcid-2.0.0.tgz", - "integrity": "sha512-avPEb8P8EGnwXKClwsNUgryVjllcRqtMYa49NTsbQagYuT1DcXnl1915oxWjoyGrXR6zH/Y0Zc96xWsPcoDKeA==", - "requires": { - "invert-kv": "^2.0.0" - } - }, - "leven": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", - "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==" - }, - "levenary": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/levenary/-/levenary-1.1.1.tgz", - "integrity": "sha512-mkAdOIt79FD6irqjYSs4rdbnlT5vRonMEvBVPVb3XmevfS8kgRXwfes0dhPdEtzTWD/1eNE/Bm/G1iRt6DcnQQ==", - "requires": { - "leven": "^3.1.0" - } - }, "linkify-it": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-2.2.0.tgz", @@ -6140,18 +5998,18 @@ } }, "locate-path": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", - "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", "requires": { - "p-locate": "^2.0.0", + "p-locate": "^3.0.0", "path-exists": "^3.0.0" } }, "lodash": { - "version": "4.17.15", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz", - "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==" + "version": "4.17.20", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz", + "integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==" }, "lodash._reinterpolate": { "version": "3.0.0", @@ -6216,23 +6074,15 @@ "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=" }, "loglevel": { - "version": "1.6.7", - "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.6.7.tgz", - "integrity": "sha512-cY2eLFrQSAfVPhCgH1s7JI73tMbg9YC3v3+ZHVW67sBS7UxWzNEk/ZBbSfLykBWHp33dqqtOv82gjhKEi81T/A==" + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.7.0.tgz", + "integrity": 
"sha512-i2sY04nal5jDcagM3FMfG++T69GEEM8CYuOfeOIvmXzOIcwE9a/CJPR0MFM97pYMj/u10lzz7/zd7+qwhrBTqQ==" }, "longest": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/longest/-/longest-1.0.1.tgz", "integrity": "sha1-MKCy2jj3N3DoKUoNIuZiXtd9AJc=" }, - "loose-envify": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", - "requires": { - "js-tokens": "^3.0.0 || ^4.0.0" - } - }, "lower-case": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-1.1.4.tgz", @@ -6267,14 +6117,6 @@ } } }, - "map-age-cleaner": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz", - "integrity": "sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w==", - "requires": { - "p-defer": "^1.0.0" - } - }, "map-cache": { "version": "0.2.2", "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz", @@ -6289,26 +6131,46 @@ } }, "markdown-it": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-10.0.0.tgz", - "integrity": "sha512-YWOP1j7UbDNz+TumYP1kpwnP0aEa711cJjrAQrzd0UXlbJfc5aAq0F/PZHjiioqDC1NKgvIMX+o+9Bk7yuM2dg==", + "version": "12.0.2", + "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-12.0.2.tgz", + "integrity": "sha512-4Lkvjbv2kK+moL9TbeV+6/NHx+1Q+R/NIdUlFlkqkkzUcTod4uiyTJRiBidKR9qXSdkNFkgv+AELY8KN9vSgVA==", "requires": { - "argparse": "^1.0.7", + "argparse": "^2.0.1", "entities": "~2.0.0", - "linkify-it": "^2.0.0", + "linkify-it": "^3.0.1", "mdurl": "^1.0.1", "uc.micro": "^1.0.5" + }, + "dependencies": { + "argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "entities": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/entities/-/entities-2.0.3.tgz", + "integrity": "sha512-MyoZ0jgnLvB2X3Lg5HqpFmn1kybDiIfEQmKzTb5apr51Rb+T3KdmMiqa70T+bhGnyv7bQ6WMj2QMHpGMmlrUYQ==" + }, + "linkify-it": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-3.0.2.tgz", + "integrity": "sha512-gDBO4aHNZS6coiZCKVhSNh43F9ioIL4JwRjLZPkoLIY4yZFwg264Y5lu2x6rb1Js42Gh6Yqm2f6L2AJcnkzinQ==", + "requires": { + "uc.micro": "^1.0.1" + } + } } }, "markdown-it-anchor": { - "version": "5.2.7", - "resolved": "https://registry.npmjs.org/markdown-it-anchor/-/markdown-it-anchor-5.2.7.tgz", - "integrity": "sha512-REFmIaSS6szaD1bye80DMbp7ePwsPNvLTR5HunsUcZ0SG0rWJQ+Pz24R4UlTKtjKBPhxo0v0tOBDYjZQQknW8Q==" + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/markdown-it-anchor/-/markdown-it-anchor-5.3.0.tgz", + "integrity": "sha512-/V1MnLL/rgJ3jkMWo84UR+K+jF1cxNG1a+KwqeXqTIJ+jtA8aWSHuigx8lTzauiIjBDbwF3NcWQMotd0Dm39jA==" }, "markdown-it-attrs": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/markdown-it-attrs/-/markdown-it-attrs-3.0.2.tgz", - "integrity": "sha512-q45vdXU9TSWaHgFkWEFM97YHEoCmOyG9csLLdv3oVC6ARjT77u4wfng9rRtSOMb5UpxzT7zTX5GBbwm15H40dw==" + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/markdown-it-attrs/-/markdown-it-attrs-3.0.3.tgz", + "integrity": "sha512-cLnICU2t61skNCr4Wih/sdza+UbQcqJGZwvqAypnbWA284nzDm+Gpc90iaRk/JjsIy4emag5v3s0rXFhFBWhCA==" }, "markdown-it-chain": { "version": "1.3.0", @@ -6369,16 +6231,6 
@@ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=" }, - "mem": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/mem/-/mem-4.3.0.tgz", - "integrity": "sha512-qX2bG48pTqYRVmDB37rn/6PT7LcR8T7oAX3bf99u1Tt1nzxYfxkgqDwUwolPlXweM0XzBOBFzSx4kfp7KP1s/w==", - "requires": { - "map-age-cleaner": "^0.1.1", - "mimic-fn": "^2.0.0", - "p-is-promise": "^2.0.0" - } - }, "memory-fs": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.4.1.tgz", @@ -6431,9 +6283,9 @@ } }, "merge2": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.3.0.tgz", - "integrity": "sha512-2j4DAdlBOkiSZIsaXk4mTE3sRS02yBHAtfy127xRV3bQUFqXkjHCHLW6Scv7DwNRbIWNHH8zpnz9zMaKXIdvYw==" + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==" }, "methods": { "version": "1.1.2", @@ -6486,31 +6338,33 @@ "requires": { "bn.js": "^4.0.0", "brorand": "^1.0.1" + }, + "dependencies": { + "bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" + } } }, "mime": { - "version": "2.4.4", - "resolved": "https://registry.npmjs.org/mime/-/mime-2.4.4.tgz", - "integrity": "sha512-LRxmNwziLPT828z+4YkNzloCFC2YM4wrB99k+AV5ZbEyfGNWfG8SO1FUXLmLDBSo89NrJZ4DIWeLjy1CHGhMGA==" + "version": "2.4.6", + "resolved": "https://registry.npmjs.org/mime/-/mime-2.4.6.tgz", + "integrity": "sha512-RZKhC3EmpBchfTGBVb8fb+RL2cWyw/32lshnsETttkBAyAUXSGHxbEJWWRXc751DrIxG1q04b8QwMbAwkRPpUA==" }, "mime-db": { - "version": "1.43.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.43.0.tgz", - "integrity": "sha512-+5dsGEEovYbT8UY9yD7eE4XTc4UwJ1jBYlgaQQF38ENsKR3wj/8q8RFZrF9WIZpB2V1ArTVFUva8sAul1NzRzQ==" + "version": "1.44.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.44.0.tgz", + "integrity": "sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg==" }, "mime-types": { - "version": "2.1.26", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.26.tgz", - "integrity": "sha512-01paPWYgLrkqAyrlDorC1uDwl2p3qZT7yl806vW7DvDoxwXi46jsjFbg+WdwotBIk6/MbEhO/dh5aZ5sNj/dWQ==", + "version": "2.1.27", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.27.tgz", + "integrity": "sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w==", "requires": { - "mime-db": "1.43.0" + "mime-db": "1.44.0" } }, - "mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==" - }, "mimic-response": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", @@ -6607,12 +6461,9 @@ } }, "mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "requires": { - "minimist": "^1.2.5" - } + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": 
"sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==" }, "move-concurrently": { "version": "1.0.1", @@ -6625,6 +6476,16 @@ "mkdirp": "^0.5.1", "rimraf": "^2.5.4", "run-queue": "^1.0.3" + }, + "dependencies": { + "mkdirp": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "requires": { + "minimist": "^1.2.5" + } + } } }, "ms": { @@ -6647,9 +6508,9 @@ "integrity": "sha1-iZ8R2WhuXgXLkbNdXw5jt3PPyQE=" }, "nan": { - "version": "2.14.0", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.0.tgz", - "integrity": "sha512-INOFj37C7k3AfaNTtX8RhsTw7qRy7eLET14cROi9+5HAVbbHuIWUHEauBv5qT4Av2tWasiTY1Jw6puUNqRJXQg==", + "version": "2.14.2", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz", + "integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==", "optional": true }, "nanomatch": { @@ -6695,9 +6556,9 @@ "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==" }, "neo-async": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.1.tgz", - "integrity": "sha512-iyam8fBuCUpWeKPGpaNMetEocMt364qkCsfL9JuhjXX6dRnguRVOfk2GZaDpPjcOKiiXCPINZC1GczQ7iTq3Zw==" + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" }, "nice-try": { "version": "1.0.5", @@ -6713,9 +6574,9 @@ } }, "node-forge": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.9.0.tgz", - "integrity": "sha512-7ASaDa3pD+lJ3WvXFsxekJQelBKRpne+GOVbLbtHYdd7pFspyeuJHnWfLplGf3SwKGbfs/aYl5V/JCIaHVUKKQ==" + "version": "0.10.0", + "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-0.10.0.tgz", + "integrity": "sha512-PPmu8eEeG9saEUvI97fm4OYxXVB6bFvyNTyiUOBichBpFG8A1Ljw3bY62+5oOjDEMHRnd0Y7HQ+x7uzxOzC6JA==" }, "node-libs-browser": { "version": "2.2.1", @@ -6784,9 +6645,9 @@ } }, "node-releases": { - "version": "1.1.53", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.53.tgz", - "integrity": "sha512-wp8zyQVwef2hpZ/dJH7SfSrIPD6YoJz6BDQDpGEkcA0s3LpAQoxBIYmfIq6QAhC1DhwsyCgTaTTcONwX8qzCuQ==" + "version": "1.1.65", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.65.tgz", + "integrity": "sha512-YpzJOe2WFIW0V4ZkJQd/DGR/zdVwc/pI4Nl1CZrBO19FdRcSTmsuhdttw9rsTzzJLrNcSloLiBbEYx1C4f6gpA==" }, "nopt": { "version": "1.0.10", @@ -6842,11 +6703,6 @@ "resolved": "https://registry.npmjs.org/num2fraction/-/num2fraction-1.2.2.tgz", "integrity": "sha1-b2gragJ6Tp3fpFZM0lidHU5mnt4=" }, - "number-is-nan": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", - "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=" - }, "oauth-sign": { "version": "0.9.0", "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", @@ -6886,14 +6742,39 @@ } }, "object-inspect": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.7.0.tgz", - "integrity": "sha512-a7pEHdh1xKIAgTySUGgLMx/xwDZskN1Ud6egYYN3EdRW4ZMPNEDUTF+hwy2LUC+Bl+SyLXANnwz/jyh/qutKUw==" + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.8.0.tgz", + "integrity": 
"sha512-jLdtEOB112fORuypAyl/50VRVIBIdVQOSUUGQHzJ4xBSbit81zRarz7GThkEFZy1RceYrWYcPcBFPQwHyAc1gA==" }, "object-is": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.0.2.tgz", - "integrity": "sha512-Epah+btZd5wrrfjkJZq1AOB9O6OxUQto45hzFd7lXGrpHPGE0W1k+426yrZV+k6NJOzLNNW/nVsmZdIWsAqoOQ==" + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.3.tgz", + "integrity": "sha512-teyqLvFWzLkq5B9ki8FVWA902UER2qkxmdA4nLf+wjOLAWgxzCWZNCxpDq9MvE8MmhWNr+I8w3BN49Vx36Y6Xg==", + "requires": { + "define-properties": "^1.1.3", + "es-abstract": "^1.18.0-next.1" + }, + "dependencies": { + "es-abstract": { + "version": "1.18.0-next.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0-next.1.tgz", + "integrity": "sha512-I4UGspA0wpZXWENrdA0uHbnhte683t3qT/1VFH9aX2dA5PPSf6QW5HHXf5HImaqPmjXaVeVk4RGWnaylmV7uAA==", + "requires": { + "es-to-primitive": "^1.2.1", + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1", + "is-callable": "^1.2.2", + "is-negative-zero": "^2.0.0", + "is-regex": "^1.1.1", + "object-inspect": "^1.8.0", + "object-keys": "^1.1.1", + "object.assign": "^4.1.1", + "string.prototype.trimend": "^1.0.1", + "string.prototype.trimstart": "^1.0.1" + } + } + } }, "object-keys": { "version": "1.1.1", @@ -6909,14 +6790,14 @@ } }, "object.assign": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.0.tgz", - "integrity": "sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w==", + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.2.tgz", + "integrity": "sha512-ixT2L5THXsApyiUPYKmW+2EHpXXe5Ii3M+f4e+aJFAHao5amFRW6J0OO6c/LU8Be47utCx2GL89hxGB6XSmKuQ==", "requires": { - "define-properties": "^1.1.2", - "function-bind": "^1.1.1", - "has-symbols": "^1.0.0", - "object-keys": "^1.0.11" + "call-bind": "^1.0.0", + "define-properties": "^1.1.3", + "has-symbols": "^1.0.1", + "object-keys": "^1.1.1" } }, "object.getownpropertydescriptors": { @@ -6974,9 +6855,9 @@ } }, "opencollective-postinstall": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/opencollective-postinstall/-/opencollective-postinstall-2.0.2.tgz", - "integrity": "sha512-pVOEP16TrAO2/fjej1IdOyupJY8KDUM1CvsaScRbw6oddvpQoOfGk4ywha0HKKVAD6RkW4x6Q+tNBwhf3Bgpuw==" + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/opencollective-postinstall/-/opencollective-postinstall-2.0.3.tgz", + "integrity": "sha512-8AV/sCtuzUeTo8gQK5qDZzARrulB3egtLzFgteqB2tcT4Mw7B8Kt7JcDHmltjz6FOAHsvTevk70gZEbhM4ZS9Q==" }, "opn": { "version": "5.5.0", @@ -6987,9 +6868,9 @@ } }, "optimize-css-assets-webpack-plugin": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/optimize-css-assets-webpack-plugin/-/optimize-css-assets-webpack-plugin-5.0.3.tgz", - "integrity": "sha512-q9fbvCRS6EYtUKKSwI87qm2IxlyJK5b4dygW1rKUBT6mMDhdG5e5bZT63v6tnJR9F9FB/H5a0HTmtw+laUBxKA==", + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/optimize-css-assets-webpack-plugin/-/optimize-css-assets-webpack-plugin-5.0.4.tgz", + "integrity": "sha512-wqd6FdI2a5/FdoiCNNkEvLeA//lHHfG24Ln2Xm2qqdIk4aOlsR18jwpyOihqQ8849W3qu2DX8fOYxpvTMj+93A==", "requires": { "cssnano": "^4.1.10", "last-call-webpack-plugin": "^3.0.0" @@ -7008,50 +6889,30 @@ "resolved": "https://registry.npmjs.org/os-browserify/-/os-browserify-0.3.0.tgz", "integrity": "sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc=" }, - "os-locale": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/os-locale/-/os-locale-3.1.0.tgz", - "integrity": "sha512-Z8l3R4wYWM40/52Z+S265okfFj8Kt2cC2MKY+xNi3kFs+XGI7WXu/I309QQQYbRW4ijiZ+yxs9pqEhJh0DqW3Q==", - "requires": { - "execa": "^1.0.0", - "lcid": "^2.0.0", - "mem": "^4.0.0" - } - }, "p-cancelable": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz", "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==" }, - "p-defer": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-defer/-/p-defer-1.0.0.tgz", - "integrity": "sha1-n26xgvbJqozXQwBKfU+WsZaw+ww=" - }, "p-finally": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=" }, - "p-is-promise": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-2.1.0.tgz", - "integrity": "sha512-Y3W0wlRPK8ZMRbNq97l4M5otioeA5lm1z7bkNkxCka8HSPjR0xRWmpCmc9utiaLP9Jb1eD8BgeIxTW4AIF45Pg==" - }, "p-limit": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", - "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "requires": { - "p-try": "^1.0.0" + "p-try": "^2.0.0" } }, "p-locate": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", - "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", "requires": { - "p-limit": "^1.1.0" + "p-limit": "^2.0.0" } }, "p-map": { @@ -7068,9 +6929,9 @@ } }, "p-try": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", - "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=" + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" }, "package-json": { "version": "6.5.0", @@ -7136,13 +6997,12 @@ } }, "parse-asn1": { - "version": "5.1.5", - "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.5.tgz", - "integrity": "sha512-jkMYn1dcJqF6d5CpU689bq7w/b5ALS9ROVSpQDPrZsqqesUJii9qutvoT5ltGedNXMO2e16YUWIghG9KxaViTQ==", + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/parse-asn1/-/parse-asn1-5.1.6.tgz", + "integrity": "sha512-RnZRo1EPU6JBnra2vGHj0yhp6ebyjBZpmUCLHWiFhxlzvBCCpAuZ7elsBp1PVAbQN0/04VD/19rfzlBSwLstMw==", "requires": { - "asn1.js": "^4.0.0", + "asn1.js": "^5.2.0", "browserify-aes": "^1.0.0", - "create-hash": "^1.1.0", "evp_bytestokey": "^1.0.0", "pbkdf2": "^3.0.3", "safe-buffer": "^5.1.1" @@ -7231,9 +7091,9 @@ } }, "pbkdf2": { - "version": "3.0.17", - "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.0.17.tgz", - "integrity": "sha512-U/il5MsrZp7mGg3mSQfn742na2T+1/vHDCG5/iTI3X9MKUuYUZVLQhyRsg06mCgDBTd57TxzgZt7P+fYfjRLtA==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.1.tgz", + "integrity": "sha512-4Ejy1OPxi9f2tt1rRV7Go7zmfDQ+ZectEQz3VGUQhgq62HtIRPDyG/JtnwIxs6x3uNMwo2V7q1fMvKjb+Tnpqg==", "requires": { 
"create-hash": "^1.1.2", "create-hmac": "^1.1.4", @@ -7247,6 +7107,12 @@ "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" }, + "picomatch": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.2.tgz", + "integrity": "sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg==", + "optional": true + }, "pify": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", @@ -7258,77 +7124,29 @@ "integrity": "sha1-clVrgM+g1IqXToDnckjoDtT3+HA=" }, "pinkie-promise": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", - "integrity": "sha1-ITXW36ejWMBprJsXh3YogihFD/o=", - "requires": { - "pinkie": "^2.0.0" - } - }, - "pkg-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", - "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", - "requires": { - "find-up": "^3.0.0" - }, - "dependencies": { - "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "requires": { - "locate-path": "^3.0.0" - } - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "requires": { - "p-try": "^2.0.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "requires": { - "p-limit": "^2.0.0" - } - }, - "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" - } + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", + "integrity": "sha1-ITXW36ejWMBprJsXh3YogihFD/o=", + "requires": { + "pinkie": "^2.0.0" } }, - "pkg-up": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-2.0.0.tgz", - "integrity": "sha1-yBmscoBZpGHKscOImivjxJoATX8=", + "pkg-dir": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-3.0.0.tgz", + "integrity": "sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw==", "requires": { - "find-up": "^2.1.0" + "find-up": "^3.0.0" } }, "portfinder": { - "version": "1.0.25", - "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.25.tgz", - "integrity": "sha512-6ElJnHBbxVA1XSLgBp7G1FiCkQdlqGzuF7DswL5tcea+E8UpuvPU7beVAjjRwCioTS9ZluNbu+ZyRvgTsmqEBg==", + "version": "1.0.28", + "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.28.tgz", + "integrity": 
"sha512-Se+2isanIcEqf2XMHjyUKskczxbPH7dQnlMjXX6+dybayyHvAf/TCgyMRlzf/B6QDhAEFOGes0pzRo3by4AbMA==", "requires": { "async": "^2.6.2", "debug": "^3.1.1", - "mkdirp": "^0.5.1" + "mkdirp": "^0.5.5" }, "dependencies": { "debug": { @@ -7339,6 +7157,14 @@ "ms": "^2.1.1" } }, + "mkdirp": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "requires": { + "minimist": "^1.2.5" + } + }, "ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", @@ -7352,9 +7178,9 @@ "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=" }, "postcss": { - "version": "7.0.27", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.27.tgz", - "integrity": "sha512-WuQETPMcW9Uf1/22HWUWP9lgsIC+KEHg2kozMflKjbeUtw9ujvFX6QmIfozaErDkmLWS9WEnEdEe6Uo9/BNTdQ==", + "version": "7.0.35", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.35.tgz", + "integrity": "sha512-3QT8bBJeX/S5zKTTjTCIjRF3If4avAT6kqxcASlTWEtAFCb9NH0OUxNDfgZSWdP5fJnBYCMEWkIFfWeugjzYMg==", "requires": { "chalk": "^2.4.2", "source-map": "^0.6.1", @@ -7372,9 +7198,9 @@ } }, "postcss-calc": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.2.tgz", - "integrity": "sha512-rofZFHUg6ZIrvRwPeFktv06GdbDYLcGqh9EwiMutZg+a0oePCCw1zHOEiji6LCpyRcjTREtPASuUqeAvYlEVvQ==", + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.5.tgz", + "integrity": "sha512-1tKHutbGtLtEZF6PT4JSihCHfIVldU72mZ8SdZHIYriIZ9fh9k9aWSppaT8rHsyI3dX+KSR+W+Ix9BMY3AODrg==", "requires": { "postcss": "^7.0.27", "postcss-selector-parser": "^6.0.2", @@ -7449,9 +7275,9 @@ } }, "postcss-load-config": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-2.1.0.tgz", - "integrity": "sha512-4pV3JJVPLd5+RueiVVB+gFOAa7GWc25XQcMp86Zexzke69mKf6Nx9LRcQywdz7yZI9n1udOxmLuAwTBypypF8Q==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-2.1.2.tgz", + "integrity": "sha512-/rDeGV6vMUo3mwJZmeHfEDvwnTKKqQ0S7OHUi/kJvvtx3aWtyWG2/0ZWnzCt2keEclwN6Tf0DST2v9kITdOKYw==", "requires": { "cosmiconfig": "^5.0.0", "import-cwd": "^2.0.0" @@ -7849,13 +7675,14 @@ } }, "postcss-selector-parser": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.2.tgz", - "integrity": "sha512-36P2QR59jDTOAiIkqEprfJDsoNrvwFei3eCqKd1Y0tUsBimsq39BLp7RD+JWny3WgB1zGhJX8XVePwm9k4wdBg==", + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.4.tgz", + "integrity": "sha512-gjMeXBempyInaBqpp8gODmwZ52WaYsVOsfr4L4lDQ7n3ncD6mEyySiDtgzCT+NYC0mmeOLvtsF8iaEf0YT6dBw==", "requires": { "cssesc": "^3.0.0", "indexes-of": "^1.0.1", - "uniq": "^1.0.1" + "uniq": "^1.0.1", + "util-deprecate": "^1.0.2" } }, "postcss-svgo": { @@ -7887,9 +7714,9 @@ } }, "postcss-value-parser": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.0.3.tgz", - "integrity": "sha512-N7h4pG+Nnu5BEIzyeaaIYWs0LI5XC40OrRh5L60z0QjFsqGWcHcbkBvpe1WYpcIS9yQ8sOi/vIPt1ejQCrMVrg==" + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.1.0.tgz", + "integrity": "sha512-97DXOFbQJhk71ne5/Mt6cOu6yxsSfM0QGQyl0L25Gca4yGWEGJaig7l7gbCX623VqTBNGLRLaVUCnNkcedlRSQ==" }, "prepend-http": { 
"version": "2.0.0", @@ -7903,12 +7730,12 @@ "optional": true }, "pretty-error": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-2.1.1.tgz", - "integrity": "sha1-X0+HyPkeWuPzuoerTPXgOxoX8aM=", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-2.1.2.tgz", + "integrity": "sha512-EY5oDzmsX5wvuynAByrmY0P0hcp+QpnAKbJng2A2MPjVKXCxrDSUkzghVJ4ZGPIv+JC4gX8fPUWscC0RtjsWGw==", "requires": { - "renderkid": "^2.0.1", - "utila": "~0.4" + "lodash": "^4.17.20", + "renderkid": "^2.0.4" } }, "pretty-time": { @@ -7917,18 +7744,13 @@ "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==" }, "prismjs": { - "version": "1.20.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.20.0.tgz", - "integrity": "sha512-AEDjSrVNkynnw6A+B1DsFkd6AVdTnp+/WoUixFRULlCLZVRZlVQMVWio/16jv7G1FscUxQxOQhWwApgbnxr6kQ==", + "version": "1.22.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.22.0.tgz", + "integrity": "sha512-lLJ/Wt9yy0AiSYBf212kK3mM5L8ycwlyTlSxHBAneXLR0nzFMlZ5y7riFPF3E33zXOF2IH95xdY5jIyZbM9z/w==", "requires": { "clipboard": "^2.0.0" } }, - "private": { - "version": "0.1.8", - "resolved": "https://registry.npmjs.org/private/-/private-0.1.8.tgz", - "integrity": "sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg==" - }, "process": { "version": "0.11.10", "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", @@ -7987,6 +7809,13 @@ "parse-asn1": "^5.0.0", "randombytes": "^2.0.1", "safe-buffer": "^5.1.2" + }, + "dependencies": { + "bn.js": { + "version": "4.11.9", + "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-4.11.9.tgz", + "integrity": "sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw==" + } } }, "pug": { @@ -8147,9 +7976,9 @@ "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" }, "pupa": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.0.1.tgz", - "integrity": "sha512-hEJH0s8PXLY/cdXh66tNEQGndDrIKNqNC5xmrysZy3i5C3oEoLna7YAOad+7u125+zH1HNXUmGEkrhb3c2VriA==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz", + "integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==", "requires": { "escape-goat": "^2.0.0" } @@ -8185,9 +8014,9 @@ "integrity": "sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM=" }, "querystringify": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.1.1.tgz", - "integrity": "sha512-w7fLxIRCRT7U8Qu53jQnJyPkYZIaR4n5151KMfcJlO/A9397Wxb1amJvROTK6TOnp7PfoAmg/qXiNHI+08jRfA==" + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==" }, "randombytes": { "version": "2.1.0", @@ -8298,9 +8127,9 @@ } }, "regenerate": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.0.tgz", - "integrity": "sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg==" + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==" }, 
"regenerate-unicode-properties": { "version": "8.2.0", @@ -8316,12 +8145,11 @@ "integrity": "sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg==" }, "regenerator-transform": { - "version": "0.14.4", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.14.4.tgz", - "integrity": "sha512-EaJaKPBI9GvKpvUz2mz4fhx7WPgvwRLY9v3hlNHWmAuJHI13T4nwKnNvm5RWJzEdnI5g5UwtOww+S8IdoUC2bw==", + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.14.5.tgz", + "integrity": "sha512-eOf6vka5IO151Jfsw2NO9WpGX58W6wWmefK3I1zEGr0lOD0u8rwPaNqQL1aRxUaxLeKO3ArNh3VYg1KbaD+FFw==", "requires": { - "@babel/runtime": "^7.8.4", - "private": "^0.1.8" + "@babel/runtime": "^7.8.4" } }, "regex-not": { @@ -8362,9 +8190,9 @@ } }, "regexpu-core": { - "version": "4.7.0", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.7.0.tgz", - "integrity": "sha512-TQ4KXRnIn6tz6tjnrXEkD/sshygKH/j5KzK86X8MkeHyZ8qst/LZ89j3X4/8HEIfHANTFIP/AbXakeRhWIl5YQ==", + "version": "4.7.1", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.7.1.tgz", + "integrity": "sha512-ywH2VUraA44DZQuRKzARmw6S66mr48pQVva4LBeRhcOltJ6hExvWly5ZjFLYo67xbIxb6W1q4bAGtgfEl20zfQ==", "requires": { "regenerate": "^1.4.0", "regenerate-unicode-properties": "^8.2.0", @@ -8375,9 +8203,9 @@ } }, "registry-auth-token": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.1.1.tgz", - "integrity": "sha512-9bKS7nTl9+/A1s7tnPeGrUpRcVY+LUh7bfFgzpndALdPfXQBfQV77rQVtqgUV3ti4vc/Ik81Ex8UJDWDQ12zQA==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.0.tgz", + "integrity": "sha512-P+lWzPrsgfN+UEpDS3U8AQKg/UjZX6mQSJueZj3EK+vNESoqBSpBUD3gmu4sF9lOsjXWjF11dQKUqemf3veq1w==", "requires": { "rc": "^1.2.8" } @@ -8391,9 +8219,9 @@ } }, "regjsgen": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.5.1.tgz", - "integrity": "sha512-5qxzGZjDs9w4tzT3TPhCJqWdCc3RLYwy9J2NB0nm5Lz+S273lvWcpjaTGHsT1dc6Hhfq41uSEOw8wBmxrKOuyg==" + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/regjsgen/-/regjsgen-0.5.2.tgz", + "integrity": "sha512-OFFT3MfrH90xIW8OOSyUrk6QHD5E9JOTeGodiJeBS3J6IwlgzJMNE/1bZklWz5oTg+9dCMyEetclvCVXOPoN3A==" }, "regjsparser": { "version": "0.6.4", @@ -8421,15 +8249,15 @@ "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=" }, "renderkid": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-2.0.3.tgz", - "integrity": "sha512-z8CLQp7EZBPCwCnncgf9C4XAi3WR0dv+uWu/PjIyhhAb5d6IJ/QZqlHFprHeKT+59//V6BNUsLbvN8+2LarxGA==", + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-2.0.4.tgz", + "integrity": "sha512-K2eXrSOJdq+HuKzlcjOlGoOarUu5SDguDEhE7+Ah4zuOWL40j8A/oHvLlLob9PSTNvVnBd+/q0Er1QfpEuem5g==", "requires": { "css-select": "^1.1.0", "dom-converter": "^0.2", "htmlparser2": "^3.3.0", - "strip-ansi": "^3.0.0", - "utila": "^0.4.0" + "lodash": "^4.17.20", + "strip-ansi": "^3.0.0" } }, "repeat-element": { @@ -8482,9 +8310,9 @@ "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=" }, "require-main-filename": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-1.0.1.tgz", - "integrity": "sha1-l/cXtp1IeE9fUmpsWqj/3aBVpNE=" + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", + "integrity": 
"sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==" }, "requires-port": { "version": "1.0.0", @@ -8492,10 +8320,11 @@ "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=" }, "resolve": { - "version": "1.15.1", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.15.1.tgz", - "integrity": "sha512-84oo6ZTtoTUpjgNEr5SJyzQhzL72gaRodsSfyxC/AXRvwu0Yse9H8eF9IpGo7b8YetZhlI6v7ZQ6bKBFV/6S7w==", + "version": "1.18.1", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.18.1.tgz", + "integrity": "sha512-lDfCPaMKfOJXjy0dPayzPdF1phampNWr3qFCjAu+rw/qbQmr5jWH5xN2hwh9QKfw9E5v4hwV7A+jrCmL8yjjqA==", "requires": { + "is-core-module": "^2.0.0", "path-parse": "^1.0.6" } }, @@ -8579,9 +8408,9 @@ } }, "safe-buffer": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.0.tgz", - "integrity": "sha512-fZEwUGbVl7kouZs1jCdMLdt95hdIv0ZeHg6L7qPeciMZhZ+/gdesW4wgTARkrFWEpspjEATAzUGPG8N2jJiwbg==" + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" }, "safe-regex": { "version": "1.1.0", @@ -8602,12 +8431,13 @@ "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" }, "schema-utils": { - "version": "2.6.5", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.6.5.tgz", - "integrity": "sha512-5KXuwKziQrTVHh8j/Uxz+QUbxkaLW9X/86NBlx/gnKgtsZA2GIVMUn17qWhRFwF8jdYb3Dig5hRO/W5mZqy6SQ==", + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz", + "integrity": "sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==", "requires": { - "ajv": "^6.12.0", - "ajv-keywords": "^3.4.1" + "@types/json-schema": "^7.0.5", + "ajv": "^6.12.4", + "ajv-keywords": "^3.5.2" } }, "section-matter": { @@ -8631,11 +8461,11 @@ "integrity": "sha1-Yl2GWPhlr0Psliv8N2o3NZpJlMo=" }, "selfsigned": { - "version": "1.10.7", - "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-1.10.7.tgz", - "integrity": "sha512-8M3wBCzeWIJnQfl43IKwOmC4H/RAp50S8DF60znzjW5GVqTcSe2vWclt7hmYVPkKPlHWOu5EaWOMZ2Y6W8ZXTA==", + "version": "1.10.8", + "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-1.10.8.tgz", + "integrity": "sha512-2P4PtieJeEwVgTU9QEcwIRDQ/mXJLX8/+I3ur+Pg16nS8oNbrGxEso9NyYWy8NAmXiNl4dlAp5MwoNeCWzON4w==", "requires": { - "node-forge": "0.9.0" + "node-forge": "^0.10.0" } }, "semver": { @@ -8699,9 +8529,12 @@ } }, "serialize-javascript": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-2.1.2.tgz", - "integrity": "sha512-rs9OggEUF0V4jUSecXazOYsLfu7OGK2qIn3c7IPBiffz32XniEp/TX9Xmc9LQfK2nQ2QKHvZ2oygKUGU0lG4jQ==" + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-4.0.0.tgz", + "integrity": "sha512-GaNA54380uFefWghODBWEGisLZFj00nS5ACs6yHa9nLqlLpVLO8ChDGeKRjZnV4Nh4n0Qi7nhYZD/9fCPzEqkw==", + "requires": { + "randombytes": "^2.1.0" + } }, "serve-index": { "version": "1.9.1", @@ -8951,12 +8784,13 @@ } }, "sockjs": { - "version": "0.3.19", - "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.19.tgz", - "integrity": "sha512-V48klKZl8T6MzatbLlzzRNhMepEys9Y4oGFpypBFFn1gLI/QQ9HtLLyWJNbPlwGLelOVOEijUbTTJeLLI59jLw==", + "version": "0.3.20", + "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.20.tgz", + 
"integrity": "sha512-SpmVOVpdq0DJc0qArhF3E5xsxvaiqGNb73XfgBpK1y3UD5gs8DSo8aCTsuT5pX8rssdc2NDIzANwP9eCAiSdTA==", "requires": { "faye-websocket": "^0.10.0", - "uuid": "^3.0.1" + "uuid": "^3.4.0", + "websocket-driver": "0.6.5" } }, "sockjs-client": { @@ -9026,9 +8860,9 @@ } }, "source-map-support": { - "version": "0.5.16", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.16.tgz", - "integrity": "sha512-efyLRJDr68D9hBBNIPWFjhpFzURh+KJykQwvMyW5UiZzYwoF6l4YMMDIJJEyFWxWCqfyxLzz6tSfUFR+kXXsVQ==", + "version": "0.5.19", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.19.tgz", + "integrity": "sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw==", "requires": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" @@ -9052,11 +8886,11 @@ }, "dependencies": { "debug": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", - "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.2.0.tgz", + "integrity": "sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg==", "requires": { - "ms": "^2.1.1" + "ms": "2.1.2" } }, "ms": { @@ -9080,11 +8914,11 @@ }, "dependencies": { "debug": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", - "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.2.0.tgz", + "integrity": "sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg==", "requires": { - "ms": "^2.1.1" + "ms": "2.1.2" } }, "ms": { @@ -9291,65 +9125,88 @@ "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=" }, "string-width": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", "requires": { + "emoji-regex": "^7.0.1", "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" + "strip-ansi": "^5.1.0" }, "dependencies": { "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=" + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" }, "strip-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", "requires": { - "ansi-regex": "^3.0.0" + "ansi-regex": "^4.1.0" } } } }, "string.prototype.trimend": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.0.tgz", - 
"integrity": "sha512-EEJnGqa/xNfIg05SxiPSqRS7S9qwDhYts1TSLR1BQfYUfPe1stofgGKvwERK9+9yf+PpfBMlpBaCHucXGPQfUA==", - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.5" - } - }, - "string.prototype.trimleft": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/string.prototype.trimleft/-/string.prototype.trimleft-2.1.2.tgz", - "integrity": "sha512-gCA0tza1JBvqr3bfAIFJGqfdRTyPae82+KTnm3coDXkZN9wnuW3HjGgN386D7hfv5CHQYCI022/rJPVlqXyHSw==", - "requires": { - "define-properties": "^1.1.3", - "es-abstract": "^1.17.5", - "string.prototype.trimstart": "^1.0.0" - } - }, - "string.prototype.trimright": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/string.prototype.trimright/-/string.prototype.trimright-2.1.2.tgz", - "integrity": "sha512-ZNRQ7sY3KroTaYjRS6EbNiiHrOkjihL9aQE/8gfQ4DtAC/aEBRHFJa44OmoWxGGqXuJlfKkZW4WcXErGr+9ZFg==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.2.tgz", + "integrity": "sha512-8oAG/hi14Z4nOVP0z6mdiVZ/wqjDtWSLygMigTzAb+7aPEDTleeFf+WrF+alzecxIRkckkJVn+dTlwzJXORATw==", "requires": { "define-properties": "^1.1.3", - "es-abstract": "^1.17.5", - "string.prototype.trimend": "^1.0.0" + "es-abstract": "^1.18.0-next.1" + }, + "dependencies": { + "es-abstract": { + "version": "1.18.0-next.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0-next.1.tgz", + "integrity": "sha512-I4UGspA0wpZXWENrdA0uHbnhte683t3qT/1VFH9aX2dA5PPSf6QW5HHXf5HImaqPmjXaVeVk4RGWnaylmV7uAA==", + "requires": { + "es-to-primitive": "^1.2.1", + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1", + "is-callable": "^1.2.2", + "is-negative-zero": "^2.0.0", + "is-regex": "^1.1.1", + "object-inspect": "^1.8.0", + "object-keys": "^1.1.1", + "object.assign": "^4.1.1", + "string.prototype.trimend": "^1.0.1", + "string.prototype.trimstart": "^1.0.1" + } + } } }, "string.prototype.trimstart": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.0.tgz", - "integrity": "sha512-iCP8g01NFYiiBOnwG1Xc3WZLyoo+RuBymwIlWncShXDDJYWN6DbnM3odslBJdgCdRlq94B5s63NWAZlcn2CS4w==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.2.tgz", + "integrity": "sha512-7F6CdBTl5zyu30BJFdzSTlSlLPwODC23Od+iLoVH8X6+3fvDPPuBVVj9iaB1GOsSTSIgVfsfm27R2FGrAPznWg==", "requires": { "define-properties": "^1.1.3", - "es-abstract": "^1.17.5" + "es-abstract": "^1.18.0-next.1" + }, + "dependencies": { + "es-abstract": { + "version": "1.18.0-next.1", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.18.0-next.1.tgz", + "integrity": "sha512-I4UGspA0wpZXWENrdA0uHbnhte683t3qT/1VFH9aX2dA5PPSf6QW5HHXf5HImaqPmjXaVeVk4RGWnaylmV7uAA==", + "requires": { + "es-to-primitive": "^1.2.1", + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1", + "is-callable": "^1.2.2", + "is-negative-zero": "^2.0.0", + "is-regex": "^1.1.1", + "object-inspect": "^1.8.0", + "object-keys": "^1.1.1", + "object.assign": "^4.1.1", + "string.prototype.trimend": "^1.0.1", + "string.prototype.trimstart": "^1.0.1" + } + } } }, "string_decoder": { @@ -9406,17 +9263,17 @@ } }, "stylus": { - "version": "0.54.7", - "resolved": "https://registry.npmjs.org/stylus/-/stylus-0.54.7.tgz", - "integrity": "sha512-Yw3WMTzVwevT6ZTrLCYNHAFmanMxdylelL3hkWNgPMeTCpMwpV3nXjpOHuBXtFv7aiO2xRuQS6OoAdgkNcSNug==", + "version": "0.54.8", + "resolved": 
"https://registry.npmjs.org/stylus/-/stylus-0.54.8.tgz", + "integrity": "sha512-vr54Or4BZ7pJafo2mpf0ZcwA74rpuYCZbxrHBsH8kbcXOwSfvBFwsRfpGO5OD5fhG5HDCFW737PKaawI7OqEAg==", "requires": { "css-parse": "~2.0.0", "debug": "~3.1.0", - "glob": "^7.1.3", - "mkdirp": "~0.5.x", + "glob": "^7.1.6", + "mkdirp": "~1.0.4", "safer-buffer": "^2.1.2", "sax": "~1.2.4", - "semver": "^6.0.0", + "semver": "^6.3.0", "source-map": "^0.7.3" }, "dependencies": { @@ -9482,9 +9339,9 @@ } }, "css-what": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.2.1.tgz", - "integrity": "sha512-WwOrosiQTvyms+Ti5ZC5vGEK0Vod3FTt1ca+payZqvKuGJF+dq7bG63DstxtN0dpm6FxY27a/zS3Wten+gEtGw==" + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/css-what/-/css-what-3.4.2.tgz", + "integrity": "sha512-ACUm3L0/jiZTqfzRM3Hi9Q8eZqd6IK37mMWPLz9PJxkLWllYeRf+EHUSHYEtFop2Eqytaq1FizFVh7XfBnXCDQ==" }, "domutils": { "version": "1.7.0", @@ -9494,6 +9351,14 @@ "dom-serializer": "0", "domelementtype": "1" } + }, + "mkdirp": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "requires": { + "minimist": "^1.2.5" + } } } }, @@ -9503,14 +9368,14 @@ "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==" }, "term-size": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/term-size/-/term-size-2.2.0.tgz", - "integrity": "sha512-a6sumDlzyHVJWb8+YofY4TW112G6p2FCPEAFk+59gIYHv3XHRhm9ltVQ9kli4hNWeQBwSpe8cRN25x0ROunMOw==" + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/term-size/-/term-size-2.2.1.tgz", + "integrity": "sha512-wK0Ri4fOGjv/XPy8SBHZChl8CM7uMc5VML7SqiQ0zG7+J5Vr+RMQDoHa2CNT6KHUnTGIXH34UDMkPzAUyapBZg==" }, "terser": { - "version": "4.6.11", - "resolved": "https://registry.npmjs.org/terser/-/terser-4.6.11.tgz", - "integrity": "sha512-76Ynm7OXUG5xhOpblhytE7X58oeNSmC8xnNhjWVo8CksHit0U0kO4hfNbPrrYwowLWFgM2n9L176VNx2QaHmtA==", + "version": "4.8.0", + "resolved": "https://registry.npmjs.org/terser/-/terser-4.8.0.tgz", + "integrity": "sha512-EAPipTNeWsb/3wLPeup1tVPaXfIaU68xMnVdPafIL1TV05OhASArYyIfFvnvJCNrR2NIOvDVNNTFRa+Re2MWyw==", "requires": { "commander": "^2.20.0", "source-map": "~0.6.1", @@ -9525,15 +9390,15 @@ } }, "terser-webpack-plugin": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.4.3.tgz", - "integrity": "sha512-QMxecFz/gHQwteWwSo5nTc6UaICqN1bMedC5sMtUc7y3Ha3Q8y6ZO0iCR8pq4RJC8Hjf0FEPEHZqcMB/+DFCrA==", + "version": "1.4.5", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-1.4.5.tgz", + "integrity": "sha512-04Rfe496lN8EYruwi6oPQkG0vo8C+HT49X687FZnpPF0qMAIHONI6HEXYPKDOE8e5HjXTyKfqRd/agHtH0kOtw==", "requires": { "cacache": "^12.0.2", "find-cache-dir": "^2.1.0", "is-wsl": "^1.1.0", "schema-utils": "^1.0.0", - "serialize-javascript": "^2.1.2", + "serialize-javascript": "^4.0.0", "source-map": "^0.6.1", "terser": "^4.1.2", "webpack-sources": "^1.4.0", @@ -9606,9 +9471,9 @@ "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==" }, "timers-browserify": { - "version": "2.0.11", - "resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.11.tgz", - "integrity": "sha512-60aV6sgJ5YEbzUdn9c8kYGIqOubPoUdqQCul3SBAsRCZ40s6Y5cMcrW4dt3/k/EsbLVJNl9n6Vz3fTc+k2GeKQ==", + "version": "2.0.12", + 
"resolved": "https://registry.npmjs.org/timers-browserify/-/timers-browserify-2.0.12.tgz", + "integrity": "sha512-9phl76Cqm6FhSX9Xe1ZUAMLtm1BLkKj2Qd5ApyWkXzsMRaA7dgr81kf4wJmQf/hAvg8EEyJxDo3du/0KlhPiKQ==", "requires": { "setimmediate": "^1.0.4" } @@ -9744,9 +9609,9 @@ } }, "tslib": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.11.1.tgz", - "integrity": "sha512-aZW88SY8kQbU7gpV19lN24LtXh/yD4ZZg6qieAJDDg+YBsJcSmLGK9QpnUjAKVG/xefmvJGd1WUmfpT/g6AJGA==" + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", + "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==" }, "tty-browserify": { "version": "0.0.0", @@ -9947,9 +9812,9 @@ "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==" }, "update-notifier": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-4.1.0.tgz", - "integrity": "sha512-w3doE1qtI0/ZmgeoDoARmI5fjDoT93IfKgEGqm26dGUOh8oNpaSTsGNdYRN/SjOuo10jcJGwkEL3mroKzktkew==", + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-4.1.3.tgz", + "integrity": "sha512-Yld6Z0RyCYGB6ckIjffGOSOmHXj1gMeE7aROz4MG+XMkmixBX4jUngrGXNYz7wPKBmtoD4MnBa2Anu7RSKht/A==", "requires": { "boxen": "^4.2.0", "chalk": "^3.0.0", @@ -9967,11 +9832,10 @@ }, "dependencies": { "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "requires": { - "@types/color-name": "^1.1.1", "color-convert": "^2.0.1" } }, @@ -10003,9 +9867,9 @@ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" }, "supports-color": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz", - "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==", + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "requires": { "has-flag": "^4.0.0" } @@ -10018,9 +9882,9 @@ "integrity": "sha1-9rRQHC7EzdJrp4vnIilh3ndiFZg=" }, "uri-js": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz", - "integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==", + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.0.tgz", + "integrity": "sha512-B0yRTzYdUCCn9n+F4+Gh4yIDtMQcaJsmYBDsTSG8g/OejKBodLQ2IHfN3bM7jUsRXndopT7OIXWdYqc1fjmV6g==", "requires": { "punycode": "^2.1.0" } @@ -10172,9 +10036,9 @@ "integrity": "sha1-wGavtYK7HLQSjWDqkjkulNXp2+w=" }, "vue": { - "version": "2.6.11", - "resolved": "https://registry.npmjs.org/vue/-/vue-2.6.11.tgz", - "integrity": "sha512-VfPwgcGABbGAue9+sfrD4PuwFar7gPb1yl1UK1MwXoQPAw0BKSqWfoYCT/ThFrdEVWoI51dBuyCoiNU9bZDZxQ==" + "version": "2.6.12", + "resolved": "https://registry.npmjs.org/vue/-/vue-2.6.12.tgz", + "integrity": 
"sha512-uhmLFETqPPNyuLLbsKz6ioJ4q7AZHzD8ZVFNATNyICSZouqP2Sz0rotWQC8UNBF6VGSCs5abnKJoStA6JbCbfg==" }, "vue-hot-reload-api": { "version": "2.3.4", @@ -10182,9 +10046,9 @@ "integrity": "sha512-BXq3jwIagosjgNVae6tkHzzIk6a8MHFtzAdwhnV5VlvPTFxDCvIttgSiHWjdGoTJvXtmRu5HacExfdarRcFhog==" }, "vue-loader": { - "version": "15.9.1", - "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-15.9.1.tgz", - "integrity": "sha512-IaPU2KOPjs/QjMlxFs/TiTtQUSbftQ7lsAvoxe21rtcQohsMhx+1AltXCNhZIpIn46PtODiAgz+o8RbMpKtmJw==", + "version": "15.9.5", + "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-15.9.5.tgz", + "integrity": "sha512-oeMOs2b5o5gRqkxfds10bCx6JeXYTwivRgbb8hzOrcThD2z1+GqEKE3EX9A2SGbsYDf4rXwRg6D5n1w0jO5SwA==", "requires": { "@vue/component-compiler-utils": "^3.1.0", "hash-sum": "^1.0.2", @@ -10194,14 +10058,14 @@ } }, "vue-router": { - "version": "3.1.6", - "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.1.6.tgz", - "integrity": "sha512-GYhn2ynaZlysZMkFE5oCHRUTqE8BWs/a9YbKpNLi0i7xD6KG1EzDqpHQmv1F5gXjr8kL5iIVS8EOtRaVUEXTqA==" + "version": "3.4.8", + "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.4.8.tgz", + "integrity": "sha512-3BsR84AqarcmweXjItxw3jwQsiYNssYg090yi4rlzTnCJxmHtkyCvhNz9Z7qRSOkmiV485KkUCReTp5AjNY4wg==" }, "vue-server-renderer": { - "version": "2.6.11", - "resolved": "https://registry.npmjs.org/vue-server-renderer/-/vue-server-renderer-2.6.11.tgz", - "integrity": "sha512-V3faFJHr2KYfdSIalL+JjinZSHYUhlrvJ9pzCIjjwSh77+pkrsXpK4PucdPcng57+N77pd1LrKqwbqjQdktU1A==", + "version": "2.6.12", + "resolved": "https://registry.npmjs.org/vue-server-renderer/-/vue-server-renderer-2.6.12.tgz", + "integrity": "sha512-3LODaOsnQx7iMFTBLjki8xSyOxhCtbZ+nQie0wWY4iOVeEtTg1a3YQAjd82WvKxrWHHTshjvLb7OXMc2/dYuxw==", "requires": { "chalk": "^1.1.3", "hash-sum": "^1.0.2", @@ -10209,7 +10073,7 @@ "lodash.template": "^4.5.0", "lodash.uniq": "^4.5.0", "resolve": "^1.2.0", - "serialize-javascript": "^2.1.2", + "serialize-javascript": "^3.1.0", "source-map": "0.5.6" }, "dependencies": { @@ -10230,6 +10094,14 @@ "supports-color": "^2.0.0" } }, + "serialize-javascript": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-3.1.0.tgz", + "integrity": "sha512-JIJT1DGiWmIKhzRsG91aS6Ze4sFUrYbltlkg2onR5OrnNM02Kl/hnY/T4FN2omvyeBbQmMJv+K4cPOpGzOTFBg==", + "requires": { + "randombytes": "^2.1.0" + } + }, "source-map": { "version": "0.5.6", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.6.tgz", @@ -10252,9 +10124,9 @@ } }, "vue-template-compiler": { - "version": "2.6.11", - "resolved": "https://registry.npmjs.org/vue-template-compiler/-/vue-template-compiler-2.6.11.tgz", - "integrity": "sha512-KIq15bvQDrcCjpGjrAhx4mUlyyHfdmTaoNfeoATHLAiWB+MU3cx4lOzMwrnUh9cCxy0Lt1T11hAFY6TQgroUAA==", + "version": "2.6.12", + "resolved": "https://registry.npmjs.org/vue-template-compiler/-/vue-template-compiler-2.6.12.tgz", + "integrity": "sha512-OzzZ52zS41YUbkCBfdXShQTe69j1gQDZ9HIX8miuC9C3rBCk9wIRjLiZZLrmX9V+Ftq/YEyv1JaVr5Y/hNtByg==", "requires": { "de-indent": "^1.0.2", "he": "^1.1.0" @@ -10266,12 +10138,12 @@ "integrity": "sha512-4gDntzrifFnCEvyoO8PqyJDmguXgVPxKiIxrBKjIowvL9l+N66196+72XVYR8BBf1Uv1Fgt3bGevJ+sEmxfZzw==" }, "vuepress": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/vuepress/-/vuepress-1.4.0.tgz", - "integrity": "sha512-VrBNCCjyrB4EfdIRWTW6uo/xmMzplVsGE/2oGLkgVhWLPCvvSEAcGQhoUKWxRJXk6CdrDCov6jsmu6MA1N3fvw==", + "version": "1.7.1", + "resolved": 
"https://registry.npmjs.org/vuepress/-/vuepress-1.7.1.tgz", + "integrity": "sha512-AdA3do1L4DNzeF8sMTE+cSUJ5hR/6f3YujU8DVowi/vFOg/SX2lJF8urvDkZUSXzaAT6aSgkI9L+B6D+i7SJjA==", "requires": { - "@vuepress/core": "^1.4.0", - "@vuepress/theme-default": "^1.4.0", + "@vuepress/core": "1.7.1", + "@vuepress/theme-default": "1.7.1", "cac": "^6.5.6", "envinfo": "^7.2.0", "opencollective-postinstall": "^2.0.2", @@ -10330,10 +10202,11 @@ } }, "vuepress-plugin-container": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/vuepress-plugin-container/-/vuepress-plugin-container-2.1.3.tgz", - "integrity": "sha512-5bTtt8PKu9edNoc2Op/sRhCynjT+xKO/VuqwH7ftjdwNZUZMl/ymga7L+5lXCWNOLYAzRHaZAyYV5tY/97cl5g==", + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/vuepress-plugin-container/-/vuepress-plugin-container-2.1.5.tgz", + "integrity": "sha512-TQrDX/v+WHOihj3jpilVnjXu9RcTm6m8tzljNJwYhxnJUW0WWQ0hFLcDTqTBwgKIFdEiSxVOmYE+bJX/sq46MA==", "requires": { + "@vuepress/shared-utils": "^1.2.0", "markdown-it-container": "^2.0.0" } }, @@ -10354,40 +10227,59 @@ } }, "vuepress-theme-cosmos": { - "version": "1.0.161", - "resolved": "https://registry.npmjs.org/vuepress-theme-cosmos/-/vuepress-theme-cosmos-1.0.161.tgz", - "integrity": "sha512-eKcjz6IYEw4gYH57orf8H0qSd34+40R+Sw71gdwMkNphJRdMTK4hy7uwrjSmK0McpBRK7tEEZYZLR+EGeMIDNg==", - "requires": { - "@cosmos-ui/vue": "^0.22.0", - "@vuepress/plugin-google-analytics": "^1.3.1", - "axios": "^0.19.0", + "version": "1.0.175", + "resolved": "https://registry.npmjs.org/vuepress-theme-cosmos/-/vuepress-theme-cosmos-1.0.175.tgz", + "integrity": "sha512-QwVVaU1cMEl+j11trOEp2Vw+C3TAU+DQQIK4rcezHwMCsIYm9Wj4yDhz6rZVYd/Rg+KaCgZ1OCiZlcH/CXdu2A==", + "requires": { + "@cosmos-ui/vue": "^0.35.0", + "@vuepress/plugin-google-analytics": "1.7.1", + "algoliasearch": "^4.2.0", + "axios": "^0.21.0", "cheerio": "^1.0.0-rc.3", "clipboard-copy": "^3.1.0", - "entities": "^2.0.0", - "fuse.js": "^3.4.6", + "entities": "2.1.0", + "esm": "^3.2.25", "gray-matter": "^4.0.2", - "hotkeys-js": "^3.7.3", + "hotkeys-js": "3.8.1", "jsonp": "^0.2.1", - "markdown-it": "^10.0.0", - "markdown-it-attrs": "^3.0.1", - "prismjs": "^1.17.1", + "markdown-it": "^12.0.0", + "markdown-it-attrs": "^3.0.3", + "prismjs": "^1.21.0", "pug": "^2.0.4", "pug-plain-loader": "^1.0.0", - "stylus": "^0.54.7", + "stylus": "^0.54.8", "stylus-loader": "^3.0.2", + "tiny-cookie": "^2.3.2", "v-runtime-template": "^1.10.0", - "vuepress": "^1.2.0", + "vuepress": "^1.5.4", "vuepress-plugin-sitemap": "^2.3.1" } }, "watchpack": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.6.1.tgz", - "integrity": "sha512-+IF9hfUFOrYOOaKyfaI7h7dquUIOgyEMoQMLA7OP5FxegKA2+XdXThAZ9TU2kucfhDH7rfMHs1oPYziVGWRnZA==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.0.1.tgz", + "integrity": "sha512-vO8AKGX22ZRo6PiOFM9dC0re8IcKh8Kd/aH2zeqUc6w4/jBGlTy2P7fTC6ekT0NjVeGjgU2dGC5rNstKkeLEQg==", + "dev": true, "requires": { - "chokidar": "^2.1.8", - "graceful-fs": "^4.1.2", - "neo-async": "^2.5.0" + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "dependencies": { + "glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", + "dev": true + } + } + }, + "watchpack-chokidar2": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/watchpack-chokidar2/-/watchpack-chokidar2-2.0.0.tgz", + "integrity": "sha512-9TyfOyN/zLUbA288wZ8IsMZ+6cbzvsNyEzSBp6e/zkifi6xxbl8SmQ/CxQq32k8NNqrdVEVUVSEf56L4rQ/ZxA==", + "optional": true, + "requires": { + "chokidar": "^2.1.8" } }, "wbuf": { @@ -10404,19 +10296,19 @@ "integrity": "sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==" }, "webpack": { - "version": "4.42.1", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.42.1.tgz", - "integrity": "sha512-SGfYMigqEfdGchGhFFJ9KyRpQKnipvEvjc1TwrXEPCM6H5Wywu10ka8o3KGrMzSMxMQKt8aCHUFh5DaQ9UmyRg==", + "version": "4.44.2", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.44.2.tgz", + "integrity": "sha512-6KJVGlCxYdISyurpQ0IPTklv+DULv05rs2hseIXer6D7KrUicRDLFb4IUM1S6LUAKypPM/nSiVSuv8jHu1m3/Q==", "requires": { "@webassemblyjs/ast": "1.9.0", "@webassemblyjs/helper-module-context": "1.9.0", "@webassemblyjs/wasm-edit": "1.9.0", "@webassemblyjs/wasm-parser": "1.9.0", - "acorn": "^6.2.1", + "acorn": "^6.4.1", "ajv": "^6.10.2", "ajv-keywords": "^3.4.1", "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^4.1.0", + "enhanced-resolve": "^4.3.0", "eslint-scope": "^4.0.3", "json-parse-better-errors": "^1.0.2", "loader-runner": "^2.4.0", @@ -10429,14 +10321,111 @@ "schema-utils": "^1.0.0", "tapable": "^1.1.3", "terser-webpack-plugin": "^1.4.3", - "watchpack": "^1.6.0", + "watchpack": "^1.7.4", "webpack-sources": "^1.4.1" }, "dependencies": { "acorn": { - "version": "6.4.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.1.tgz", - "integrity": "sha512-ZVA9k326Nwrj3Cj9jlh3wGFutC2ZornPNARZwsNYqQYgN0EsV2d53w5RN/co65Ohn4sUAUtb1rSUAOD6XN9idA==" + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.2.tgz", + "integrity": "sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ==" + }, + "anymatch": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.1.tgz", + "integrity": "sha512-mM8522psRCqzV+6LhomX5wgp25YVibjh8Wj23I5RPkPppSVSjyKD2A2mBJmWGa+KN7f2D6LNh9jkBCeyLktzjg==", + "optional": true, + "requires": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + } + }, + "binary-extensions": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.1.0.tgz", + "integrity": "sha512-1Yj8h9Q+QDF5FzhMs/c9+6UntbD5MkRfRwac8DoEm9ZfUBZ7tZ55YcGVAzEe4bXsdQHEk+s9S5wsOKVdZrw0tQ==", + "optional": true + }, + "braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "optional": true, + "requires": { + "fill-range": "^7.0.1" + } + }, + "chokidar": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.4.3.tgz", + "integrity": "sha512-DtM3g7juCXQxFVSNPNByEC2+NImtBuxQQvWlHunpJIS5Ocr0lG306cC7FCi7cEA0fzmybPUIl4txBIobk1gGOQ==", + "optional": true, + "requires": { + "anymatch": "~3.1.1", + "braces": "~3.0.2", + "fsevents": "~2.1.2", + "glob-parent": "~5.1.0", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.5.0" + } + }, + "fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "optional": true, + "requires": { + 
"to-regex-range": "^5.0.1" + } + }, + "fsevents": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.1.3.tgz", + "integrity": "sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ==", + "optional": true + }, + "glob-parent": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.1.tgz", + "integrity": "sha512-FnI+VGOpnlGHWZxthPGR+QhR78fuiK0sNLkHQv+bL9fQi57lNNdquIbna/WrfROrolq8GK5Ek6BiMwqL/voRYQ==", + "optional": true, + "requires": { + "is-glob": "^4.0.1" + } + }, + "is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "optional": true, + "requires": { + "binary-extensions": "^2.0.0" + } + }, + "is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "optional": true + }, + "mkdirp": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "requires": { + "minimist": "^1.2.5" + } + }, + "readdirp": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz", + "integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==", + "optional": true, + "requires": { + "picomatch": "^2.2.1" + } }, "schema-utils": { "version": "1.0.0", @@ -10447,13 +10436,33 @@ "ajv-errors": "^1.0.0", "ajv-keywords": "^3.1.0" } + }, + "to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "optional": true, + "requires": { + "is-number": "^7.0.0" + } + }, + "watchpack": { + "version": "1.7.4", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.7.4.tgz", + "integrity": "sha512-aWAgTW4MoSJzZPAicljkO1hsi1oKj/RRq/OJQh2PKI2UKL04c2Bs+MBOB+BBABHTXJpf9mCwHN7ANCvYsvY2sg==", + "requires": { + "chokidar": "^3.4.1", + "graceful-fs": "^4.1.2", + "neo-async": "^2.5.0", + "watchpack-chokidar2": "^2.0.0" + } } } }, "webpack-chain": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/webpack-chain/-/webpack-chain-6.4.0.tgz", - "integrity": "sha512-f97PYqxU+9/u0IUqp/ekAHRhBD1IQwhBv3wlJo2nvyELpr2vNnUqO3XQEk+qneg0uWGP54iciotszpjfnEExFA==", + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/webpack-chain/-/webpack-chain-6.5.1.tgz", + "integrity": "sha512-7doO/SRtLu8q5WM0s7vPKPWX580qhi0/yBHkOxNkv50f6qB76Zy9o2wRTrrPULqYTvQlVHuvbA8v+G5ayuUDsA==", "requires": { "deepmerge": "^1.5.2", "javascript-stringify": "^2.0.1" @@ -10476,12 +10485,22 @@ "mkdirp": "^0.5.1", "range-parser": "^1.2.1", "webpack-log": "^2.0.0" + }, + "dependencies": { + "mkdirp": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "requires": { + "minimist": "^1.2.5" + } + } } }, "webpack-dev-server": { - "version": "3.10.3", - "resolved": 
"https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-3.10.3.tgz", - "integrity": "sha512-e4nWev8YzEVNdOMcNzNeCN947sWJNd43E5XvsJzbAL08kGc2frm1tQ32hTJslRS+H65LCb/AaUCYU7fjHCpDeQ==", + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-3.11.0.tgz", + "integrity": "sha512-PUxZ+oSTxogFQgkTtFndEtJIPNmml7ExwufBZ9L2/Xyyd5PnOL5UreWe5ZT7IU25DSdykL9p1MLQzmLh2ljSeg==", "requires": { "ansi-html": "0.0.7", "bonjour": "^3.5.0", @@ -10491,72 +10510,64 @@ "debug": "^4.1.1", "del": "^4.1.1", "express": "^4.17.1", - "html-entities": "^1.2.1", + "html-entities": "^1.3.1", "http-proxy-middleware": "0.19.1", "import-local": "^2.0.0", "internal-ip": "^4.3.0", "ip": "^1.1.5", "is-absolute-url": "^3.0.3", "killable": "^1.0.1", - "loglevel": "^1.6.6", + "loglevel": "^1.6.8", "opn": "^5.5.0", "p-retry": "^3.0.1", - "portfinder": "^1.0.25", + "portfinder": "^1.0.26", "schema-utils": "^1.0.0", "selfsigned": "^1.10.7", "semver": "^6.3.0", "serve-index": "^1.9.1", - "sockjs": "0.3.19", + "sockjs": "0.3.20", "sockjs-client": "1.4.0", - "spdy": "^4.0.1", + "spdy": "^4.0.2", "strip-ansi": "^3.0.1", "supports-color": "^6.1.0", "url": "^0.11.0", "webpack-dev-middleware": "^3.7.2", "webpack-log": "^2.0.0", "ws": "^6.2.1", - "yargs": "12.0.5" + "yargs": "^13.3.2" }, "dependencies": { "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=" + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" }, "cliui": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-4.1.0.tgz", - "integrity": "sha512-4FG+RSG9DL7uEwRUZXZn3SS34DiDPfzP0VOiEwtUWlE+AR2EIg+hSyvrIgUUfhdgR/UkAeW2QHgeP+hWrXs7jQ==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-5.0.0.tgz", + "integrity": "sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA==", "requires": { - "string-width": "^2.1.1", - "strip-ansi": "^4.0.0", - "wrap-ansi": "^2.0.0" + "string-width": "^3.1.0", + "strip-ansi": "^5.2.0", + "wrap-ansi": "^5.1.0" }, "dependencies": { "strip-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", "requires": { - "ansi-regex": "^3.0.0" + "ansi-regex": "^4.1.0" } } } }, "debug": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", - "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", - "requires": { - "ms": "^2.1.1" - } - }, - "find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.2.0.tgz", + "integrity": "sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg==", "requires": { - "locate-path": "^3.0.0" + "ms": "2.1.2" } }, "is-absolute-url": { @@ 
-10564,41 +10575,11 @@ "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-3.0.3.tgz", "integrity": "sha512-opmNIX7uFnS96NtPmhWQgQx6/NYFgsUXYMllcfzwWKUMwfo8kku1TvE6hkNcH+Q1ts5cMVrsY7j0bxXQDciu9Q==" }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, "ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, - "p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "requires": { - "p-try": "^2.0.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "requires": { - "p-limit": "^2.0.0" - } - }, - "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" - }, "schema-utils": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", @@ -10618,22 +10599,20 @@ } }, "yargs": { - "version": "12.0.5", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-12.0.5.tgz", - "integrity": "sha512-Lhz8TLaYnxq/2ObqHDql8dX8CJi97oHxrjUcYtzKbbykPtVW9WB+poxI+NM2UIzsMgNCZTIf0AQwsjK5yMAqZw==", + "version": "13.3.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-13.3.2.tgz", + "integrity": "sha512-AX3Zw5iPruN5ie6xGRIDgqkT+ZhnRlZMLMHAs8tg7nRruy2Nb+i5o9bwghAogtM08q1dpr2LVoS8KSTMYpWXUw==", "requires": { - "cliui": "^4.0.0", - "decamelize": "^1.2.0", + "cliui": "^5.0.0", "find-up": "^3.0.0", - "get-caller-file": "^1.0.1", - "os-locale": "^3.0.0", + "get-caller-file": "^2.0.1", "require-directory": "^2.1.1", - "require-main-filename": "^1.0.1", + "require-main-filename": "^2.0.0", "set-blocking": "^2.0.0", - "string-width": "^2.0.0", + "string-width": "^3.0.0", "which-module": "^2.0.0", - "y18n": "^3.2.1 || ^4.0.0", - "yargs-parser": "^11.1.1" + "y18n": "^4.0.0", + "yargs-parser": "^13.1.2" } } } @@ -10677,57 +10656,20 @@ "std-env": "^2.2.1", "text-table": "^0.2.0", "wrap-ansi": "^5.1.0" - }, - "dependencies": { - "ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - }, - "strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "requires": { - 
"ansi-regex": "^4.1.0" - } - }, - "wrap-ansi": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", - "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", - "requires": { - "ansi-styles": "^3.2.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0" - } - } } }, "websocket-driver": { - "version": "0.7.3", - "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.3.tgz", - "integrity": "sha512-bpxWlvbbB459Mlipc5GBzzZwhoZgGEZLuqPaR0INBGnPAY1vdBX6hPnoFXiw+3yWxDuHyQjO2oXTMyS8A5haFg==", + "version": "0.6.5", + "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.6.5.tgz", + "integrity": "sha1-XLJVbOuF9Dc8bYI4qmkchFThOjY=", "requires": { - "http-parser-js": ">=0.4.0 <0.4.11", - "safe-buffer": ">=5.1.0", "websocket-extensions": ">=0.1.1" } }, "websocket-extensions": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.3.tgz", - "integrity": "sha512-nqHUnMXmBzT0w570r2JpJxfiSD1IzoI+HGVdd3aZ0yNi3ngvQ4jv1dtHt5VGxfI2yj5yqImPhOK4vmIh2xMbGg==" + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz", + "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==" }, "whatwg-url": { "version": "7.1.0", @@ -10828,30 +10770,26 @@ } }, "wrap-ansi": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz", - "integrity": "sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU=", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-5.1.0.tgz", + "integrity": "sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q==", "requires": { - "string-width": "^1.0.1", - "strip-ansi": "^3.0.1" + "ansi-styles": "^3.2.0", + "string-width": "^3.0.0", + "strip-ansi": "^5.0.0" }, "dependencies": { - "is-fullwidth-code-point": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", - "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", - "requires": { - "number-is-nan": "^1.0.0" - } + "ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" }, - "string-width": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", - "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", + "strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", "requires": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" + "ansi-regex": "^4.1.0" } } } @@ -10917,9 +10855,9 @@ } }, "yargs-parser": { - "version": "11.1.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-11.1.1.tgz", - "integrity": "sha512-C6kB/WJDiaxONLJQnF8ccx9SEeoTTLek8RVbaOIsrAUS8VrBEXfmeSnCZxygc+XC2sNMBIwOOnfcxiynjHsVSQ==", + "version": "13.1.2", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-13.1.2.tgz", + "integrity": "sha512-3lbsNRf/j+A4QuSZfDRA7HRSfWrzO0YjqTJd5kjAq37Zep1CEgaYmrH9Q3GwPiB9cHyd1Y1UwggGhJGoxipbzg==", "requires": { 
"camelcase": "^5.0.0", "decamelize": "^1.2.0" diff --git a/docs/package.json b/docs/package.json index 477ae89fa..3f2efd17b 100644 --- a/docs/package.json +++ b/docs/package.json @@ -1,11 +1,13 @@ { "name": "docs", "version": "1.0.0", - "description": "Welcome to the Tendermint Core documentation!", + "description": "Tendermint Core Documentation", "main": "index.js", "dependencies": { - "@vuepress/plugin-google-analytics": "^1.4.1", - "vuepress-theme-cosmos": "^1.0.161" + "vuepress-theme-cosmos": "^1.0.175" + }, + "devDependencies": { + "watchpack": "^2.0.1" }, "scripts": { "preserve": "./pre.sh", diff --git a/docs/pre.sh b/docs/pre.sh index b80346d78..8ce5dac4f 100755 --- a/docs/pre.sh +++ b/docs/pre.sh @@ -1,4 +1,4 @@ #!/bin/bash -cp -a ../rpc/swagger/ .vuepress/public/rpc/ -git clone https://github.com/tendermint/spec.git specRepo && cp -r specRepo/spec . && rm -rf specRepo \ No newline at end of file +cp -a ../rpc/openapi/ .vuepress/public/rpc/ +git clone https://github.com/tendermint/spec.git specRepo && cp -r specRepo/spec . && rm -rf specRepo diff --git a/docs/rfc/README.md b/docs/rfc/README.md new file mode 100644 index 000000000..85461dd6c --- /dev/null +++ b/docs/rfc/README.md @@ -0,0 +1,7 @@ +--- +order: 1 +parent: + order: false +--- + + diff --git a/docs/rfc/rfc-001-end-to-end-testing.md b/docs/rfc/rfc-001-end-to-end-testing.md new file mode 100644 index 000000000..b9fca7be4 --- /dev/null +++ b/docs/rfc/rfc-001-end-to-end-testing.md @@ -0,0 +1,138 @@ +# RFC 001: End-to-End Testing + +## Changelog + +- 2020-09-07: Initial draft (@erikgrinaker) + +- 2020-09-08: Minor improvements (@erikgrinaker) + +## Authors + +- Erik Grinaker (@erikgrinaker) + +## Context + +The current set of end-to-end tests under `test/` are very limited, mostly focusing on P2P testing in a standard configuration. They do not test various configurations (e.g. fast sync reactor versions, state sync, block pruning, genesis vs InitChain setup), nor do they test various network topologies (e.g. sentry node architecture). This leads to poor test coverage, which has caused several serious bugs to go unnoticed. + +We need an end-to-end test suite that can run a large number of combinations of configuration options, genesis settings, network topologies, ABCI interactions, and failure scenarios and check that the network is still functional. This RFC outlines the basic requirements and design considerations, but does not propose a specific implementation - a later ADR will be submitted for this. + +This RFC will not cover comprehensive chaos testing, only a few simple scenarios (e.g. abrupt process termination and network partitioning). Chaos testing of the core consensus algorithm should be implemented e.g. via Jepsen tests or a similar framework, or alternatively be added to these end-to-end tests at a later time. Similarly, malicious or adversarial behavior is out of scope for the first implementation, but may be added later. 
+
+## Proposal
+
+### Functional Coverage
+
+The following lists the functionality we would like to test:
+
+#### Environments
+
+- **Topology:** single node, 4 nodes (seeds and persistent), sentry architecture, NAT (UPnP)
+- **Networking:** IPv4, IPv6
+- **ABCI connection:** UNIX socket, TCP, gRPC
+- **PrivVal:** file, UNIX socket, TCP
+
+#### Node/App Configurations
+
+- **Database:** goleveldb, cleveldb, boltdb, rocksdb, badgerdb
+- **Fast sync:** disabled, v0, v1, v2
+- **State sync:** disabled, enabled
+- **Block pruning:** none, keep 20, keep 1, keep random
+- **Role:** validator, full node
+- **App persistence:** enabled, disabled
+
+#### Geneses
+
+- **Validators:** none (InitChain), given
+- **Initial height:** 1, 1000
+- **App state:** none, given
+
+#### Behaviors
+
+- **Recovery:** stop/start, power cycling, validator outage, network partition, total network loss
+- **Validators:** add, remove, change power
+
+### Functional Combinations
+
+Running separate tests for all combinations of the above functionality is not feasible, as there are millions of them. However, the functionality can be grouped into three broad classes:
+
+- **Global:** affects the entire network, needing a separate testnet for each combination (e.g. topology, network protocol, genesis settings)
+
+- **Local:** affects a single node, and can be varied per node in a testnet (e.g. ABCI/privval connections, database backend, block pruning)
+
+- **Temporal:** can be run after each other in the same testnet (e.g. recovery and validator changes)
+
+Thus, we can run separate testnets for all combinations of global options (on the order of 100). In each testnet, we run nodes with randomly generated node configurations optimized for broad coverage (i.e. if one node is using GoLevelDB, then no other node should use it if possible). And in each testnet, we sequentially and randomly pick nodes to stop/start, power cycle, add/remove, disconnect, and so on.
+
+All of the settings should be specified in a testnet configuration (or alternatively the seed that generated it) such that it can be retrieved from CI and debugged locally.
+
+A custom ABCI application will have to be built that can exhibit the necessary behavior (e.g. make validator changes, prune blocks, enable/disable persistence, and so on).
+
+### Test Stages
+
+Given a test configuration, the test runner has the following stages:
+
+- **Setup:** configures the Docker containers and networks, but does not start them.
+
+- **Initialization:** starts the Docker containers, performs fast sync/state sync.
+
+- **Perturbation:** adds/removes validators, restarts nodes, perturbs networking, etc.; liveness and readiness are checked between each operation.
+
+- **Testing:** runs RPC tests independently against all network nodes, making sure data matches expectations and invariants hold.
+
+### Tests
+
+The general approach will be to put the network through a sequence of operations (see stages above), check basic liveness and readiness after each operation, and then once the network stabilizes run an RPC test suite against each node in the network.
+
+The test suite will do black-box testing against a single node's RPC service. We will be testing the behavior of the network as a whole, e.g. that a fast-synced node correctly catches up to the chain head and serves basic block data via RPC. Thus the tests will not send e.g. P2P messages or examine the node database, as these are considered internal implementation details - if the network behaves correctly, presumably the internal components function correctly. Comprehensive component testing (e.g. each and every RPC method parameter) should be done via unit/integration tests.
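+
+As an illustration, a minimal sketch of such a black-box test case is shown below. It assumes the context-aware RPC client on master; the node address is a placeholder, and a real test would take the address and its expectations from the testnet configuration:
+
+```go
+package e2e_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
+)
+
+// TestNodeCaughtUp checks, purely via RPC, that a node has caught up
+// to the chain head and serves basic block data.
+func TestNodeCaughtUp(t *testing.T) {
+	// Placeholder address; the runner would inject the node's RPC endpoint.
+	client, err := rpchttp.New("tcp://127.0.0.1:26657", "/websocket")
+	require.NoError(t, err)
+
+	ctx := context.Background()
+	status, err := client.Status(ctx)
+	require.NoError(t, err)
+	require.False(t, status.SyncInfo.CatchingUp, "node should no longer be catching up")
+
+	// The chain may advance between the two RPC calls, so only require
+	// that the latest block is at or beyond the reported height.
+	block, err := client.Block(ctx, nil)
+	require.NoError(t, err)
+	require.GreaterOrEqual(t, block.Block.Height, status.SyncInfo.LatestBlockHeight)
+}
+```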
+
+The tests must take into account the node configuration (e.g. some nodes may be pruned, others may not be validators), and should somehow be provided access to expected data (i.e. complete block headers for the entire chain).
+
+The test suite should use the Tendermint RPC client and the Tendermint light client, to exercise the client code as well.
+
+### Implementation Considerations
+
+The testnets should run in Docker Compose, both locally and in CI. This makes it easier to reproduce test failures locally. Supporting multiple test runners (e.g. on VMs or Kubernetes) is out of scope. The same image should be used for all tests, with configuration passed via a mounted volume.
+
+There do not appear to be any off-the-shelf solutions that would do this for us, so we will have to roll our own on top of Docker Compose. This gives us more flexibility, but is estimated to be a few weeks of work.
+
+Testnets should be configured via a YAML file. These are used as inputs for the test runner, which e.g. generates Docker Compose configurations from them. An additional layer on top should generate these testnet configurations from a YAML file that specifies all the option combinations to test.
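+
+As a sketch of what the runner might unmarshal such a testnet manifest into, consider a structure along the following lines. The field names here are hypothetical - the actual schema would be settled in the follow-up ADR:
+
+```go
+package e2e
+
+// Manifest describes a single testnet: global options, plus the
+// per-node (local) options that the runner can vary independently.
+type Manifest struct {
+	IPv6          bool                     `yaml:"ipv6"`           // network protocol (global)
+	InitialHeight int64                    `yaml:"initial_height"` // genesis setting (global)
+	Nodes         map[string]*ManifestNode `yaml:"nodes"`
+}
+
+// ManifestNode holds the options for a single node in the testnet.
+type ManifestNode struct {
+	Mode            string   `yaml:"mode"`             // validator | full
+	Database        string   `yaml:"database"`         // goleveldb | cleveldb | boltdb | rocksdb | badgerdb
+	ABCIProtocol    string   `yaml:"abci_protocol"`    // unix | tcp | grpc
+	PrivvalProtocol string   `yaml:"privval_protocol"` // file | unix | tcp
+	FastSync        string   `yaml:"fast_sync"`        // "" (disabled) | v0 | v1 | v2
+	StateSync       bool     `yaml:"state_sync"`
+	RetainBlocks    uint64   `yaml:"retain_blocks"`    // block pruning; 0 keeps all blocks
+	Perturb         []string `yaml:"perturb"`          // e.g. kill, restart, disconnect
+}
+```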
+
+Comprehensive testnets should run against master nightly. However, a small subset of representative testnets should run for each pull request, e.g. a four-node IPv4 network with state sync and fast sync.
+
+Tests should be written using the standard Go test framework (and e.g. Testify), with a helper function to fetch info from the test configuration. The test runner will run the tests separately for each network node, and the test must vary its expectations based on the node's configuration.
+
+It should be possible to launch a specific testnet and run individual test cases against it from the IDE or local terminal.
+
+If possible, the existing `testnet` command should be extended to set up the network topologies needed by the end-to-end tests.
+
+## Status
+
+Accepted
+
+## Consequences
+
+### Positive
+
+- Comprehensive end-to-end test coverage of basic Tendermint functionality, exercising common code paths in the same way that users would
+
+- Test environments can easily be reproduced locally and debugged via standard tooling
+
+### Negative
+
+- Limited coverage of consensus correctness testing (e.g. Jepsen)
+
+- No coverage of malicious or adversarial behavior
+
+- Have to roll our own test framework, which takes engineering resources
+
+- Possibly slower CI times, depending on which tests are run
+
+- Operational costs and overhead, e.g. infrastructure costs and system maintenance
+
+### Neutral
+
+- No support for alternative infrastructure platforms, e.g. Kubernetes or VMs
+
+## References
+
+- [#5291: new end-to-end test suite](https://github.com/tendermint/tendermint/issues/5291)
diff --git a/docs/ru/introduction/readme.md b/docs/ru/introduction/readme.md
deleted file mode 100644
index 33952c958..000000000
--- a/docs/ru/introduction/readme.md
+++ /dev/null
@@ -1 +0,0 @@
-# README in russian
diff --git a/docs/ru/readme.md b/docs/ru/readme.md
deleted file mode 100644
index cc9223496..000000000
--- a/docs/ru/readme.md
+++ /dev/null
@@ -1,4 +0,0 @@
----
-parent:
-  order: false
----
diff --git a/docs/tendermint-core/README.md b/docs/tendermint-core/README.md
index 5e03fee7b..fa94f3a1e 100644
--- a/docs/tendermint-core/README.md
+++ b/docs/tendermint-core/README.md
@@ -7,4 +7,17 @@ parent:
 
 # Overview
 
-See the side-bar for details on the various features of Tendermint Core.
+This section dives into the internals of the Tendermint implementation.
+
+- [Using Tendermint](./using-tendermint.md)
+- [Configuration](./configuration.md)
+- [Running in Production](./running-in-production.md)
+- [Metrics](./metrics.md)
+- [Validators](./validators.md)
+- [Subscribing to events](./subscription.md)
+- [Block Structure](./block-structure.md)
+- [RPC](./rpc.md)
+- [Fast Sync](./fast-sync.md)
+- [State Sync](./state-sync.md)
+- [Mempool](./mempool.md)
+- [Light Client](./light-client.md)
diff --git a/docs/tendermint-core/block-structure.md b/docs/tendermint-core/block-structure.md
index 95a81e588..4563084a6 100644
--- a/docs/tendermint-core/block-structure.md
+++ b/docs/tendermint-core/block-structure.md
@@ -4,13 +4,13 @@ order: 8
 
 # Block Structure
 
-The tendermint consensus engine records all agreements by a
+The Tendermint consensus engine records all agreements by a
 supermajority of nodes into a blockchain, which is replicated among all
-nodes. This blockchain is accessible via various rpc endpoints, mainly
+nodes. This blockchain is accessible via various RPC endpoints, mainly
 `/block?height=` to get the full block, as well as
 `/blockchain?minHeight=_&maxHeight=_` to get a list of headers. But
 what exactly is stored in these blocks?
 
-The [specification](https://github.com/tendermint/spec/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/blockchain.md) contains a detailed description of each component - that's the best place to get started.
+The [specification](https://github.com/tendermint/spec/blob/8dd2ed4c6fe12459edeb9b783bdaaaeb590ec15c/spec/core/data_structures.md) contains a detailed description of each component - that's the best place to get started.
 
 To dig deeper, check out the [types package documentation](https://godoc.org/github.com/tendermint/tendermint/types).
diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md
index 141645f26..4c78d1b39 100644
--- a/docs/tendermint-core/configuration.md
+++ b/docs/tendermint-core/configuration.md
@@ -16,7 +16,7 @@ the parameters set with their default values. It will look something like
 the file below, however, double check by inspecting the
 `config.toml` created with your version of `tendermint` installed:
 
-```
+```toml
 # This is a TOML config file.
 # For more information, see https://github.com/toml-lang/toml
@@ -25,7 +25,9 @@ like the file below, however, double check by inspecting the
 # "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable
 # or --home cmd flag.
 
-##### main base config options #####
+#######################################################################
+###                   Main Base Config Options                      ###
+#######################################################################
 
 # TCP or UNIX socket address of the ABCI application,
 # or the name of an ABCI application compiled in with the Tendermint binary
@@ -39,7 +41,7 @@ moniker = "anonymous"
 # and verifying their commits
 fast_sync = true
 
-# Database backend: goleveldb | cleveldb | boltdb | rocksdb
+# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb
 # * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
 #   - pure go
 #   - stable
@@ -55,13 +57,16 @@ fast_sync = true
 #   - EXPERIMENTAL
 #   - requires gcc
 #   - use rocksdb build tag (go build -tags rocksdb)
+# * badgerdb (uses github.com/dgraph-io/badger)
+#   - EXPERIMENTAL
+#   - use badgerdb build tag (go build -tags badgerdb)
 db_backend = "goleveldb"
 
 # Database directory
 db_dir = "data"
 
 # Output level for logging, including package level options
-log_level = "main:info,state:info,*:error"
+log_level = "main:info,state:info,statesync:info,*:error"
 
 # Output format: 'plain' (colored text) or 'json'
 log_format = "plain"
@@ -72,7 +77,10 @@ log_format = "plain"
 genesis_file = "config/genesis.json"
 
 # Path to the JSON file containing the private key to use as a validator in the consensus protocol
-priv_validator_file = "config/priv_validator.json"
+priv_validator_key_file = "config/priv_validator_key.json"
+
+# Path to the JSON file containing the last sign state of a validator
+priv_validator_state_file = "data/priv_validator_state.json"
 
 # TCP or UNIX socket address for Tendermint to listen on for
 # connections from an external PrivValidator process
@@ -84,20 +92,22 @@ node_key_file = "config/node_key.json"
 # Mechanism to connect to the ABCI application: socket | grpc
 abci = "socket"
 
-# TCP or UNIX socket address for the profiling server to listen on
-prof_laddr = ""
-
 # If true, query the ABCI app on connecting to a new peer
 # so the app can decide if we should keep the connection or not
 filter_peers = false
 
-##### advanced configuration options #####
-##### rpc server configuration options #####
+#######################################################################
+###                 Advanced Configuration Options                  ###
+#######################################################################
+
+#######################################################
+###       RPC Server Configuration Options          ###
+#######################################################
 [rpc]
 
 # TCP or UNIX socket address for the RPC server to listen on
-laddr = "tcp://0.0.0.0:26657"
+laddr = "tcp://127.0.0.1:26657"
 
 # A list of origins a cross-domain request can be executed from
 # Default value '[]' disables cors support
@@ -105,10 +115,10 @@ laddr = "tcp://0.0.0.0:26657"
 cors_allowed_origins = []
 
 # A list of methods the client is allowed to use with cross-domain requests
-cors_allowed_methods = ["HEAD", "GET", "POST"]
+cors_allowed_methods = ["HEAD", "GET", "POST", ]
 
 # A list of non simple headers the client is allowed to use with cross-domain requests
-cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"]
+cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
 
 # TCP or UNIX socket address for the gRPC server to listen on
 # NOTE: This server only supports /broadcast_tx_commit
@@ -152,25 +162,32 @@ max_subscriptions_per_client = 5
 timeout_broadcast_tx_commit = "10s"
 
body, in bytes
-max_body_bytes = {{ .RPC.MaxBodyBytes }}
+max_body_bytes = 1000000

 # Maximum size of request header, in bytes
-max_header_bytes = {{ .RPC.MaxHeaderBytes }}
+max_header_bytes = 1048576

 # The path to a file containing certificate that is used to create the HTTPS server.
 # Might be either an absolute path or a path relative to tendermint's config directory.
 # If the certificate is signed by a certificate authority,
 # the certFile should be the concatenation of the server's certificate, any intermediates,
 # and the CA's certificate.
-# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
+# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
+# Otherwise, HTTP server is run.
 tls_cert_file = ""

 # The path to a file containing matching private key that is used to create the HTTPS server.
 # Might be either an absolute path or a path relative to tendermint's config directory.
-# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
+# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
+# Otherwise, HTTP server is run.
 tls_key_file = ""

-##### peer to peer configuration options #####
+# pprof listen address (https://golang.org/pkg/net/http/pprof)
+pprof_laddr = ""
+
+#######################################################
+### P2P Configuration Options ###
+#######################################################
 [p2p]

 # Address to listen for incoming connections
@@ -204,6 +221,12 @@ max_num_inbound_peers = 40
 # Maximum number of outbound peers to connect to, excluding persistent peers
 max_num_outbound_peers = 10

+# List of node IDs to which a connection will be (re)established, ignoring any existing limits
+unconditional_peer_ids = ""
+
+# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
+persistent_peers_max_dial_period = "0s"
+
 # Time to wait before flushing messages out on the connection
 flush_throttle_timeout = "100ms"

@@ -235,7 +258,9 @@ allow_duplicate_ip = false
 handshake_timeout = "20s"
 dial_timeout = "3s"

-##### mempool configuration options #####
+#######################################################
+### Mempool Configuration Options ###
+#######################################################
 [mempool]

 recheck = true
@@ -254,30 +279,83 @@ max_txs_bytes = 1073741824
 cache_size = 10000

 # Maximum size of a single transaction.
-# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}.
+# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}.
 max_tx_bytes = 1048576

-##### fast sync configuration options #####
+# Maximum size of a batch of transactions to send to a peer,
+# including space needed by encoding (one varint per transaction).
+max_batch_bytes = 10485760
+
+#######################################################
+### State Sync Configuration Options ###
+#######################################################
+[statesync]
+# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine
+# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in
+# the network to take and serve state machine snapshots. State sync is not attempted if the node
+# has any local state (LastBlockHeight > 0). The node will have a truncated block history,
+# starting from the height of the snapshot.
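+#
+# For illustration only, a filled-in state sync section might look like the
+# following; the servers, height and hash below are placeholders, not real values:
+#   enable = true
+#   rpc_servers = "233.123.0.140:26657,179.63.29.15:26657"
+#   trust_height = 100000
+#   trust_hash = "188F4F36CBCD2C91B57509BBF231C777E79B52EE3E0D90D06B1A25EB16E6E23D"
+#   trust_period = "168h0m0s"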
+enable = false
+
+# RPC servers (comma-separated) for light client verification of the synced state machine and
+# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding
+# header hash obtained from a trusted source, and a period during which validators can be trusted.
+#
+# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2
+# weeks) during which they can be financially punished (slashed) for misbehavior.
+rpc_servers = ""
+trust_height = 0
+trust_hash = ""
+trust_period = "168h0m0s"
+
+# Time to spend discovering snapshots before initiating a restore.
+discovery_time = "15s"
+
+# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp).
+# Will create a new, randomly named directory within, and remove it when done.
+temp_dir = ""
+
+#######################################################
+### Fast Sync Configuration Options ###
+#######################################################
 [fastsync]

 # Fast Sync version to use:
 # 1) "v0" (default) - the legacy fast sync implementation
 # 2) "v1" - refactor of v0 version for better testability
+# 3) "v2" - complete redesign of v0, optimized for testability & readability
 version = "v0"

-##### consensus configuration options #####
+#######################################################
+### Consensus Configuration Options ###
+#######################################################
 [consensus]

 wal_file = "data/cs.wal/wal"

+# How long we wait for a proposal block before prevoting nil
 timeout_propose = "3s"
+# How much timeout_propose increases with each round
 timeout_propose_delta = "500ms"
+# How long we wait after receiving +2/3 prevotes for “anything” (i.e. not a single block or nil)
 timeout_prevote = "1s"
+# How much the timeout_prevote increases with each round
 timeout_prevote_delta = "500ms"
+# How long we wait after receiving +2/3 precommits for “anything” (i.e. not a single block or nil)
 timeout_precommit = "1s"
+# How much the timeout_precommit increases with each round
 timeout_precommit_delta = "500ms"
+# How long we wait after committing a block, before starting on the new
+# height (this gives us a chance to receive some more precommits, even
+# though we already have +2/3).
 timeout_commit = "1s"

+# How many blocks to look back to check the existence of the node's consensus votes before joining consensus
+# When non-zero, the node will panic upon restart
+# if the same consensus key was used to sign {double_sign_check_height} last blocks.
+# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic.
+double_sign_check_height = 0
+
 # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
 skip_timeout_commit = false

@@ -289,42 +367,25 @@ create_empty_blocks_interval = "0s"
 peer_gossip_sleep_duration = "100ms"
 peer_query_maj23_sleep_duration = "2s"

-# Block time parameters. Corresponds to the minimum time increment between consecutive blocks.
-blocktime_iota = "1s"
-
-##### transactions indexer configuration options #####
+#######################################################
+### Transaction Indexer Configuration Options ###
+#######################################################
 [tx_index]

 # What indexer to use for transactions
 #
+# The application will set which txs to index. In some cases a node operator will be able
+# to decide which txs to index based on configuration set in the application.
+#
 # Options:
 # 1) "null"
 # 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
+#   - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
 indexer = "kv"

-# Comma-separated list of compositeKeys to index (by default the only key is "tx.hash")
-# Remember that Event has the following structure: type.key
-# type: [
-#  key: value,
-#  ...
-# ]
-#
-# You can also index transactions by height by adding "tx.height" event here.
-#
-# It's recommended to index only a subset of keys due to possible memory
-# bloat. This is, of course, depends on the indexer's DB and the volume of
-# transactions.
-index_keys = ""
-
-# When set to true, tells indexer to index all compositeKeys (predefined keys:
-# "tx.hash", "tx.height" and all keys from DeliverTx responses).
-#
-# Note this may be not desirable (see the comment above). IndexEvents has a
-# precedence over IndexAllEvents (i.e. when given both, IndexEvents will be
-# indexed).
-index_all_keys = false
-
-##### instrumentation configuration options #####
+#######################################################
+### Instrumentation Configuration Options ###
+#######################################################
 [instrumentation]

 # When true, Prometheus metrics are served under /metrics on
@@ -343,17 +404,18 @@ max_open_connections = 3

 # Instrumentation namespace
 namespace = "tendermint"
+
 ```

 ## Empty blocks VS no empty blocks

-**create_empty_blocks = true**
+### create_empty_blocks = true

 If `create_empty_blocks` is set to `true` in your config, blocks will be
 created ~ every second (with default consensus parameters). You can regulate
 the delay between blocks by changing the `timeout_commit`. E.g. `timeout_commit
 = "10s"` should result in ~ 10 second blocks.

-**create_empty_blocks = false**
+### create_empty_blocks = false

 In this setting, blocks are created when transactions are received.

@@ -361,7 +423,7 @@ Note after the block H, Tendermint creates something we call a "proof block"
 (only if the application hash changed) H+1. The reason for this is to support
 proofs. If you have a transaction in block H that changes the state to X, the
 new application hash will only be included in block H+1. If after your
-transaction is committed, you want to get a lite-client proof for the new state
+transaction is committed, you want to get a light-client proof for the new state
 (X), you need the new block to be committed in order to do that because the new
 block has the new application hash for the state X. That's why we make a new
 (empty) block if the application hash changes. Otherwise, you won't be able to
@@ -382,7 +444,7 @@ production](./running-in-production.md)
 You can also find more detailed technical explanation in the spec: [The latest
 gossip on BFT consensus](https://arxiv.org/abs/1807.04938).

-```
+```toml
 [consensus]
 ...
@@ -414,3 +476,17 @@ Here's a brief summary of the timeouts:
 - `timeout_commit` = how long we wait after committing a block, before starting
   on the new height (this gives us a chance to receive some more precommits,
   even though we already have +2/3)
+
+## P2P settings
+
+This section will cover settings within the p2p section of the `config.toml`.
+
+- `external_address` = is the address that will be advertised for other nodes to use. We recommend setting this field with your public IP and p2p port.
+- `seeds` = is a list of comma-separated seed nodes that you will connect to upon start and ask for peers.
A seed node is a node that does not participate in consensus but only helps propagate peers to nodes in the network
+- `persistent_peers` = is a list of comma-separated peers that you will always want to be connected to. If you're already connected to the maximum number of peers, persistent peers will not be added.
+- `max_num_inbound_peers` = is the maximum number of peers you will accept inbound connections from at one time (where they dial your address and initiate the connection).
+- `max_num_outbound_peers` = is the maximum number of peers you will initiate outbound connections to at one time (where you dial their address and initiate the connection).
+- `unconditional_peer_ids` = is similar to `persistent_peers` except that these peers will be connected to even if you are already connected to the maximum number of peers. This can be a validator node ID on your sentry node.
+- `pex` = turns the peer exchange reactor on or off. Validator nodes will want the `pex` turned off so they do not begin gossiping to unknown peers on the network. PeX can also be turned off for statically configured networks with fixed network connectivity. For full nodes on open, dynamic networks, it should be turned on.
+- `seed_mode` = is used when node operators want to run their node as a seed node. Seed nodes run a variation of the PeX protocol that disconnects from peers after sending them a list of peers to connect to. To minimize the server's usage, it is recommended to set the mempool's size to 0.
+- `private_peer_ids` = is a comma-separated list of node IDs that you would not like exposed to other peers (i.e. you will not tell other peers about the `private_peer_ids`). This can be filled with a validator's node ID.
diff --git a/docs/tendermint-core/fast-sync.md b/docs/tendermint-core/fast-sync.md
index 0b7996b5f..9bbeade38 100644
--- a/docs/tendermint-core/fast-sync.md
+++ b/docs/tendermint-core/fast-sync.md
@@ -1,5 +1,5 @@
 ---
-order: 6
+order: 10
 ---

 # Fast Sync
@@ -16,11 +16,11 @@ consensus gossip protocol.

 ## Using Fast Sync

-To support faster syncing, tendermint offers a `fast-sync` mode, which
+To support faster syncing, Tendermint offers a `fast-sync` mode, which
 is enabled by default, and can be toggled in the `config.toml` or via
 `--fast_sync=false`.

-In this mode, the tendermint daemon will sync hundreds of times faster
+In this mode, the Tendermint daemon will sync hundreds of times faster
 than if it used the real-time consensus process. Once caught up, the
 daemon will switch out of fast sync and into the normal consensus mode.
 After running for some time, the node is considered `caught up` if it
@@ -28,5 +28,21 @@ has at least one peer and its height is at least as high as the max
 reported peer height. See [the IsCaughtUp
 method](https://github.com/tendermint/tendermint/blob/b467515719e686e4678e6da4e102f32a491b85a0/blockchain/pool.go#L128).

+Note: There are three versions of fast sync. We recommend using v0, as v1 and v2 are still in beta.
+ If you would like to use a different version, you can do so by changing the version in the `config.toml`:
+
+```toml
+#######################################################
+### Fast Sync Configuration Options ###
+#######################################################
+[fastsync]
+
+# Fast Sync version to use:
+# 1) "v0" (default) - the legacy fast sync implementation
+# 2) "v1" - refactor of v0 version for better testability
+# 3) "v2" - complete redesign of v0, optimized for testability & readability
+version = "v0"
+```
+
 If we're lagging sufficiently, we should go back to fast syncing, but this is an
 [open issue](https://github.com/tendermint/tendermint/issues/129).
diff --git a/docs/tendermint-core/how-to-read-logs.md b/docs/tendermint-core/how-to-read-logs.md
index 195a515a5..da2fc6f8b 100644
--- a/docs/tendermint-core/how-to-read-logs.md
+++ b/docs/tendermint-core/how-to-read-logs.md
@@ -9,7 +9,7 @@ order: 7
 We first create three connections (mempool, consensus and query) to the
 application (running `kvstore` locally in this case).

-```
+```sh
 I[10-04|13:54:27.364] Starting multiAppConn module=proxy impl=multiAppConn
 I[10-04|13:54:27.366] Starting localClient module=abci-client connection=query impl=localClient
 I[10-04|13:54:27.366] Starting localClient module=abci-client connection=mempool impl=localClient
@@ -18,7 +18,7 @@ I[10-04|13:54:27.367] Starting localClient module=abci-c
 Then Tendermint Core and the application perform a handshake.

-```
+```sh
 I[10-04|13:54:27.367] ABCI Handshake module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD
 I[10-04|13:54:27.368] ABCI Replay Blocks module=consensus appHeight=90 storeHeight=90 stateHeight=90
 I[10-04|13:54:27.368] Completed ABCI Handshake - Tendermint and App are synced module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD
@@ -27,7 +27,7 @@ I[10-04|13:54:27.368] Completed ABCI Handshake - Tendermint and App are synced m
 After that, we start a few more things like the event switch, reactors, and
 perform UPnP discovery in order to detect the IP address.

-```
+```sh
 I[10-04|13:54:27.374] Starting EventSwitch module=types impl=EventSwitch
 I[10-04|13:54:27.375] This node is a validator module=consensus
 I[10-04|13:54:27.379] Starting Node module=main impl=Node
@@ -50,7 +50,7 @@ validator". It also could be just an observer (regular node).

 Next we replay all the messages from the WAL.

-```
+```sh
 I[10-04|13:54:30.390] Catchup by replaying consensus messages module=consensus height=91
 I[10-04|13:54:30.390] Replay: New Step module=consensus height=91 round=0 step=RoundStepNewHeight
 I[10-04|13:54:30.390] Replay: Done module=consensus
@@ -58,7 +58,7 @@ I[10-04|13:54:30.390] Replay: Done module=consen

 "Started node" message signals that everything is ready for work.

-```
+```sh
 I[10-04|13:54:30.391] Starting RPC HTTP server on tcp socket 0.0.0.0:26657 module=rpc-server
 I[10-04|13:54:30.392] Started node module=main nodeInfo="NodeInfo{id: DF22D7C92C91082324A1312F092AA1DA197FA598DBBFB6526E, moniker: anonymous, network: test-chain-3MNw2N [remote , listen 10.0.2.15:26656], version: 0.11.0-10f361fc ([wire_version=0.6.2 p2p_version=0.5.0 consensus_version=v1/0.2.2 rpc_version=0.7.0/3 tx_index=on rpc_addr=tcp://0.0.0.0:26657])}"
 ```
@@ -69,7 +69,7 @@ precommits and finally have a chance to commit a block. For details,
 please refer to [Byzantine Consensus
 Algorithm](https://github.com/tendermint/spec/blob/master/spec/consensus/consensus.md).

-```
+```sh
 I[10-04|13:54:30.393] enterNewRound(91/0).
Current: 91/0/RoundStepNewHeight module=consensus I[10-04|13:54:30.393] enterPropose(91/0). Current: 91/0/RoundStepNewRound module=consensus I[10-04|13:54:30.393] enterPropose: Our turn to propose module=consensus proposer=125B0E3C5512F5C2B0E1109E31885C4511570C42 privValidator="PrivValidator{125B0E3C5512F5C2B0E1109E31885C4511570C42 LH:90, LR:0, LS:3}" @@ -138,7 +138,7 @@ little overview what they do. [README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md). - `rpc` [Tendermint's RPC](./rpc.md). - `rpc-server` RPC server. For implementation details, please read the - [doc.go](https://github.com/tendermint/tendermint/blob/master/rpc/lib/doc.go). + [doc.go](https://github.com/tendermint/tendermint/blob/master/rpc/jsonrpc/doc.go). - `state` Represents the latest state and execution submodule, which executes blocks against the application. - `types` A collection of the publicly exposed types and methods to diff --git a/docs/tendermint-core/light-client-protocol.md b/docs/tendermint-core/light-client-protocol.md deleted file mode 100644 index 41b7a0e07..000000000 --- a/docs/tendermint-core/light-client-protocol.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -order: 9 ---- - -# Light Client Protocol - -Light clients are an important part of the complete blockchain system for most -applications. Tendermint provides unique speed and security properties for -light client applications. - -See our [lite -package](https://pkg.go.dev/github.com/tendermint/tendermint/lite2?tab=doc). - -## Overview - -The objective of the light client protocol is to get a commit for a recent -block hash where the commit includes a majority of signatures from the last -known validator set. From there, all the application state is verifiable with -[merkle -proofs](https://github.com/tendermint/spec/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/encoding.md#iavl-tree). - -## Properties - -- You get the full collateralized security benefits of Tendermint; No - need to wait for confirmations. -- You get the full speed benefits of Tendermint; transactions - commit instantly. -- You can get the most recent version of the application state - non-interactively (without committing anything to the blockchain). For - example, this means that you can get the most recent value of a name from the - name-registry without worrying about fork censorship attacks, without posting - a commit and waiting for confirmations. It's fast, secure, and free! - -## Where to obtain trusted height & hash? - -https://pkg.go.dev/github.com/tendermint/tendermint/lite2?tab=doc#TrustOptions - -One way to obtain semi-trusted hash & height is to query multiple full nodes -and compare their hashes: - -```sh -$ curl -s https://233.123.0.140:26657:26657/commit | jq "{height: .result.signed_header.header.height, hash: .result.signed_header.commit.block_id.hash}" -{ - "height": "273", - "hash": "188F4F36CBCD2C91B57509BBF231C777E79B52EE3E0D90D06B1A25EB16E6E23D" -} -``` - -## HTTP proxy - -Tendermint comes with a built-in `tendermint lite` command, which can be used -to run a light client proxy server, verifying Tendermint rpc. All calls that -can be tracked back to a block header by a proof will be verified before -passing them back to the caller. Other than that, it will present the same -interface as a full Tendermint node. 
-
-```sh
-$ tendermint lite supernova -p tcp://233.123.0.140:26657 \
-  -w tcp://179.63.29.15:26657,tcp://144.165.223.135:26657 \
-  --height=10 --hash=37E9A6DD3FA25E83B22C18835401E8E56088D0D7ABC6FD99FCDC920DD76C1C57
-```
-
-For additional options, run `tendermint lite --help`.
diff --git a/docs/tendermint-core/light-client.md b/docs/tendermint-core/light-client.md
new file mode 100644
index 000000000..1b07a51e9
--- /dev/null
+++ b/docs/tendermint-core/light-client.md
@@ -0,0 +1,88 @@
+---
+order: 13
+---
+
+# Light Client
+
+Light clients are an important part of the complete blockchain system for most
+applications. Tendermint provides unique speed and security properties for
+light client applications.
+
+See our [light
+package](https://pkg.go.dev/github.com/tendermint/tendermint/light?tab=doc).
+
+## Overview
+
+The light client protocol verifies headers by retrieving a chain of headers,
+commits and validator sets from a trusted height to the target height, verifying
+the signatures of each of these intermediary signed headers until it reaches the
+target height. From there, all the application state is verifiable with
+[merkle proofs](https://github.com/tendermint/spec/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/encoding.md#iavl-tree).
+
+## Properties
+
+- You get the full collateralized security benefits of Tendermint; no
+  need to wait for confirmations.
+- You get the full speed benefits of Tendermint; transactions
+  commit instantly.
+- You can get the most recent version of the application state
+  non-interactively (without committing anything to the blockchain). For
+  example, this means that you can get the most recent value of a name from the
+  name-registry without worrying about fork censorship attacks, without posting
+  a commit and waiting for confirmations. It's fast, secure, and free!
+
+## Security
+
+A light client is initialized from a point of trust using [Trust Options](https://pkg.go.dev/github.com/tendermint/tendermint/light?tab=doc#TrustOptions),
+a provider and a set of witnesses. This sets the trust period (the period during
+which full nodes can be held accountable for faulty behavior) and the trust
+level (the fraction of validators in a validator set within which we trust that
+at least one is correct). As Tendermint consensus can withstand up to 1/3
+byzantine faults, this is the default trust level; for greater security you can
+increase it (max: 1).
+
+Similar to a full node, light clients can also be subject to byzantine attacks.
+A light client also runs a detector process which cross-verifies headers from a
+primary with witnesses. Therefore light clients should be configured with enough witnesses.
+
+If the light client observes a faulty provider, it will report it to another provider
+and return an error.
+
+In summary, the light client is not safe when a) more than the trust level of
+validators are malicious and b) all witnesses are malicious.
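+To see the kind of cross-checking a witness provides, you can manually compare
+the header hash reported by two independent full nodes at the same height. A
+minimal sketch using the `/commit` RPC endpoint; the node addresses and height
+below are placeholders:
+
+```sh
+PRIMARY=https://233.123.0.140:26657   # placeholder primary RPC address
+WITNESS=https://179.63.29.15:26657    # placeholder witness RPC address
+HEIGHT=273
+
+# Fetch the block ID hash for the same height from both nodes.
+primary_hash=$(curl -s "$PRIMARY/commit?height=$HEIGHT" | jq -r .result.signed_header.commit.block_id.hash)
+witness_hash=$(curl -s "$WITNESS/commit?height=$HEIGHT" | jq -r .result.signed_header.commit.block_id.hash)
+
+# A mismatch suggests that one of the two nodes is faulty or malicious.
+[ "$primary_hash" = "$witness_hash" ] && echo "hashes match: $primary_hash" || echo "MISMATCH: $primary_hash vs $witness_hash"
+```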
+
+## Where to obtain trusted height & hash
+
+One way to obtain a semi-trusted hash & height is to query multiple full nodes
+and compare their hashes:
+
+```bash
+$ curl -s https://233.123.0.140:26657/commit | jq "{height: .result.signed_header.header.height, hash: .result.signed_header.commit.block_id.hash}"
+{
+  "height": "273",
+  "hash": "188F4F36CBCD2C91B57509BBF231C777E79B52EE3E0D90D06B1A25EB16E6E23D"
+}
+```
+
+## Running a light client as an HTTP proxy server
+
+Tendermint comes with a built-in `tendermint light` command, which can be used
+to run a light client proxy server, verifying Tendermint RPC. All calls that
+can be traced back to a block header by a proof will be verified before
+being passed back to the caller. Other than that, it will present the same
+interface as a full Tendermint node.
+
+You can start the light client proxy server by running `tendermint light`,
+with a variety of flags to specify the primary node, the witness nodes (which cross-check
+the information provided by the primary), the hash and height of the trusted header,
+and more.
+
+For example:
+
+```bash
+$ tendermint light supernova -p tcp://233.123.0.140:26657 \
+  -w tcp://179.63.29.15:26657,tcp://144.165.223.135:26657 \
+  --height=10 --hash=37E9A6DD3FA25E83B22C18835401E8E56088D0D7ABC6FD99FCDC920DD76C1C57
+```
+
+For additional options, run `tendermint light --help`.
diff --git a/docs/tendermint-core/mempool.md b/docs/tendermint-core/mempool.md
index ad1607c6c..8dd968781 100644
--- a/docs/tendermint-core/mempool.md
+++ b/docs/tendermint-core/mempool.md
@@ -1,5 +1,5 @@
 ---
-order: 10
+order: 12
 ---

 # Mempool
@@ -12,34 +12,37 @@ arrived (via RPC or from other nodes). So the only way to specify the order is
 to send them to a single node.

 valA:
-  - tx1
-  - tx2
-  - tx3
+
+- `tx1`
+- `tx2`
+- `tx3`

 If the transactions are split up across different nodes, there's no way to
 ensure they are processed in the expected order.

 valA:
-  - tx1
-  - tx2
+
+- `tx1`
+- `tx2`

 valB:
-  - tx3
+
+- `tx3`

 If valB is the proposer, the order might be:

-  - tx3
-  - tx1
-  - tx2
+- `tx3`
+- `tx1`
+- `tx2`

 If valA is the proposer, the order might be:

-  - tx1
-  - tx2
-  - tx3
+- `tx1`
+- `tx2`
+- `tx3`

 That said, if the transactions contain some internal value, like an
 order/nonce/sequence number, the application can reject transactions that are
-out of order. So if a node receives tx3, then tx1, it can reject tx3 and then
-accept tx1. The sender can then retry sending tx3, which should probably be
-rejected until the node has seen tx2.
+out of order. So if a node receives `tx3`, then `tx1`, it can reject `tx3` and then
+accept `tx1`. The sender can then retry sending `tx3`, which should probably be
+rejected until the node has seen `tx2`.
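+
+To guarantee ordering in practice, a client can send every ordered transaction
+to the same node and wait for each one to be accepted into the mempool before
+sending the next. A rough sketch using the `/broadcast_tx_sync` endpoint; the
+node address and transactions are placeholders:
+
+```sh
+NODE=http://localhost:26657  # send all ordered txs to this one node
+
+# broadcast_tx_sync returns after CheckTx has run, so each transaction is in
+# the mempool before the next one is submitted, preserving tx1 -> tx2 -> tx3.
+for tx in '"tx1"' '"tx2"' '"tx3"'; do
+  curl -s "$NODE/broadcast_tx_sync?tx=$tx"
+  echo
+done
+```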
diff --git a/docs/tendermint-core/metrics.md b/docs/tendermint-core/metrics.md index 57a6cea50..67eb13e0d 100644 --- a/docs/tendermint-core/metrics.md +++ b/docs/tendermint-core/metrics.md @@ -1,5 +1,5 @@ --- -order: 11 +order: 5 --- # Metrics @@ -18,42 +18,43 @@ Listen address can be changed in the config file (see The following metrics are available: -| **Name** | **Type** | **Since** | **Tags** | **Description** | -| -------------------------------------- | --------- | --------- | ------------- | ---------------------------------------------------------------------- | -| consensus_height | Gauge | 0.21.0 | | Height of the chain | -| consensus_validators | Gauge | 0.21.0 | | Number of validators | -| consensus_validators_power | Gauge | 0.21.0 | | Total voting power of all validators | -| consensus_validator_power | Gauge | 0.33.0 | | Voting power of the node if in the validator set | -| consensus_validator_last_signed_height | Gauge | 0.33.0 | | Last height the node signed a block, if the node is a validator | -| consensus_validator_missed_blocks | Gauge | 0.33.0 | | Total amount of blocks missed for the node, if the node is a validator | -| consensus_missing_validators | Gauge | 0.21.0 | | Number of validators who did not sign | -| consensus_missing_validators_power | Gauge | 0.21.0 | | Total voting power of the missing validators | -| consensus_byzantine_validators | Gauge | 0.21.0 | | Number of validators who tried to double sign | -| consensus_byzantine_validators_power | Gauge | 0.21.0 | | Total voting power of the byzantine validators | -| consensus_block_interval_seconds | Histogram | 0.21.0 | | Time between this and last block (Block.Header.Time) in seconds | -| consensus_rounds | Gauge | 0.21.0 | | Number of rounds | -| consensus_num_txs | Gauge | 0.21.0 | | Number of transactions | -| consensus_total_txs | Gauge | 0.21.0 | | Total number of transactions committed | -| consensus_block_parts | counter | 0.25.0 | peer_id | number of blockparts transmitted by peer | -| consensus_latest_block_height | gauge | 0.25.0 | | /status sync_info number | -| consensus_fast_syncing | gauge | 0.25.0 | | either 0 (not fast syncing) or 1 (syncing) | -| consensus_block_size_bytes | Gauge | 0.21.0 | | Block size in bytes | -| p2p_peers | Gauge | 0.21.0 | | Number of peers node's connected to | -| p2p_peer_receive_bytes_total | counter | 0.25.0 | peer_id, chID | number of bytes per channel received from a given peer | -| p2p_peer_send_bytes_total | counter | 0.25.0 | peer_id, chID | number of bytes per channel sent to a given peer | -| p2p_peer_pending_send_bytes | gauge | 0.25.0 | peer_id | number of pending bytes to be sent to a given peer | -| p2p_num_txs | gauge | 0.25.0 | peer_id | number of transactions submitted by each peer_id | -| p2p_pending_send_bytes | gauge | 0.25.0 | peer_id | amount of data pending to be sent to peer | -| mempool_size | Gauge | 0.21.0 | | Number of uncommitted transactions | -| mempool_tx_size_bytes | histogram | 0.25.0 | | transaction sizes in bytes | -| mempool_failed_txs | counter | 0.25.0 | | number of failed transactions | -| mempool_recheck_times | counter | 0.25.0 | | number of transactions rechecked in the mempool | -| state_block_processing_time | histogram | 0.25.0 | | time between BeginBlock and EndBlock in ms | +| **Name** | **Type** | **Tags** | **Description** | +| -------------------------------------- | --------- | ------------- | ---------------------------------------------------------------------- | +| consensus_height | Gauge | | Height of the 
chain | +| consensus_validators | Gauge | | Number of validators | +| consensus_validators_power | Gauge | | Total voting power of all validators | +| consensus_validator_power | Gauge | | Voting power of the node if in the validator set | +| consensus_validator_last_signed_height | Gauge | | Last height the node signed a block, if the node is a validator | +| consensus_validator_missed_blocks | Gauge | | Total amount of blocks missed for the node, if the node is a validator | +| consensus_missing_validators | Gauge | | Number of validators who did not sign | +| consensus_missing_validators_power | Gauge | | Total voting power of the missing validators | +| consensus_byzantine_validators | Gauge | | Number of validators who tried to double sign | +| consensus_byzantine_validators_power | Gauge | | Total voting power of the byzantine validators | +| consensus_block_interval_seconds | Histogram | | Time between this and last block (Block.Header.Time) in seconds | +| consensus_rounds | Gauge | | Number of rounds | +| consensus_num_txs | Gauge | | Number of transactions | +| consensus_total_txs | Gauge | | Total number of transactions committed | +| consensus_block_parts | counter | peer_id | number of blockparts transmitted by peer | +| consensus_latest_block_height | gauge | | /status sync_info number | +| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) | +| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) | +| consensus_block_size_bytes | Gauge | | Block size in bytes | +| p2p_peers | Gauge | | Number of peers node's connected to | +| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer | +| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer | +| p2p_peer_pending_send_bytes | gauge | peer_id | number of pending bytes to be sent to a given peer | +| p2p_num_txs | gauge | peer_id | number of transactions submitted by each peer_id | +| p2p_pending_send_bytes | gauge | peer_id | amount of data pending to be sent to peer | +| mempool_size | Gauge | | Number of uncommitted transactions | +| mempool_tx_size_bytes | histogram | | transaction sizes in bytes | +| mempool_failed_txs | counter | | number of failed transactions | +| mempool_recheck_times | counter | | number of transactions rechecked in the mempool | +| state_block_processing_time | histogram | | time between BeginBlock and EndBlock in ms | ## Useful queries Percentage of missing + byzantine validators: -``` +```md ((consensus\_byzantine\_validators\_power + consensus\_missing\_validators\_power) / consensus\_validators\_power) * 100 ``` diff --git a/docs/tendermint-core/rpc.md b/docs/tendermint-core/rpc.md index ec3f873c9..62c5b01dd 100644 --- a/docs/tendermint-core/rpc.md +++ b/docs/tendermint-core/rpc.md @@ -1,5 +1,5 @@ --- -order: 4 +order: 9 --- # RPC diff --git a/docs/tendermint-core/running-in-production.md b/docs/tendermint-core/running-in-production.md index d386308de..41f40641e 100644 --- a/docs/tendermint-core/running-in-production.md +++ b/docs/tendermint-core/running-in-production.md @@ -1,5 +1,5 @@ --- -order: 5 +order: 4 --- # Running in production @@ -7,10 +7,10 @@ order: 5 ## Database By default, Tendermint uses the `syndtr/goleveldb` package for its in-process -key-value database. Unfortunately, this implementation of LevelDB seems to suffer under heavy load (see -[#226](https://github.com/syndtr/goleveldb/issues/226)). 
It may be best to -install the real C-implementation of LevelDB and compile Tendermint to use -that using `make build_c`. See the [install instructions](../introduction/install.md) for details. +key-value database. If you want maximal performance, it may be best to install +the real C-implementation of LevelDB and compile Tendermint to use that using +`make build TENDERMINT_BUILD_OPTIONS=cleveldb`. See the [install +instructions](../introduction/install.md) for details. Tendermint keeps multiple distinct databases in the `$TMROOT/data`: @@ -23,32 +23,24 @@ Tendermint keeps multiple distinct databases in the `$TMROOT/data`: used to temporarily store intermediate results during block processing. - `tx_index.db`: Indexes txs (and their results) by tx hash and by DeliverTx result events. -By default, Tendermint will only index txs by their hash, not by their DeliverTx +By default, Tendermint will only index txs by their hash and height, not by their DeliverTx result events. See [indexing transactions](../app-dev/indexing-transactions.md) for details. -There is no current strategy for pruning the databases. Consider reducing -block production by [controlling empty blocks](../tendermint-core/using-tendermint.md#no-empty-blocks) -or by increasing the `consensus.timeout_commit` param. Note both of these are -local settings and not enforced by the consensus. +Applications can expose block pruning strategies to the node operator. Please read the documentation of your application +to find out more details. -We're working on [state -syncing](https://github.com/tendermint/tendermint/issues/828), -which will enable history to be thrown away -and recent application state to be directly synced. We'll need to develop solutions -for archival nodes that allow queries on historical transactions and states. -The Cosmos project has had much success just dumping the latest state of a -blockchain to disk and starting a new chain from that state. +Applications can use [state sync](state-sync.md) to help nodes bootstrap quickly. ## Logging -Default logging level (`main:info,state:info,*:`) should suffice for +Default logging level (`log_level = "main:info,state:info,statesync:info,*:error"`) should suffice for normal operation mode. Read [this post](https://blog.cosmos.network/one-of-the-exciting-new-features-in-0-10-0-release-is-smart-log-level-flag-e2506b4ab756) for details on how to configure `log_level` config variable. Some of the modules can be found [here](./how-to-read-logs.md#list-of-modules). If you're trying to debug Tendermint or asked to provide logs with debug -logging level, you can do so by running tendermint with +logging level, you can do so by running Tendermint with `--log_level="*:debug"`. ## Write Ahead Logs (WAL) @@ -85,9 +77,8 @@ For the above reasons, the `mempool.wal` is disabled by default. To enable, set ## DOS Exposure and Mitigation Validators are supposed to setup [Sentry Node -Architecture](https://blog.cosmos.network/tendermint-explained-bringing-bft-based-pos-to-the-public-blockchain-domain-f22e274a0fdb) -to prevent Denial-of-service attacks. You can read more about it -[here](../interviews/tendermint-bft.md). +Architecture](./validators.md) +to prevent Denial-of-service attacks. ### P2P @@ -96,6 +87,11 @@ connection has `MaxPacketMsgPayloadSize`, which is the maximum packet size and bounded send & receive queues. One can impose restrictions on send & receive rate per connection (`SendRate`, `RecvRate`). 
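+These knobs live under the `[p2p]` section of `config.toml`. To double-check
+the values currently in effect on a node (a sketch, assuming the default
+`$TMHOME` location), you can grep the config file:
+
+```sh
+# Show the configured per-connection rate limits and packet size, in bytes.
+grep -E '^(send_rate|recv_rate|max_packet_msg_payload_size)' "$HOME/.tendermint/config.toml"
+```
+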
+The number of open P2P connections can become quite large, and hit the operating system's open
+file limit (since TCP connections are considered files on UNIX-based systems). Nodes should be
+given a sizable open file limit, e.g. 8192, via `ulimit -n 8192` or other deployment-specific
+mechanisms.
+
 ### RPC

 Endpoints returning multiple entries are limited by default to return 30
@@ -103,8 +99,7 @@ elements (100 max). See the [RPC Documentation](https://docs.tendermint.com/mast
 for more information.

 Rate-limiting and authentication are other key aspects to help protect
-against DOS attacks. While in the future we may implement these
-features, for now, validators are supposed to use external tools like
+against DOS attacks. Validators are supposed to use external tools like
 [NGINX](https://www.nginx.com/blog/rate-limiting-nginx/) or
 [traefik](https://docs.traefik.io/middlewares/ratelimit/)
 to achieve the same things.
@@ -119,7 +114,7 @@ If, after skimming through the logs, things are not clear still, the next thing
 to try is querying the `/status` RPC endpoint. It provides the necessary info:
 whether the node is syncing or not, what height it is on, etc.

-```sh
+```bash
 curl http(s)://{ip}:{rpcPort}/status
 ```

@@ -127,7 +122,7 @@ curl http(s)://{ip}:{rpcPort}/status
 state (proposer, latest validators, peers states). From it, you should be able
 to figure out why, for example, the network had halted.

-```sh
+```bash
 curl http(s)://{ip}:{rpcPort}/dump_consensus_state
 ```

@@ -160,7 +155,7 @@ Tendermint also can report and serve Prometheus metrics. See
 information into an archive. See [Debugging](../tools/debugging.md) for more
 information.

-## What happens when my app dies?
+## What happens when my app dies

 You are supposed to run Tendermint under a [process
 supervisor](https://en.wikipedia.org/wiki/Process_supervision) (like
@@ -175,7 +170,7 @@ order of restart does not matter for it.

 ## Signal handling

 We catch SIGINT and SIGTERM and try to clean up nicely. For other
-signals we use the default behaviour in Go: [Default behavior of signals
+signals we use the default behavior in Go: [Default behavior of signals
 in Go
 programs](https://golang.org/pkg/os/signal/#hdr-Default_behavior_of_signals_in_Go_programs).

@@ -201,11 +196,11 @@ Other causes can be:

 - Operating system bugs
 - Admin error (e.g., directly modifying Tendermint data-directory contents)

-(Source: https://wiki.postgresql.org/wiki/Corruption)
+(Source: <https://wiki.postgresql.org/wiki/Corruption>)

 ### WAL Corruption

-If consensus WAL is corrupted at the lastest height and you are trying to start
+If consensus WAL is corrupted at the latest height and you are trying to start
 Tendermint, replay will fail with panic.

 Recovering from data corruption can be hard and time-consuming. Here are two approaches you can take:
@@ -215,47 +210,46 @@ Recovering from data corruption can be hard and time-consuming. Here are two app

 1) Create a backup of the corrupted WAL file:

-```
-cp "$TMHOME/data/cs.wal/wal" > /tmp/corrupted_wal_backup
-```
+   ```sh
+   cp "$TMHOME/data/cs.wal/wal" /tmp/corrupted_wal_backup
+   ```

-2. Use `./scripts/wal2json` to create a human-readable version
+2) Use `./scripts/wal2json` to create a human-readable version:

-```
-./scripts/wal2json/wal2json "$TMHOME/data/cs.wal/wal" > /tmp/corrupted_wal
-```
+   ```sh
+   ./scripts/wal2json/wal2json "$TMHOME/data/cs.wal/wal" > /tmp/corrupted_wal
+   ```

-3. Search for a "CORRUPTED MESSAGE" line.
+3) Search for a "CORRUPTED MESSAGE" line.
+4) By looking at the previous message and the message after the corrupted one
   and looking at the logs, try to rebuild the message. If the consequent
   messages are marked as corrupted too (this may happen if length header got
   corrupted or some writes did not make it to the WAL ~ truncation), then
   remove all the lines starting from the corrupted one and restart Tendermint.

-```
-$EDITOR /tmp/corrupted_wal
-```
+   ```sh
+   $EDITOR /tmp/corrupted_wal
+   ```

-5. After editing, convert this file back into binary form by running:
+5) After editing, convert this file back into binary form by running:

-```
-./scripts/json2wal/json2wal /tmp/corrupted_wal $TMHOME/data/cs.wal/wal
-```
+   ```sh
+   ./scripts/json2wal/json2wal /tmp/corrupted_wal $TMHOME/data/cs.wal/wal
+   ```

 ## Hardware

 ### Processor and Memory

-While actual specs vary depending on the load and validators count,
-minimal requirements are:
+While actual specs vary depending on the load and validators count, minimal
+requirements are:

 - 1GB RAM
 - 25GB of disk space
 - 1.4 GHz CPU

-SSD disks are preferable for applications with high transaction
-throughput.
+SSD disks are preferable for applications with high transaction throughput.

 Recommended:

@@ -263,21 +257,34 @@

 - 2GB RAM
 - 100GB SSD
 - x64 2.0 GHz 2v CPU

-While for now, Tendermint stores all the history and it may require
-significant disk space over time, we are planning to implement state
-syncing (See
-[this issue](https://github.com/tendermint/tendermint/issues/828)). So,
-storing all the past blocks will not be necessary.
+While for now, Tendermint stores all the history and it may require significant
+disk space over time, we are planning to implement state syncing (See [this
+issue](https://github.com/tendermint/tendermint/issues/828)). So, storing all
+the past blocks will not be necessary.
+
+### Validator signing on 32 bit architectures (or ARM)
+
+Both our `ed25519` and `secp256k1` implementations require constant time
+`uint64` multiplication. Non-constant time crypto can (and has) leaked
+private keys on both `ed25519` and `secp256k1`. This doesn't exist in hardware
+on 32 bit x86 platforms ([source](https://bearssl.org/ctmul.html)), and it
+depends on the compiler to enforce that it is constant time. It's unclear at
+this point whether the Golang compiler does this correctly for all
+implementations.
+
+**We do not support nor recommend running a validator on 32 bit architectures OR
+the "VIA Nano 2000 Series", and the architectures in the ARM section rated
+"S-".**

 ### Operating Systems

-Tendermint can be compiled for a wide range of operating systems thanks
-to Go language (the list of \$OS/\$ARCH pairs can be found
+Tendermint can be compiled for a wide range of operating systems thanks to Go
+language (the list of \$OS/\$ARCH pairs can be found
 [here](https://golang.org/doc/install/source#environment)).

-While we do not favor any operation system, more secure and stable Linux
-server distributions (like Centos) should be preferred over desktop
-operation systems (like Mac OS).
+While we do not favor any operating system, more secure and stable Linux server
+distributions (like CentOS) should be preferred over desktop operating systems
+(like Mac OS).

 ### Miscellaneous

@@ -296,7 +303,7 @@ If you are going to use Tendermint in a private domain and you have a
 private high-speed network among your peers, it makes sense to lower
 flush throttle timeout and increase other params.

-```
+```toml
 [p2p]

 send_rate=20000000 # 2MB/s
@@ -359,9 +366,11 @@ these limits.
[Sysctls to tune the system to be able to open more connections](https://github.com/satori-com/tcpkali/blob/master/doc/tcpkali.man.md#sysctls-to-tune-the-system-to-be-able-to-open-more-connections) +The process file limits must also be increased, e.g. via `ulimit -n 8192`. + ...for N connections, such as 50k: -``` +```md kern.maxfiles=10000+2*N # BSD kern.maxfilesperproc=100+2*N # BSD kern.ipc.maxsockets=10000+2*N # BSD diff --git a/docs/tendermint-core/secure-p2p.md b/docs/tendermint-core/secure-p2p.md deleted file mode 100644 index 96df41bc6..000000000 --- a/docs/tendermint-core/secure-p2p.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -order: 12 ---- - -# Secure P2P - -The Tendermint p2p protocol uses an authenticated encryption scheme -based on the [Station-to-Station -Protocol](https://en.wikipedia.org/wiki/Station-to-Station_protocol). - -Each peer generates an ED25519 key-pair to use as a persistent -(long-term) id. - -When two peers establish a TCP connection, they first each generate an -ephemeral X25519 key-pair to use for this session, and send each other -their respective ephemeral public keys. This happens in the clear. - -They then each compute the shared secret, as done in a [diffie hellman -key exhange](https://en.wikipedia.org/wiki/Diffie%E2%80%93Hellman_key_exchange). -The shared secret is used as the symmetric key for the encryption algorithm. - -We then run [hkdf-sha256](https://en.wikipedia.org/wiki/HKDF) to expand the -shared secret to generate a symmetric key for sending data, -a symmetric key for receiving data, -a challenge to authenticate the other party. -One peer will send data with their sending key, and the other peer -would decode it using their own receiving key. -We must ensure that both parties don't try to use the same key as the sending -key, and the same key as the receiving key, as in that case nothing can be -decoded. -To ensure this, the peer with the canonically smaller ephemeral pubkey -uses the first key as their receiving key, and the second key as their sending key. -If the peer has the canonically larger ephemeral pubkey, they do the reverse. - -Each peer also keeps a received message counter and sent message counter, both -are initialized to zero. -All future communication is encrypted using chacha20poly1305. -The key used to send the message is the sending key, and the key used to decode -the message is the receiving key. -The nonce for chacha20poly1305 is the relevant message counter. -It is critical that the message counter is incremented every time you send a -message and every time you receive a message that decodes correctly. - -Each peer now signs the challenge with their persistent private key, and -sends the other peer an AuthSigMsg, containing their persistent public -key and the signature. On receiving an AuthSigMsg, the peer verifies the -signature. - -The peers are now authenticated. - -The communication maintains Perfect Forward Secrecy, as -the persistent key pair was not used for generating secrets - only for -authenticating. - -## Caveat - -This system is still vulnerable to a Man-In-The-Middle attack if the -persistent public key of the remote node is not known in advance. The -only way to mitigate this is with a public key authentication system, -such as the Web-of-Trust or Certificate Authorities. In our case, we can -use the blockchain itself as a certificate authority to ensure that we -are connected to at least one validator. - -## Config - -Authenticated encryption is enabled by default. 
-
-## Specification
-
-The full p2p specification can be found [here](https://docs.tendermint.com/master/spec/p2p/).
-
-## Additional Reading
-
-- [Implementation](https://github.com/tendermint/tendermint/blob/64bae01d007b5bee0d0827ab53259ffd5910b4e6/p2p/conn/secret_connection.go#L47)
-- [Original STS paper by Whitfield Diffie, Paul C. van Oorschot and
-  Michael J.
-  Wiener](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.216.6107&rep=rep1&type=pdf)
-- [Further work on secret
-  handshakes](https://dominictarr.github.io/secret-handshake-paper/shs.pdf)
diff --git a/docs/tendermint-core/state-sync.md b/docs/tendermint-core/state-sync.md
new file mode 100644
index 000000000..77c10c4de
--- /dev/null
+++ b/docs/tendermint-core/state-sync.md
@@ -0,0 +1,48 @@
+---
+order: 11
+---
+
+# State Sync
+
+With fast sync a node downloads all of the data of an application from genesis and verifies it.
+With state sync your node will download data related to the head or near the head of the chain and verify the data.
+This leads to drastically shorter times for joining a network.
+
+## Using State Sync
+
+State sync will continuously work in the background to supply nodes with chunked data when bootstrapping.
+
+> NOTE: Before trying to use state sync, see if the application you are operating a node for supports it.
+
+Under the state sync section in `config.toml` you will find multiple settings that need to be configured in order for your node to use state sync.
+
+Let's break down the settings:
+
+- `enable`: Tells the node to use state sync when bootstrapping.
+- `rpc_servers`: RPC servers are needed because state sync utilizes the light client for verification.
+    - At least 2 servers are required; more are always helpful.
+- `temp_dir`: Temporary directory used to store the chunks on the machine's local storage. If nothing is set, a directory will be created in `/tmp`.
+
+The next pieces of information must be acquired through publicly exposed RPCs or a block explorer which you trust:
+
+- `trust_height`: Trusted height defines the height at which your node should trust the chain.
+- `trust_hash`: Trusted hash is the hash in the `BlockID` corresponding to the trusted height.
+- `trust_period`: Trust period is the period in which headers can be verified.
+  > :warning: This value should be significantly smaller than the unbonding period.
+
+If you are relying on publicly exposed RPCs to get the needed information, you can use `curl`.
+
+Example:
+
+```bash
+curl -s https://233.123.0.140:26657/commit | jq "{height: .result.signed_header.header.height, hash: .result.signed_header.commit.block_id.hash}"
+```
+
+The response will be:
+
+```json
+{
+  "height": "273",
+  "hash": "188F4F36CBCD2C91B57509BBF231C777E79B52EE3E0D90D06B1A25EB16E6E23D"
+}
+```
diff --git a/docs/app-dev/subscribing-to-events-via-websocket.md b/docs/tendermint-core/subscription.md
similarity index 77%
rename from docs/app-dev/subscribing-to-events-via-websocket.md
rename to docs/tendermint-core/subscription.md
index 6e4f0d207..067d0bf51 100644
--- a/docs/app-dev/subscribing-to-events-via-websocket.md
+++ b/docs/tendermint-core/subscription.md
@@ -1,19 +1,26 @@
 ---
-order: 5
+order: 7
 ---

 # Subscribing to events via Websocket

-Tendermint emits different events, to which you can subscribe via
+Tendermint emits different events, which you can subscribe to via
 [Websocket](https://en.wikipedia.org/wiki/WebSocket). This can be useful
-for third-party applications (for analysis) or inspecting state.
+for third-party applications (for analysis) or for inspecting state.

 [List of events](https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants)

-You can subscribe to any of the events above by calling `subscribe` RPC
-method via Websocket.
+To connect to a node via websocket from the CLI, you can use a tool such as
+[wscat](https://github.com/websockets/wscat) and run:

+```sh
+wscat -c ws://127.0.0.1:26657/websocket
+```
+
+You can subscribe to any of the events above by calling the `subscribe` RPC
+method via Websocket along with a valid query.
+
+```json
 {
     "jsonrpc": "2.0",
     "method": "subscribe",
@@ -31,7 +38,7 @@ You can also use tags, given you had included them into DeliverTx
 response, to query transaction results. See [Indexing
 transactions](./indexing-transactions.md) for details.

-### ValidatorSetUpdates
+## ValidatorSetUpdates

 When validator set changes, ValidatorSetUpdates event is published. The
 event carries a list of pubkey/power pairs. The list is the same
 form as updates from ABCI, but must be the whole value, not a diff (see
 the ABCI spec).

 Response:

-```
+```json
 {
     "jsonrpc": "2.0",
     "id": 0,
diff --git a/docs/tendermint-core/using-tendermint.md b/docs/tendermint-core/using-tendermint.md
index b33e770d7..bf39d8678 100644
--- a/docs/tendermint-core/using-tendermint.md
+++ b/docs/tendermint-core/using-tendermint.md
@@ -20,7 +20,7 @@ this by setting the `TMHOME` environment variable.

 Initialize the root directory by running:

-```
+```sh
 tendermint init
 ```

@@ -29,9 +29,9 @@ genesis file (`genesis.json`) containing the associated public
 key, in `$TMHOME/config`. This is all that's necessary to run a
 local testnet with one validator.

-For more elaborate initialization, see the tesnet command:
+For more elaborate initialization, see the testnet command:

-```
+```sh
 tendermint testnet --help
 ```

@@ -44,33 +44,54 @@ definition](https://github.com/tendermint/tendermint/blob/master/types/genesis.g

 #### Fields

 - `genesis_time`: Official time of blockchain start.
-- `chain_id`: ID of the blockchain. This must be unique for
-  every blockchain. If your testnet blockchains do not have unique
+- `chain_id`: ID of the blockchain. **This must be unique for
+  every blockchain.** If your testnet blockchains do not have unique
   chain IDs, you will have a bad time. The ChainID must be less than 50 symbols.
-- `consensus_params`
-  - `block`
-    - `time_iota_ms`: Minimum time increment between consecutive blocks (in
+- `initial_height`: Height at which Tendermint should begin. If a blockchain is conducting a network upgrade,
+  starting from the stopped height keeps previous heights unique.
+- `consensus_params` [spec](https://github.com/tendermint/spec/blob/master/spec/core/state.md#consensusparams)
+  - `block`
+    - `max_bytes`: Max block size, in bytes.
+    - `max_gas`: Max gas per block.
+    - `time_iota_ms`: Minimum time increment between consecutive blocks (in
       milliseconds). If the block header timestamp is ahead of the system clock,
       decrease this value.
+  - `evidence`
+    - `max_age_num_blocks`: Max age of evidence, in blocks. The basic formula
+      for calculating this is: MaxAgeDuration / {average block time}.
+    - `max_age_duration`: Max age of evidence, in time. It should correspond
+      with an app's "unbonding period" or other similar mechanism for handling
+      [Nothing-At-Stake
+      attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed).
+    - `max_num`: This sets the maximum number of evidence that can be committed
+      in a single block, and it should fall comfortably under the max block
+      bytes when we consider the size of each evidence.
+  - `validator`
+    - `pub_key_types`: Public key types validators can use.
+  - `version`
+    - `app_version`: ABCI application version.
 - `validators`: List of initial validators. Note this may be overridden entirely by the
   application, and may be left empty to make explicit that the
   application will initialize the validator set with ResponseInitChain.
-  - `pub_key`: The first element specifies the `pub_key` type. 1
+  - `pub_key`: The first element specifies the `pub_key` type. 1
     == Ed25519. The second element are the pubkey bytes.
-  - `power`: The validator's voting power.
-  - `name`: Name of the validator (optional).
+  - `power`: The validator's voting power.
+  - `name`: Name of the validator (optional).
 - `app_hash`: The expected application hash (as returned by the
   `ResponseInfo` ABCI message) upon genesis. If the app's hash does not
   match, Tendermint will panic.
 - `app_state`: The application state (e.g. initial distribution
   of tokens).

+> :warning: **ChainID must be unique to every blockchain. Reusing an old chainID can cause issues.**
+
 #### Sample genesis.json

-```
+```json
 {
-  "genesis_time": "2018-11-13T18:11:50.277637Z",
-  "chain_id": "test-chain-s4ui7D",
+  "genesis_time": "2020-04-21T11:17:42.341227868Z",
+  "chain_id": "test-chain-ROp9KF",
+  "initial_height": "0",
   "consensus_params": {
     "block": {
       "max_bytes": "22020096",
@@ -78,8 +99,9 @@
       "time_iota_ms": "1000"
     },
     "evidence": {
-      "max_age_num_blocks": "100000"
-      "max_age_duration": "10000"
+      "max_age_num_blocks": "100000",
+      "max_age_duration": "172800000000000",
+      "max_num": 50
     },
     "validator": {
       "pub_key_types": [
@@ -89,10 +111,10 @@
   },
   "validators": [
     {
-      "address": "39C04A480B54AB258A45355A5E48ADDED9956C65",
+      "address": "B547AB87E79F75A4A3198C57A8C2FDAF8628CB47",
       "pub_key": {
         "type": "tendermint/PubKeyEd25519",
-        "value": "DMEMMj1+thrkUCGocbvvKzXeaAtRslvX9MWtB+smuIA="
+        "value": "P/V6GHuZrb8rs/k1oBorxc6vyXMlnzhJmv7LmjELDys="
       },
       "power": "10",
       "name": ""
@@ -106,7 +128,7 @@

 To run a Tendermint node, use:

-```sh
+```bash
 tendermint node
 ```

@@ -115,7 +137,7 @@ By default, Tendermint will try to connect to an ABCI application on
 another window. If you don't, kill Tendermint and run an in-process version of
 the `kvstore` app:

-```sh
+```bash
 tendermint node --proxy_app=kvstore
 ```

@@ -129,7 +151,7 @@ in-process with Tendermint if it's written in Go. If your app is not
 written in Go, run it in another process, and use the `--proxy_app`
 flag to specify the address of the socket it is listening on, for
 instance:

-```sh
+```bash
 tendermint node --proxy_app=/var/run/abci.sock
 ```

@@ -140,33 +162,28 @@

 You can find out what flags are supported by running `tendermint node --help`.
To send a transaction, use `curl` to make requests to the Tendermint RPC server, for example: -``` +```sh curl http://localhost:26657/broadcast_tx_commit?tx=\"abcd\" ``` We can see the chain's status at the `/status` end-point: -``` +```sh curl http://localhost:26657/status | json_pp ``` and the `latest_app_hash` in particular: -``` +```sh curl http://localhost:26657/status | json_pp | grep latest_app_hash ``` - - -Visit http://localhost:26657 in your browser to see the list of other +Visit `http://localhost:26657` in your browser to see the list of other endpoints. Some take no arguments (like `/status`), while others specify the argument name and use `_` as a placeholder. - -::: tip -Find the RPC Documentation [here](https://docs.tendermint.com/master/rpc/) -::: +> TIP: Find the RPC Documentation [here](https://docs.tendermint.com/master/rpc/) ### Formatting @@ -175,24 +192,24 @@ taken into account: With `GET`: -To send a UTF8 string byte array, quote the value of the tx pramater: +To send a UTF8 string byte array, quote the value of the tx parameter: -``` +```sh curl 'http://localhost:26657/broadcast_tx_commit?tx="hello"' ``` which sends a 5 byte transaction: "h e l l o" \[68 65 6c 6c 6f\]. -Note the URL must be wrapped with single quoes, else bash will ignore +Note the URL must be wrapped with single quotes, else bash will ignore the double quotes. To avoid the single quotes, escape the double quotes: -``` +```sh curl http://localhost:26657/broadcast_tx_commit?tx=\"hello\" ``` Using a special character: -``` +```sh curl 'http://localhost:26657/broadcast_tx_commit?tx="€5"' ``` @@ -200,7 +217,7 @@ sends a 4 byte transaction: "€5" (UTF8) \[e2 82 ac 35\]. To send as raw hex, omit quotes AND prefix the hex string with `0x`: -``` +```sh curl http://localhost:26657/broadcast_tx_commit?tx=0x01020304 ``` @@ -208,7 +225,7 @@ which sends a 4 byte transaction: \[01 02 03 04\]. With `POST` (using `json`), the raw hex must be `base64` encoded: -``` +```sh curl --data-binary '{"jsonrpc":"2.0","id":"anything","method":"broadcast_tx_commit","params": {"tx": "AQIDBA=="}}' -H 'content-type:text/plain;' http://localhost:26657 ``` @@ -218,14 +235,13 @@ Note that raw hex cannot be used in `POST` transactions. ## Reset -::: warning -**UNSAFE** Only do this in development and only if you can +> :warning: **UNSAFE** Only do this in development and only if you can afford to lose all blockchain data! -::: + To reset a blockchain, stop the node and run: -``` +```sh tendermint unsafe_reset_all ``` @@ -246,7 +262,7 @@ Some fields from the config file can be overwritten with flags. ## No Empty Blocks -While the default behaviour of `tendermint` is still to create blocks +While the default behavior of `tendermint` is still to create blocks approximately once per second, it is possible to disable empty blocks or set a block creation interval. In the former case, blocks will be created when there are new transactions or when the AppHash changes. @@ -255,13 +271,13 @@ To configure Tendermint to not produce empty blocks unless there are transactions or the app hash changes, run Tendermint with this additional flag: -``` +```sh tendermint node --consensus.create_empty_blocks=false ``` or set the configuration via the `config.toml` file: -``` +```toml [consensus] create_empty_blocks = false ``` @@ -272,13 +288,13 @@ empty blocks requires the config option to be set to `false`. 
The block interval setting allows for a delay (in time.Duration format [ParseDuration](https://golang.org/pkg/time/#ParseDuration)) between the creation of each new empty block. It can be set with this additional flag: -``` +```sh --consensus.create_empty_blocks_interval="5s" ``` or set the configuration via the `config.toml` file: -``` +```toml [consensus] create_empty_blocks_interval = "5s" ``` @@ -298,7 +314,7 @@ eventually included in a block. Since there are multiple phases to processing a transaction, we offer multiple endpoints to broadcast a transaction: -``` +```md /broadcast_tx_async /broadcast_tx_sync /broadcast_tx_commit @@ -333,7 +349,7 @@ When `tendermint init` is run, both a `genesis.json` and `priv_validator_key.json` are created in `~/.tendermint/config`. The `genesis.json` might look like: -``` +```json { "validators" : [ { @@ -353,7 +369,7 @@ When `tendermint init` is run, both a `genesis.json` and And the `priv_validator_key.json`: -``` +```json { "last_step" : 0, "last_round" : "0", @@ -416,7 +432,7 @@ another address from the address book. On restarts you will always try to connect to these peers regardless of the size of your address book. All peers relay peers they know of by default. This is called the peer exchange -protocol (PeX). With PeX, peers will be gossipping about known peers and forming +protocol (PeX). With PeX, peers will be gossiping about known peers and forming a network, storing peer addresses in the addrbook. Because of this, you don't have to use a seed node if you have a live persistent peer. @@ -430,14 +446,14 @@ persistent connections with. For example, -``` +```sh tendermint node --p2p.seeds "f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:26656" ``` Alternatively, you can use the `/dial_seeds` endpoint of the RPC to specify seeds for a running node to connect to: -``` +```sh curl 'localhost:26657/dial_seeds?seeds=\["f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656","0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:26656"\]' ``` @@ -450,7 +466,7 @@ maintain a persistent connection with each, you can use the `config.toml` or the `/dial_peers` RPC endpoint to do it without stopping Tendermint core instance. -``` +```sh tendermint node --p2p.persistent_peers "429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:26656,96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:26656" curl 'localhost:26657/dial_peers?persistent=true&peers=\["429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:26656","96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:26656"\]' @@ -473,14 +489,14 @@ before starting the network. For instance, we could make a new We can generate a new `priv_validator_key.json` with the command: -``` +```sh tendermint gen_validator ``` Now we can update our genesis file. For instance, if the new `priv_validator_key.json` looks like: -``` +```json { "address" : "5AF49D2A2D4F5AD4C7C8C4CC2FB020131E9C4902", "pub_key" : { @@ -499,7 +515,7 @@ Now we can update our genesis file. For instance, if the new then the new `genesis.json` will be: -``` +```json { "validators" : [ { diff --git a/docs/tendermint-core/validators.md b/docs/tendermint-core/validators.md index 911405d58..084fe27fa 100644 --- a/docs/tendermint-core/validators.md +++ b/docs/tendermint-core/validators.md @@ -1,3 +1,7 @@ +--- +order: 6 +--- + # Validators Validators are responsible for committing new blocks in the blockchain. 
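The `gen_validator` step shown above can also be reproduced programmatically. A minimal sketch (assuming the v0.34 `privval` API; the file paths are illustrative, under a real `$TMHOME` they live in `config/` and `data/`) that generates the same key material and prints the fields you would splice into `genesis.json`:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/privval"
)

func main() {
	// Generate and persist a new file-based private validator.
	pv := privval.GenFilePV("priv_validator_key.json", "priv_validator_state.json")
	pv.Save()

	pubKey, err := pv.GetPubKey()
	if err != nil {
		panic(err)
	}

	// These are the values that go into genesis.json under "validators".
	fmt.Printf("address: %X\n", pv.GetAddress())
	fmt.Printf("pub_key: %v\n", pubKey)
}
```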
@@ -57,16 +61,18 @@ When initializing nodes there are five parameters in the `config.toml` that may
- `unconditional_peer_ids:` comma separated list of nodeID's. These nodes will be connected to no matter the limits of inbound and outbound peers. This is useful for when sentry nodes have full address books.
- `private_peer_ids:` comma separated list of nodeID's. These nodes will not be gossiped to the network. This is an important field as you do not want your validator IP gossiped to the network.
- `addr_book_strict:` boolean. By default nodes with a routable address will be considered for connection. If this setting is turned off (false), non-routable IP addresses, like addresses in a private network, can be added to the address book.
+- `double_sign_check_height:` int64 height. How many blocks to look back to check the existence of the node's consensus votes before joining consensus. When non-zero, the node will panic upon restart if the same consensus key was used to sign the last {double_sign_check_height} blocks. So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panicking.

#### Validator Node Configuration

-| Config Option          | Setting                    |
-| ---------------------- | -------------------------- |
-| pex                    | false                      |
-| persistent_peers       | list of sentry nodes       |
-| private_peer_ids       | none                       |
-| unconditional_peer_ids | optionally sentry node IDs |
-| addr_book_strict       | false                      |
+| Config Option            | Setting                    |
+| ------------------------ | -------------------------- |
+| pex                      | false                      |
+| persistent_peers         | list of sentry nodes       |
+| private_peer_ids         | none                       |
+| unconditional_peer_ids   | optionally sentry node IDs |
+| addr_book_strict         | false                      |
+| double_sign_check_height | 10                         |

The validator node should have `pex=false` so it does not gossip to the entire network. The persistent peers will be your sentry nodes. Private peers can be left empty as the validator is not trying to hide who it is communicating with. Setting unconditional peers is optional for a validator because they will not have full address books.
@@ -86,8 +92,8 @@ The sentry nodes should be able to talk to the entire network hence why `pex=tru
More Information can be found at these links:
-- https://kb.certus.one/
-- https://forum.cosmos.network/t/sentry-node-architecture-overview/454
+- <https://kb.certus.one/>
+- <https://forum.cosmos.network/t/sentry-node-architecture-overview/454>

### Validator keys
@@ -97,7 +103,7 @@ Currently Tendermint uses [Ed25519](https://ed25519.cr.yp.to/) keys which are wi
## Committing a Block
-_+2/3 is short for "more than 2/3"_
+> **+2/3 is short for "more than 2/3"**
A block is committed when +2/3 of the validator set sign [precommit
votes](https://github.com/tendermint/spec/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/blockchain.md#vote) for that block at the same `round`.
diff --git a/docs/tools/README.md b/docs/tools/README.md
index 86ba128f6..720022f96 100644
--- a/docs/tools/README.md
+++ b/docs/tools/README.md
@@ -16,14 +16,14 @@ Tendermint has some tools that are associated with it for:
## Benchmarking
-- https://github.com/informalsystems/tm-load-test
+- [`tm-load-test`](https://github.com/informalsystems/tm-load-test) is a distributed load testing tool (and framework) for load testing Tendermint networks.
## Testnets
-- https://github.com/informalsystems/testnets
+- [`testnets`](https://github.com/informalsystems/testnets) contains various different configurations of test networks for, and relating to, Tendermint.
diff --git a/docs/tools/debugging.md b/docs/tools/debugging.md
index 50961dd3b..17fa0ec11 100644
--- a/docs/tools/debugging.md
+++ b/docs/tools/debugging.md
@@ -9,14 +9,14 @@ state, the node's status, the WAL, and even the stack trace of the process
before exit. These files can be useful to examine when debugging a faulty
Tendermint process.
-```sh
+```bash
tendermint debug kill <pid> </path/to/out.zip> --home=<home dir>
```
will write debug info into a compressed archive. The archive will contain the
following:
-```
+```sh
├── config.toml
├── consensus_state.json
├── net_info.json
@@ -29,14 +29,14 @@ Under the hood, `debug kill` fetches info from `/status`, `/net_info`, and
`/dump_consensus_state` HTTP endpoints, and kills the process with `-6`, which
catches the go-routine dump.
-## tendermint debug dump
+## Tendermint debug dump
Also, the `debug dump` sub-command allows you to dump debugging data into
compressed archives at a regular interval. These archives contain the goroutine
and heap profiles in addition to the consensus state, network info, node
status, and even the WAL.
-```sh
+```bash
tendermint debug dump </path/to/out> --home=<home dir>
```
will perform similarly to `kill` except it only polls the node and dumps
debugging data every `frequency` seconds to a compressed archive under a given
destination directory. Each archive will contain:
-```
+```sh
├── consensus_state.json
├── goroutine.out
├── heap.out
diff --git a/docs/tools/remote-signer-validation.md b/docs/tools/remote-signer-validation.md
index c8a948e3e..3099d7e4d 100644
--- a/docs/tools/remote-signer-validation.md
+++ b/docs/tools/remote-signer-validation.md
@@ -5,7 +5,7 @@ repository](https://github.com/tendermint/tendermint).
The Tendermint remote signer test harness facilitates integration testing
between Tendermint and remote signers such as
-[KMS](https://github.com/tendermint/kms). Such remote signers allow for signing
+[tmkms](https://github.com/iqlusioninc/tmkms). Such remote signers allow for signing
of important Tendermint messages using
[HSMs](https://en.wikipedia.org/wiki/Hardware_security_module), providing
additional security.
@@ -21,10 +21,12 @@ When executed, `tm-signer-harness`:
error.
## Prerequisites
+
Requires the same prerequisites as for building
[Tendermint](https://github.com/tendermint/tendermint).
## Building
+
From the `tools/tm-signer-harness` directory in your Tendermint source
repository, simply run:
@@ -36,6 +38,7 @@ make install
```
## Docker Image
+
To build a Docker image containing the `tm-signer-harness`, also from the
`tools/tm-signer-harness` directory of your Tendermint source repo, simply run:
@@ -44,14 +47,16 @@ make docker-image
```
## Running against KMS
+
As an example of how to use `tm-signer-harness`, the following instructions show
-you how to execute its tests against [KMS](https://github.com/tendermint/kms).
+you how to execute its tests against [tmkms](https://github.com/iqlusioninc/tmkms).
For this example, we will make use of the **software signing module in KMS**, as
the hardware signing module requires a physical
[YubiHSM](https://www.yubico.com/products/yubihsm/) device.
### Step 1: Install KMS on your local machine
+
-See the [KMS repo](https://github.com/tendermint/kms) for details on how to set
+See the [tmkms repo](https://github.com/iqlusioninc/tmkms) for details on how to set
KMS up on your local machine.
If you have [Rust](https://www.rust-lang.org/) installed on your local machine,
@@ -62,6 +67,7 @@ cargo install tmkms
```
### Step 2: Make keys for KMS
+
The KMS software signing module needs a key with which to sign messages. In our
example, we will simply export a signing key from our local Tendermint instance.
@@ -85,6 +91,7 @@ tmkms keygen secret_connection.key
```
### Step 3: Configure and run KMS
+
KMS needs some configuration to tell it to use the software signing module
as well as the `signing.key` file we just generated.
Save the following to a file called `tmkms.toml`: @@ -111,6 +118,7 @@ This will start KMS, which will repeatedly try to connect to `tcp://127.0.0.1:61219` until it is successful. ### Step 4: Run tm-signer-harness + Now we get to run the signer test harness: ```bash @@ -124,10 +132,12 @@ should now exit with a 0 exit code. If they are somehow not compatible, it should exit with a meaningful non-zero exit code (see the exit codes below). ### Step 5: Shut down KMS + Simply hit Ctrl+Break on your KMS instance (or use the `kill` command in Linux) to terminate it gracefully. ## Exit Code Meanings + The following list shows the various exit codes from `tm-signer-harness` and their meanings: diff --git a/docs/guides/go-built-in.md b/docs/tutorials/go-built-in.md similarity index 88% rename from docs/guides/go-built-in.md rename to docs/tutorials/go-built-in.md index 5ab71b829..9611cff00 100644 --- a/docs/guides/go-built-in.md +++ b/docs/tutorials/go-built-in.md @@ -38,25 +38,18 @@ Go](https://golang.org/doc/install). Verify that you have the latest version of Go installed: -```sh +```bash $ go version -go version go1.13.1 darwin/amd64 -``` - -Make sure you have `$GOPATH` environment variable set: - -```sh -$ echo $GOPATH -/Users/melekes/go +go version go1.15.1 darwin/amd64 ``` ## 1.2 Creating a new Go project We'll start by creating a new Go project. -```sh -$ mkdir kvstore -$ cd kvstore +```bash +mkdir kvstore +cd kvstore ``` Inside the example directory create a `main.go` file with the following content: @@ -75,7 +68,7 @@ func main() { When run, this should print "Hello, Tendermint Core" to the standard output. -```sh +```bash $ go run main.go Hello, Tendermint Core ``` @@ -84,7 +77,7 @@ Hello, Tendermint Core Tendermint Core communicates with the application through the Application BlockChain Interface (ABCI). All message types are defined in the [protobuf -file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto). +file](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/abci/types.proto). This allows Tendermint Core to run applications written in any programming language. @@ -109,10 +102,6 @@ func (KVStoreApplication) Info(req abcitypes.RequestInfo) abcitypes.ResponseInfo return abcitypes.ResponseInfo{} } -func (KVStoreApplication) SetOption(req abcitypes.RequestSetOption) abcitypes.ResponseSetOption { - return abcitypes.ResponseSetOption{} -} - func (KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx { return abcitypes.ResponseDeliverTx{Code: 0} } @@ -140,6 +129,22 @@ func (KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes. 
func (KVStoreApplication) EndBlock(req abcitypes.RequestEndBlock) abcitypes.ResponseEndBlock { return abcitypes.ResponseEndBlock{} } + +func (KVStoreApplication) ListSnapshots(abcitypes.RequestListSnapshots) abcitypes.ResponseListSnapshots { + return abcitypes.ResponseListSnapshots{} +} + +func (KVStoreApplication) OfferSnapshot(abcitypes.RequestOfferSnapshot) abcitypes.ResponseOfferSnapshot { + return abcitypes.ResponseOfferSnapshot{} +} + +func (KVStoreApplication) LoadSnapshotChunk(abcitypes.RequestLoadSnapshotChunk) abcitypes.ResponseLoadSnapshotChunk { + return abcitypes.ResponseLoadSnapshotChunk{} +} + +func (KVStoreApplication) ApplySnapshotChunk(abcitypes.RequestApplySnapshotChunk) abcitypes.ResponseApplySnapshotChunk { + return abcitypes.ResponseApplySnapshotChunk{} +} ``` Now I will go through each method explaining when it's called and adding @@ -230,7 +235,7 @@ application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and `EndBlock` in the end. DeliverTx are being transfered asynchronously, but the responses are expected to come in order. -``` +```go func (app *KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock { app.currentBatch = app.db.NewTransaction(true) return abcitypes.ResponseBeginBlock{} @@ -288,7 +293,7 @@ the application's `Query` method. Applications are free to provide their own APIs. But by using Tendermint Core as a proxy, clients (including [light client -package](https://godoc.org/github.com/tendermint/tendermint/lite2)) can leverage +package](https://godoc.org/github.com/tendermint/tendermint/light)) can leverage the unified API across different applications. Plus they won't have to call the otherwise separate Tendermint Core API for additional proofs. @@ -331,6 +336,7 @@ Put the following code into the "main.go" file: package main import ( + "errors" "flag" "fmt" "os" @@ -339,7 +345,6 @@ import ( "syscall" "github.com/dgraph-io/badger" - "github.com/pkg/errors" "github.com/spf13/viper" abci "github.com/tendermint/tendermint/abci/types" @@ -393,13 +398,13 @@ func newTendermint(app abci.Application, configFile string) (*nm.Node, error) { config.RootDir = filepath.Dir(filepath.Dir(configFile)) viper.SetConfigFile(configFile) if err := viper.ReadInConfig(); err != nil { - return nil, errors.Wrap(err, "viper failed to read config file") + return nil, fmt.Errorf("viper failed to read config file: %w", err) } if err := viper.Unmarshal(config); err != nil { - return nil, errors.Wrap(err, "viper failed to unmarshal config") + return nil, fmt.Errorf("viper failed to unmarshal config: %w", err) } if err := config.ValidateBasic(); err != nil { - return nil, errors.Wrap(err, "config is invalid") + return nil, fmt.Errorf("config is invalid: %w", err) } // create logger @@ -407,7 +412,7 @@ func newTendermint(app abci.Application, configFile string) (*nm.Node, error) { var err error logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel()) if err != nil { - return nil, errors.Wrap(err, "failed to parse log level") + return nil, fmt.Errorf("failed to parse log level: %w", err) } // read private validator @@ -419,7 +424,7 @@ func newTendermint(app abci.Application, configFile string) (*nm.Node, error) { // read node key nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) if err != nil { - return nil, errors.Wrap(err, "failed to load node's key") + return nil, fmt.Errorf("failed to load node's key: %w", err) } // create node @@ -433,7 +438,7 @@ func newTendermint(app abci.Application, configFile 
string) (*nm.Node, error) { nm.DefaultMetricsProvider(config.Instrumentation), logger) if err != nil { - return nil, errors.Wrap(err, "failed to create new Tendermint node") + return nil, fmt.Errorf("failed to create new Tendermint node: %w", err) } return node, nil @@ -485,7 +490,7 @@ node, err := nm.NewNode( nm.DefaultMetricsProvider(config.Instrumentation), logger) if err != nil { - return nil, errors.Wrap(err, "failed to create new Tendermint node") + return nil, fmt.Errorf("failed to create new Tendermint node: %w", err) } ``` @@ -503,13 +508,13 @@ config := cfg.DefaultConfig() config.RootDir = filepath.Dir(filepath.Dir(configFile)) viper.SetConfigFile(configFile) if err := viper.ReadInConfig(); err != nil { - return nil, errors.Wrap(err, "viper failed to read config file") + return nil, fmt.Errorf("viper failed to read config file: %w", err) } if err := viper.Unmarshal(config); err != nil { - return nil, errors.Wrap(err, "viper failed to unmarshal config") + return nil, fmt.Errorf("viper failed to unmarshal config: %w", err) } if err := config.ValidateBasic(); err != nil { - return nil, errors.Wrap(err, "config is invalid") + return nil, fmt.Errorf("config is invalid: %w", err) } ``` @@ -530,7 +535,7 @@ pv := privval.LoadFilePV( ```go nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) if err != nil { - return nil, errors.Wrap(err, "failed to load node's key") + return nil, fmt.Errorf("failed to load node's key: %w", err) } ``` @@ -543,7 +548,7 @@ logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) var err error logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel()) if err != nil { - return nil, errors.Wrap(err, "failed to parse log level") + return nil, fmt.Errorf("failed to parse log level: %w", err) } ``` @@ -568,20 +573,40 @@ os.Exit(0) We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for dependency management. -```sh -$ go mod init github.com/me/example -$ go build +```bash +export GO111MODULE=on +go mod init github.com/me/example +``` + +This should create a `go.mod` file. The current tutorial only works with +tendermint > v0.34, so let's make sure we're using the latest version: + +```go +module github.com/me/example + +go 1.15 + +require ( + github.com/dgraph-io/badger v1.6.2 + github.com/tendermint/tendermint v0.34.0-rc4 +) ``` -This should build the binary. +Now we can build the binary: + +```bash +go build +``` To create a default configuration, nodeKey and private validator files, let's execute `tendermint init`. But before we do that, we will need to install Tendermint Core. Please refer to [the official guide](https://docs.tendermint.com/master/introduction/install.html). If you're -installing from source, don't forget to checkout the latest release (`git checkout vX.Y.Z`). +installing from source, don't forget to checkout the latest release (`git +checkout vX.Y.Z`). Don't forget to check that the application uses the same +major version. 
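One hedged way to double-check that version match from the application side (assuming `version.TMCoreSemVer` is exported by the release you depend on) is to print the library version your module compiled against and compare it with `tendermint version` from the CLI:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/version"
)

func main() {
	// The Tendermint Core version pinned in go.mod; its major version should
	// match the `tendermint` binary installed above.
	fmt.Println("tendermint core:", version.TMCoreSemVer)
}
```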
-```sh +```bash $ rm -rf /tmp/example $ TMHOME="/tmp/example" tendermint init @@ -592,7 +617,7 @@ I[2019-07-16|18:40:36.482] Generated genesis file module=m We are ready to start our application: -```sh +```bash $ ./example -config "/tmp/example/config/config.toml" badger 2019/07/16 18:42:25 INFO: All 0 tables opened in 0s @@ -605,7 +630,7 @@ I[2019-07-16|18:42:26.865] Committed state module=s Now open another tab in your terminal and try sending a transaction: -```sh +```bash $ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' { "jsonrpc": "2.0", @@ -625,7 +650,7 @@ Response should contain the height where this transaction was committed. Now let's check if the given key now exists and its value: -``` +```json $ curl -s 'localhost:26657/abci_query?data="tendermint"' { "jsonrpc": "2.0", diff --git a/docs/guides/go.md b/docs/tutorials/go.md similarity index 89% rename from docs/guides/go.md rename to docs/tutorials/go.md index f688d0e4e..728467cfc 100644 --- a/docs/guides/go.md +++ b/docs/tutorials/go.md @@ -41,25 +41,18 @@ Go](https://golang.org/doc/install). Verify that you have the latest version of Go installed: -```sh +```bash $ go version -go version go1.13.1 darwin/amd64 -``` - -Make sure you have `$GOPATH` environment variable set: - -```sh -$ echo $GOPATH -/Users/melekes/go +go version go1.15.1 darwin/amd64 ``` ## 1.2 Creating a new Go project We'll start by creating a new Go project. -```sh -$ mkdir kvstore -$ cd kvstore +```bash +mkdir kvstore +cd kvstore ``` Inside the example directory create a `main.go` file with the following content: @@ -78,8 +71,8 @@ func main() { When run, this should print "Hello, Tendermint Core" to the standard output. -```sh -$ go run main.go +```bash +go run main.go Hello, Tendermint Core ``` @@ -87,7 +80,7 @@ Hello, Tendermint Core Tendermint Core communicates with the application through the Application BlockChain Interface (ABCI). All message types are defined in the [protobuf -file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto). +file](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/abci/types.proto). This allows Tendermint Core to run applications written in any programming language. @@ -112,10 +105,6 @@ func (KVStoreApplication) Info(req abcitypes.RequestInfo) abcitypes.ResponseInfo return abcitypes.ResponseInfo{} } -func (KVStoreApplication) SetOption(req abcitypes.RequestSetOption) abcitypes.ResponseSetOption { - return abcitypes.ResponseSetOption{} -} - func (KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcitypes.ResponseDeliverTx { return abcitypes.ResponseDeliverTx{Code: 0} } @@ -143,6 +132,22 @@ func (KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes. 
func (KVStoreApplication) EndBlock(req abcitypes.RequestEndBlock) abcitypes.ResponseEndBlock { return abcitypes.ResponseEndBlock{} } + +func (KVStoreApplication) ListSnapshots(abcitypes.RequestListSnapshots) abcitypes.ResponseListSnapshots { + return abcitypes.ResponseListSnapshots{} +} + +func (KVStoreApplication) OfferSnapshot(abcitypes.RequestOfferSnapshot) abcitypes.ResponseOfferSnapshot { + return abcitypes.ResponseOfferSnapshot{} +} + +func (KVStoreApplication) LoadSnapshotChunk(abcitypes.RequestLoadSnapshotChunk) abcitypes.ResponseLoadSnapshotChunk { + return abcitypes.ResponseLoadSnapshotChunk{} +} + +func (KVStoreApplication) ApplySnapshotChunk(abcitypes.RequestApplySnapshotChunk) abcitypes.ResponseApplySnapshotChunk { + return abcitypes.ResponseApplySnapshotChunk{} +} ``` Now I will go through each method explaining when it's called and adding @@ -233,12 +238,11 @@ application in 3 parts: `BeginBlock`, one `DeliverTx` per transaction and `EndBlock` in the end. DeliverTx are being transferred asynchronously, but the responses are expected to come in order. -``` +```go func (app *KVStoreApplication) BeginBlock(req abcitypes.RequestBeginBlock) abcitypes.ResponseBeginBlock { app.currentBatch = app.db.NewTransaction(true) return abcitypes.ResponseBeginBlock{} } - ``` Here we create a batch, which will store block's transactions. @@ -291,7 +295,7 @@ the application's `Query` method. Applications are free to provide their own APIs. But by using Tendermint Core as a proxy, clients (including [light client -package](https://godoc.org/github.com/tendermint/tendermint/lite2)) can leverage +package](https://godoc.org/github.com/tendermint/tendermint/light)) can leverage the unified API across different applications. Plus they won't have to call the otherwise separate Tendermint Core API for additional proofs. @@ -425,23 +429,42 @@ os.Exit(0) We are going to use [Go modules](https://github.com/golang/go/wiki/Modules) for dependency management. -```sh -$ export GO111MODULE=on -$ go mod init github.com/me/example -$ go build +```bash +export GO111MODULE=on +go mod init github.com/me/example +``` + +This should create a `go.mod` file. The current tutorial only works with +tendermint > v0.34, so let's make sure we're using the latest version: + +```go +module github.com/me/example + +go 1.15 + +require ( + github.com/dgraph-io/badger v1.6.2 + github.com/tendermint/tendermint v0.34.0-rc4 +) ``` -This should build the binary. +Now we can build the binary: + +```bash +go build +``` To create a default configuration, nodeKey and private validator files, let's execute `tendermint init`. But before we do that, we will need to install Tendermint Core. Please refer to [the official guide](https://docs.tendermint.com/master/introduction/install.html). If you're -installing from source, don't forget to checkout the latest release (`git checkout vX.Y.Z`). +installing from source, don't forget to checkout the latest release (`git +checkout vX.Y.Z`). Don't forget to check that the application uses the same +major version. 
-```sh -$ rm -rf /tmp/example -$ TMHOME="/tmp/example" tendermint init +```bash +rm -rf /tmp/example +TMHOME="/tmp/example" tendermint init I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json @@ -454,9 +477,9 @@ Feel free to explore the generated files, which can be found at We are ready to start our application: -```sh -$ rm example.sock -$ ./example +```bash +rm example.sock +./example badger 2019/07/16 18:25:11 INFO: All 0 tables opened in 0s badger 2019/07/16 18:25:11 INFO: Replaying file id: 0 at offset: 0 @@ -467,8 +490,8 @@ I[2019-07-16|18:25:11.523] Starting ABCIServer impl=ABC Then we need to start Tendermint Core and point it to our application. Staying within the application directory execute: -```sh -$ TMHOME="/tmp/example" tendermint node --proxy_app=unix://example.sock +```bash +TMHOME="/tmp/example" tendermint node --proxy_app=unix://example.sock I[2019-07-16|18:26:20.362] Version info module=main software=0.32.1 block=10 p2p=7 I[2019-07-16|18:26:20.383] Starting Node module=main impl=Node @@ -480,7 +503,7 @@ I[2019-07-16|18:26:21.446] Committed state module=s This should start the full node and connect to our ABCI application. -``` +```sh I[2019-07-16|18:25:11.525] Waiting for new connection... I[2019-07-16|18:26:20.329] Accepted a new connection I[2019-07-16|18:26:20.329] Waiting for new connection... @@ -491,8 +514,8 @@ I[2019-07-16|18:26:20.330] Accepted a new connection Now open another tab in your terminal and try sending a transaction: -```sh -$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' +```json +curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' { "jsonrpc": "2.0", "id": "", @@ -510,8 +533,8 @@ Response should contain the height where this transaction was committed. Now let's check if the given key now exists and its value: -``` -$ curl -s 'localhost:26657/abci_query?data="tendermint"' +```json +curl -s 'localhost:26657/abci_query?data="tendermint"' { "jsonrpc": "2.0", "id": "", diff --git a/docs/guides/java.md b/docs/tutorials/java.md similarity index 90% rename from docs/guides/java.md rename to docs/tutorials/java.md index 12bbc4565..dbd005957 100644 --- a/docs/guides/java.md +++ b/docs/tutorials/java.md @@ -40,7 +40,7 @@ Please refer to [the Oracle's guide for installing JDK](https://www.oracle.com/t Verify that you have installed Java successfully: -```sh +```bash $ java -version java version "12.0.2" 2019-07-16 Java(TM) SE Runtime Environment (build 12.0.2+10) @@ -52,7 +52,7 @@ This guide is written using Java SE Development Kit 12. Make sure you have `$JAVA_HOME` environment variable set: -```sh +```bash $ echo $JAVA_HOME /Library/Java/JavaVirtualMachines/jdk-12.0.2.jdk/Contents/Home ``` @@ -63,21 +63,21 @@ For Gradle installation, please refer to [their official guide](https://gradle.o We'll start by creating a new Gradle project. -```sh -$ export KVSTORE_HOME=~/kvstore -$ mkdir $KVSTORE_HOME -$ cd $KVSTORE_HOME +```bash +export KVSTORE_HOME=~/kvstore +mkdir $KVSTORE_HOME +cd $KVSTORE_HOME ``` Inside the example directory run: -```sh +```bash gradle init --dsl groovy --package io.example --project-name example --type java-application --test-framework junit ``` This will create a new project for you. The tree of files should look like: -```sh +```bash $ tree . 
|-- build.gradle @@ -105,7 +105,7 @@ $ tree When run, this should print "Hello world." to the standard output. -```sh +```bash $ ./gradlew run > Task :run Hello world. @@ -115,7 +115,7 @@ Hello world. Tendermint Core communicates with the application through the Application BlockChain Interface (ABCI). All message types are defined in the [protobuf -file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto). +file](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/abci/types.proto). This allows Tendermint Core to run applications written in any programming language. @@ -166,19 +166,31 @@ Now we should be ready to compile the `*.proto` files. Copy the necessary `.proto` files to your project: -```sh +```bash mkdir -p \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/kv \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/abci \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/version \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/libs \ $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto -cp $GOPATH/src/github.com/tendermint/tendermint/abci/types/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types/types.proto -cp $GOPATH/src/github.com/tendermint/tendermint/crypto/merkle/merkle.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle/merkle.proto -cp $GOPATH/src/github.com/tendermint/tendermint/libs/kv/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/kv/types.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/abci/types.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/abci/types.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/version/version.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/version/version.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/types.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/types.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/evidence.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/evidence.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/params.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/params.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/crypto/merkle.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto/merkle.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/crypto/keys.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto/keys.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/libs/types.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/libs/types.proto cp $GOPATH/src/github.com/gogo/protobuf/gogoproto/gogo.proto 
\ $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto/gogo.proto ``` @@ -195,13 +207,13 @@ dependencies { To generate all protobuf-type classes run: -```sh +```bash ./gradlew generateProto ``` To verify that everything went smoothly, you can inspect the `build/generated/` directory: -```sh +```bash $ tree build/generated/ build/generated/ |-- source @@ -430,7 +442,7 @@ the application's `Query` method. Applications are free to provide their own APIs. But by using Tendermint Core as a proxy, clients (including [light client -package](https://godoc.org/github.com/tendermint/tendermint/lite)) can leverage +package](https://godoc.org/github.com/tendermint/tendermint/light)) can leverage the unified API across different applications. Plus they won't have to call the otherwise separate Tendermint Core API for additional proofs. @@ -534,7 +546,7 @@ To create a default configuration, nodeKey and private validator files, let's execute `tendermint init`. But before we do that, we will need to install Tendermint Core. -```sh +```bash $ rm -rf /tmp/example $ cd $GOPATH/src/github.com/tendermint/tendermint $ make install @@ -551,7 +563,7 @@ Feel free to explore the generated files, which can be found at We are ready to start our application: -```sh +```bash ./gradlew run gRPC server started, listening on 26658 @@ -560,7 +572,7 @@ gRPC server started, listening on 26658 Then we need to start Tendermint Core and point it to our application. Staying within the application directory execute: -```sh +```bash $ TMHOME="/tmp/example" tendermint node --abci grpc --proxy_app tcp://127.0.0.1:26658 I[2019-07-28|15:44:53.632] Version info module=main software=0.32.1 block=10 p2p=7 @@ -572,7 +584,7 @@ I[2019-07-28|15:44:54.814] Committed state module=s Now open another tab in your terminal and try sending a transaction: -```sh +```bash $ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' { "jsonrpc": "2.0", @@ -591,7 +603,7 @@ Response should contain the height where this transaction was committed. Now let's check if the given key now exists and its value: -```sh +```bash $ curl -s 'localhost:26657/abci_query?data="tendermint"' { "jsonrpc": "2.0", diff --git a/docs/guides/kotlin.md b/docs/tutorials/kotlin.md similarity index 88% rename from docs/guides/kotlin.md rename to docs/tutorials/kotlin.md index 0c15098a4..50f846e68 100644 --- a/docs/guides/kotlin.md +++ b/docs/tutorials/kotlin.md @@ -40,8 +40,8 @@ Please refer to [the Oracle's guide for installing JDK](https://www.oracle.com/t Verify that you have installed Java successfully: -```sh -$ java -version +```bash +java -version java version "1.8.0_162" Java(TM) SE Runtime Environment (build 1.8.0_162-b12) Java HotSpot(TM) 64-Bit Server VM (build 25.162-b12, mixed mode) @@ -52,8 +52,8 @@ In my case it is Java SE Development Kit 8. Make sure you have `$JAVA_HOME` environment variable set: -```sh -$ echo $JAVA_HOME +```bash +echo $JAVA_HOME /Library/Java/JavaVirtualMachines/jdk1.8.0_162.jdk/Contents/Home ``` @@ -63,22 +63,22 @@ For Gradle installation, please refer to [their official guide](https://gradle.o We'll start by creating a new Gradle project. -```sh -$ export KVSTORE_HOME=~/kvstore -$ mkdir $KVSTORE_HOME -$ cd $KVSTORE_HOME +```bash +export KVSTORE_HOME=~/kvstore +mkdir $KVSTORE_HOME +cd $KVSTORE_HOME ``` Inside the example directory run: -```sh +```bash gradle init --dsl groovy --package io.example --project-name example --type kotlin-application ``` This will create a new project for you. 
The tree of files should look like: -```sh -$ tree +```bash +tree . |-- build.gradle |-- gradle @@ -105,8 +105,8 @@ $ tree When run, this should print "Hello world." to the standard output. -```sh -$ ./gradlew run +```bash +./gradlew run > Task :run Hello world. ``` @@ -115,7 +115,7 @@ Hello world. Tendermint Core communicates with the application through the Application BlockChain Interface (ABCI). All message types are defined in the [protobuf -file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto). +file](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/abci/types.proto). This allows Tendermint Core to run applications written in any programming language. @@ -166,19 +166,31 @@ Now we should be ready to compile the `*.proto` files. Copy the necessary `.proto` files to your project: -```sh +```bash mkdir -p \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/kv \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/abci \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/version \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/libs \ $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto -cp $GOPATH/src/github.com/tendermint/tendermint/abci/types/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/abci/types/types.proto -cp $GOPATH/src/github.com/tendermint/tendermint/crypto/merkle/merkle.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/crypto/merkle/merkle.proto -cp $GOPATH/src/github.com/tendermint/tendermint/libs/kv/types.proto \ - $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/libs/kv/types.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/abci/types.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/abci/types.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/version/version.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/version/version.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/types.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/types.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/evidence.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/evidence.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/types/params.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/types/params.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/crypto/merkle.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto/merkle.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/crypto/keys.proto \ + $KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/crypto/keys.proto +cp $GOPATH/src/github.com/tendermint/tendermint/proto/tendermint/libs/types.proto \ + 
$KVSTORE_HOME/src/main/proto/github.com/tendermint/tendermint/proto/tendermint/libs/types.proto cp $GOPATH/src/github.com/gogo/protobuf/gogoproto/gogo.proto \ $KVSTORE_HOME/src/main/proto/github.com/gogo/protobuf/gogoproto/gogo.proto ``` @@ -195,14 +207,14 @@ dependencies { To generate all protobuf-type classes run: -```sh +```bash ./gradlew generateProto ``` To verify that everything went smoothly, you can inspect the `build/generated/` directory: -```sh -$ tree build/generated/ +```bash +tree build/generated/ build/generated/ `-- source `-- proto @@ -410,7 +422,7 @@ the application's `Query` method. Applications are free to provide their own APIs. But by using Tendermint Core as a proxy, clients (including [light client -package](https://godoc.org/github.com/tendermint/tendermint/lite)) can leverage +package](https://godoc.org/github.com/tendermint/tendermint/light)) can leverage the unified API across different applications. Plus they won't have to call the otherwise separate Tendermint Core API for additional proofs. @@ -508,11 +520,11 @@ To create a default configuration, nodeKey and private validator files, let's execute `tendermint init`. But before we do that, we will need to install Tendermint Core. -```sh -$ rm -rf /tmp/example -$ cd $GOPATH/src/github.com/tendermint/tendermint -$ make install -$ TMHOME="/tmp/example" tendermint init +```bash +rm -rf /tmp/example +cd $GOPATH/src/github.com/tendermint/tendermint +make install +TMHOME="/tmp/example" tendermint init I[2019-07-16|18:20:36.480] Generated private validator module=main keyFile=/tmp/example/config/priv_validator_key.json stateFile=/tmp/example2/data/priv_validator_state.json I[2019-07-16|18:20:36.481] Generated node key module=main path=/tmp/example/config/node_key.json @@ -525,7 +537,7 @@ Feel free to explore the generated files, which can be found at We are ready to start our application: -```sh +```bash ./gradlew run gRPC server started, listening on 26658 @@ -534,8 +546,8 @@ gRPC server started, listening on 26658 Then we need to start Tendermint Core and point it to our application. Staying within the application directory execute: -```sh -$ TMHOME="/tmp/example" tendermint node --abci grpc --proxy_app tcp://127.0.0.1:26658 +```bash +TMHOME="/tmp/example" tendermint node --abci grpc --proxy_app tcp://127.0.0.1:26658 I[2019-07-28|15:44:53.632] Version info module=main software=0.32.1 block=10 p2p=7 I[2019-07-28|15:44:53.677] Starting Node module=main impl=Node @@ -546,8 +558,8 @@ I[2019-07-28|15:44:54.814] Committed state module=s Now open another tab in your terminal and try sending a transaction: -```sh -$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' +```bash +curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' { "jsonrpc": "2.0", "id": "", @@ -565,8 +577,8 @@ Response should contain the height where this transaction was committed. 
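The broadcast (and the query that follows) can also be issued from Go rather than `curl`. A minimal sketch, assuming the v0.34 RPC client API and the tutorial's local endpoint:

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
	"github.com/tendermint/tendermint/types"
)

func main() {
	client, err := rpchttp.New("tcp://127.0.0.1:26657", "/websocket")
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Equivalent of: curl 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"'
	res, err := client.BroadcastTxCommit(ctx, types.Tx("tendermint=rocks"))
	if err != nil {
		panic(err)
	}
	fmt.Println("committed at height:", res.Height)

	// Equivalent of: curl 'localhost:26657/abci_query?data="tendermint"'
	q, err := client.ABCIQuery(ctx, "", []byte("tendermint"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("value: %s\n", q.Response.Value)
}
```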
Now let's check if the given key now exists and its value:
-```sh
-$ curl -s 'localhost:26657/abci_query?data="tendermint"'
+```bash
+curl -s 'localhost:26657/abci_query?data="tendermint"'
{
  "jsonrpc": "2.0",
  "id": "",
diff --git a/docs/guides/readme.md b/docs/tutorials/readme.md
similarity index 100%
rename from docs/guides/readme.md
rename to docs/tutorials/readme.md
diff --git a/docs/versions b/docs/versions
index 1f7391f92..f3f5734d1 100644
--- a/docs/versions
+++ b/docs/versions
@@ -1 +1,3 @@
-master
+v0.32 v0.32
+cyrus/0.33-version v0.33
+master master
diff --git a/dredd.yml b/dredd.yml
index ba315bb90..66487e670 100644
--- a/dredd.yml
+++ b/dredd.yml
@@ -29,5 +29,5 @@ hooks-worker-handler-host: 127.0.0.1
hooks-worker-handler-port: 61321
config: ./dredd.yml # This path accepts no variables
-blueprint: ./rpc/swagger/swagger.yaml
+blueprint: ./rpc/openapi/openapi.yaml
endpoint: "http://127.0.0.1:26657/"
diff --git a/evidence/codec.go b/evidence/codec.go
deleted file mode 100644
index 650a34607..000000000
--- a/evidence/codec.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package evidence
-
-import (
-	amino "github.com/tendermint/go-amino"
-
-	cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino"
-	"github.com/tendermint/tendermint/types"
-)
-
-var cdc = amino.NewCodec()
-
-func init() {
-	RegisterMessages(cdc)
-	cryptoamino.RegisterAmino(cdc)
-	types.RegisterEvidences(cdc)
-}
-
-// For testing purposes only
-func RegisterMockEvidences() {
-	types.RegisterMockEvidences(cdc)
-}
diff --git a/evidence/doc.go b/evidence/doc.go
new file mode 100644
index 000000000..d521debd3
--- /dev/null
+++ b/evidence/doc.go
@@ -0,0 +1,53 @@
+/*
+Package evidence handles all evidence storage and gossiping from detection to block proposal.
+For the different types of evidence refer to the `evidence.go` file in the types package
+or https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md.
+
+Gossiping
+
+The core functionality begins with the evidence reactor (see reactor.go)
+which operates both the sending and receiving of evidence.
+
+The `Receive` function takes a list of evidence and does the following:
+
+1. Checks that it does not already have the evidence stored
+
+2. Verifies the evidence against the node's state (see state/validation.go#VerifyEvidence)
+
+3. Stores the evidence to a db and a concurrent list
+
+The gossiping of evidence is initiated when a peer is added, which starts a goroutine to broadcast currently
+uncommitted evidence at intervals of 60 seconds (set by broadcastEvidenceIntervalS).
+It uses a concurrent list to store the evidence and before sending verifies that each evidence is still valid in the
+sense that it has not exceeded the max evidence age and height (see types/params.go#EvidenceParams).
+
+There are two buckets that evidence can be stored in: Pending & Committed.
+
+1. Pending is evidence awaiting to be committed (evidence is usually broadcasted then)
+
+2. Committed is for those already on the block and is to ensure that evidence isn't submitted twice
+
+All evidence is proto encoded to disk.
+
+Proposing
+
+When a new block is being proposed (in state/execution.go#CreateProposalBlock),
+`PendingEvidence(maxBytes)` is called to send up to the maxBytes of uncommitted evidence, from the evidence store,
+prioritized in order of age. All evidence is checked for expiration.
+
+When a node receives evidence in a block it will use the evidence module as a cache first to see if it has
+already verified the evidence before trying to verify it again.
+
+Once the proposed evidence is submitted,
+the evidence is marked as committed and is moved from the broadcasted set to the committed set.
+As a result it is also removed from the concurrent list so that it is no longer gossiped.
+
+Minor Functionality
+
+As all evidence (including POLCs) is bounded by an expiration date, evidence that exceeds this is no longer
+needed and hence is pruned. Currently, committed evidence is saved only as a marker of the height at which the
+evidence was committed, and is hence very small. All updates are made from the `Update(block, state)` function
+which should be called when a new block is committed.
+
+*/
+package evidence
diff --git a/evidence/errors.go b/evidence/errors.go
deleted file mode 100644
index 7bad19c81..000000000
--- a/evidence/errors.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package evidence
-
-import (
-	"fmt"
-)
-
-// ErrInvalidEvidence returns when evidence failed to validate
-type ErrInvalidEvidence struct {
-	Reason error
-}
-
-func (e ErrInvalidEvidence) Error() string {
-	return fmt.Sprintf("evidence is not valid: %v ", e.Reason)
-}
-
-// ErrEvidenceAlreadyStored indicates that the evidence has already been stored in the evidence db
-type ErrEvidenceAlreadyStored struct{}
-
-func (e ErrEvidenceAlreadyStored) Error() string {
-	return "evidence is already stored"
-}
diff --git a/evidence/mocks/block_store.go b/evidence/mocks/block_store.go
new file mode 100644
index 000000000..3414e9952
--- /dev/null
+++ b/evidence/mocks/block_store.go
@@ -0,0 +1,45 @@
+// Code generated by mockery v2.3.0. DO NOT EDIT.
+
+package mocks
+
+import (
+	mock "github.com/stretchr/testify/mock"
+	types "github.com/tendermint/tendermint/types"
+)
+
+// BlockStore is an autogenerated mock type for the BlockStore type
+type BlockStore struct {
+	mock.Mock
+}
+
+// LoadBlockCommit provides a mock function with given fields: height
+func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit {
+	ret := _m.Called(height)
+
+	var r0 *types.Commit
+	if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok {
+		r0 = rf(height)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*types.Commit)
+		}
+	}
+
+	return r0
+}
+
+// LoadBlockMeta provides a mock function with given fields: height
+func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
+	ret := _m.Called(height)
+
+	var r0 *types.BlockMeta
+	if rf, ok := ret.Get(0).(func(int64) *types.BlockMeta); ok {
+		r0 = rf(height)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*types.BlockMeta)
+		}
+	}
+
+	return r0
+}
diff --git a/evidence/pool.go b/evidence/pool.go
index 68967ede1..77dbf1a39 100644
--- a/evidence/pool.go
+++ b/evidence/pool.go
@@ -1,50 +1,227 @@
package evidence

import (
+	"bytes"
+	"errors"
	"fmt"
+	"sort"
	"sync"
+	"sync/atomic"
	"time"

+	"github.com/gogo/protobuf/proto"
+	gogotypes "github.com/gogo/protobuf/types"
	dbm "github.com/tendermint/tm-db"

	clist "github.com/tendermint/tendermint/libs/clist"
	"github.com/tendermint/tendermint/libs/log"
+	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

-// Pool maintains a pool of valid evidence
-// in an Store.
+const (
+	baseKeyCommitted = byte(0x00)
+	baseKeyPending   = byte(0x01)
+)
+
+// Pool maintains a pool of valid evidence to be broadcasted and committed
type Pool struct {
	logger log.Logger

-	store        *Store
-	evidenceList *clist.CList // concurrent linked-list of evidence
+	evidenceStore dbm.DB
+	evidenceList  *clist.CList // concurrent linked-list of evidence
+	evidenceSize  uint32       // amount of pending evidence

	// needed to load validators to verify evidence
-	stateDB dbm.DB
+	stateDB sm.Store
+	// needed to load headers and commits to verify evidence
+	blockStore BlockStore

+	mtx sync.Mutex
	// latest state
-	mtx   sync.Mutex
	state sm.State
+
+	pruningHeight int64
+	pruningTime   time.Time
+}
+
+// NewPool creates an evidence pool. If using an existing evidence store,
+// it will add all pending evidence to the concurrent list.
+func NewPool(evidenceDB dbm.DB, stateDB sm.Store, blockStore BlockStore) (*Pool, error) {
+
+	state, err := stateDB.Load()
+	if err != nil {
+		return nil, fmt.Errorf("cannot load state: %w", err)
+	}
+
+	pool := &Pool{
+		stateDB:       stateDB,
+		blockStore:    blockStore,
+		state:         state,
+		logger:        log.NewNopLogger(),
+		evidenceStore: evidenceDB,
+		evidenceList:  clist.New(),
+	}
+
+	// if pending evidence already in db, in event of prior failure, then check for expiration,
+	// update the size and load it back to the evidenceList
+	pool.pruningHeight, pool.pruningTime = pool.removeExpiredPendingEvidence()
+	evList, _, err := pool.listEvidence(baseKeyPending, -1)
+	if err != nil {
+		return nil, err
+	}
+	atomic.StoreUint32(&pool.evidenceSize, uint32(len(evList)))
+	for _, ev := range evList {
+		pool.evidenceList.PushBack(ev)
+	}
+
+	return pool, nil
+}
+
+// PendingEvidence is used primarily as part of block proposal and returns up to maxBytes of uncommitted evidence.
+func (evpool *Pool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) {
+	if evpool.Size() == 0 {
+		return []types.Evidence{}, 0
+	}
+	evidence, size, err := evpool.listEvidence(baseKeyPending, maxBytes)
+	if err != nil {
+		evpool.logger.Error("Unable to retrieve pending evidence", "err", err)
+	}
+	return evidence, size
+}
+
+// Update pulls the latest state to be used for expiration and evidence params and then prunes all expired evidence
+func (evpool *Pool) Update(state sm.State, ev types.EvidenceList) {
+	// sanity check
+	if state.LastBlockHeight <= evpool.state.LastBlockHeight {
+		panic(fmt.Sprintf(
+			"Failed EvidencePool.Update new state height is less than or equal to previous state height: %d <= %d",
+			state.LastBlockHeight,
+			evpool.state.LastBlockHeight,
+		))
+	}
+	evpool.logger.Info("Updating evidence pool", "last_block_height", state.LastBlockHeight,
+		"last_block_time", state.LastBlockTime)
+
+	// update the state
+	evpool.updateState(state)
+
+	evpool.markEvidenceAsCommitted(ev)
+
+	// prune pending evidence when it has expired. This also updates when the next evidence will expire
+	if evpool.Size() > 0 && state.LastBlockHeight > evpool.pruningHeight &&
+		state.LastBlockTime.After(evpool.pruningTime) {
+		evpool.pruningHeight, evpool.pruningTime = evpool.removeExpiredPendingEvidence()
+	}
+}
+
+// AddEvidence checks the evidence is valid and adds it to the pool.
+func (evpool *Pool) AddEvidence(ev types.Evidence) error { + evpool.logger.Debug("Attempting to add evidence", "ev", ev) + + // We have already verified this piece of evidence - no need to do it again + if evpool.isPending(ev) { + evpool.logger.Info("Evidence already pending, ignoring this one", "ev", ev) + return nil + } + + // check that the evidence isn't already committed + if evpool.isCommitted(ev) { + // this can happen if the peer that sent us the evidence is behind so we shouldn't + // punish the peer. + evpool.logger.Debug("Evidence was already committed, ignoring this one", "ev", ev) + return nil + } + + // 1) Verify against state. + err := evpool.verify(ev) + if err != nil { + return types.NewErrInvalidEvidence(ev, err) + } + + // 2) Save to store. + if err := evpool.addPendingEvidence(ev); err != nil { + return fmt.Errorf("can't add evidence to pending list: %w", err) + } + + // 3) Add evidence to clist. + evpool.evidenceList.PushBack(ev) + + evpool.logger.Info("Verified new evidence of byzantine behavior", "evidence", ev) + + return nil } -func NewPool(stateDB, evidenceDB dbm.DB) *Pool { - store := NewStore(evidenceDB) - evpool := &Pool{ - stateDB: stateDB, - state: sm.LoadState(stateDB), - logger: log.NewNopLogger(), - store: store, - evidenceList: clist.New(), +// AddEvidenceFromConsensus should be exposed only to the consensus reactor so it can add evidence +// to the pool directly without the need for verification. +func (evpool *Pool) AddEvidenceFromConsensus(ev types.Evidence) error { + + // we already have this evidence, log this but don't return an error. + if evpool.isPending(ev) { + evpool.logger.Info("Evidence already pending, ignoring this one", "ev", ev) + return nil } - return evpool + + if err := evpool.addPendingEvidence(ev); err != nil { + return fmt.Errorf("can't add evidence to pending list: %w", err) + } + // add evidence to be gossiped with peers + evpool.evidenceList.PushBack(ev) + + evpool.logger.Info("Verified new evidence of byzantine behavior", "evidence", ev) + + return nil } +// CheckEvidence takes an array of evidence from a block and verifies all the evidence there. +// If it has already verified the evidence then it jumps to the next one. It ensures that no +// evidence has already been committed or is being proposed twice. It also adds any +// evidence that it doesn't currently have so that it can quickly form ABCI Evidence later. +func (evpool *Pool) CheckEvidence(evList types.EvidenceList) error { + hashes := make([][]byte, len(evList)) + for idx, ev := range evList { + + ok := evpool.fastCheck(ev) + + if !ok { + // check that the evidence isn't already committed + if evpool.isCommitted(ev) { + return &types.ErrInvalidEvidence{Evidence: ev, Reason: errors.New("evidence was already committed")} + } + + err := evpool.verify(ev) + if err != nil { + return &types.ErrInvalidEvidence{Evidence: ev, Reason: err} + } + + if err := evpool.addPendingEvidence(ev); err != nil { + // Something went wrong with adding the evidence but we already know it is valid + // hence we log an error and continue + evpool.logger.Error("Can't add evidence to pending list", "err", err, "ev", ev) + } + + evpool.logger.Info("Verified new evidence of byzantine behavior", "evidence", ev) + } + + // check for duplicate evidence. We cache hashes so we don't have to work them out again. 
+		hashes[idx] = ev.Hash()
+		for i := idx - 1; i >= 0; i-- {
+			if bytes.Equal(hashes[i], hashes[idx]) {
+				return &types.ErrInvalidEvidence{Evidence: ev, Reason: errors.New("duplicate evidence")}
+			}
+		}
+	}
+
+	return nil
+}
+
+// EvidenceFront goes to the first evidence in the clist.
 func (evpool *Pool) EvidenceFront() *clist.CElement {
 	return evpool.evidenceList.Front()
 }
 
+// EvidenceWaitChan returns a channel that closes once the first evidence is in the list, i.e. Front is not nil.
 func (evpool *Pool) EvidenceWaitChan() <-chan struct{} {
 	return evpool.evidenceList.WaitChan()
 }
 
@@ -54,15 +231,9 @@ func (evpool *Pool) SetLogger(l log.Logger) {
 	evpool.logger = l
 }
 
-// PriorityEvidence returns the priority evidence.
-func (evpool *Pool) PriorityEvidence() []types.Evidence {
-	return evpool.store.PriorityEvidence()
-}
-
-// PendingEvidence returns up to maxNum uncommitted evidence.
-// If maxNum is -1, all evidence is returned.
-func (evpool *Pool) PendingEvidence(maxNum int64) []types.Evidence {
-	return evpool.store.PendingEvidence(maxNum)
+// Size returns the number of pending evidence items in the pool.
+func (evpool *Pool) Size() uint32 {
+	return atomic.LoadUint32(&evpool.evidenceSize)
 }
 
 // State returns the current state of the evpool.
@@ -72,105 +243,287 @@ func (evpool *Pool) State() sm.State {
 	return evpool.state
 }
 
-// Update loads the latest
-func (evpool *Pool) Update(block *types.Block, state sm.State) {
+//--------------------------------------------------------------------------
 
-	// sanity check
-	if state.LastBlockHeight != block.Height {
-		panic(
-			fmt.Sprintf("Failed EvidencePool.Update sanity check: got state.Height=%d with block.Height=%d",
-				state.LastBlockHeight,
-				block.Height,
-			),
-		)
-	}
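The core of the fast path below is an order-insensitive comparison of two validator slices. A standalone sketch of that comparison is shown here; the helper name sameByzantineValidators is ours, not the diff's, and it assumes (as the pool does) that the trusted slice is already stored in canonical order.

```go
package main

import (
	"bytes"
	"fmt"
	"sort"

	"github.com/tendermint/tendermint/types"
)

// sameByzantineValidators mirrors the check fastCheck performs: copy the
// incoming slice, sort the copy into voting-power order, then require
// matching addresses and voting powers element by element.
func sameByzantineValidators(trusted, got []*types.Validator) bool {
	if len(trusted) != len(got) {
		return false
	}
	gotCopy := make([]*types.Validator, len(got))
	for i, v := range got {
		gotCopy[i] = v.Copy()
	}
	sort.Sort(types.ValidatorsByVotingPower(gotCopy))
	for i, val := range trusted {
		if !bytes.Equal(gotCopy[i].Address, val.Address) ||
			gotCopy[i].VotingPower != val.VotingPower {
			return false
		}
	}
	return true
}

func main() {
	vals, _ := types.RandValidatorSet(3, 10)
	fmt.Println(sameByzantineValidators(vals.Validators, vals.Validators))
}
```

+// fastCheck leverages the fact that the evidence pool may have already verified the evidence to see if it can
+// quickly conclude that the evidence is already valid.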
+func (evpool *Pool) fastCheck(ev types.Evidence) bool {
+	if lcae, ok := ev.(*types.LightClientAttackEvidence); ok {
+		key := keyPending(ev)
+		evBytes, err := evpool.evidenceStore.Get(key)
+		if evBytes == nil { // the evidence is not in the node's pending list
+			return false
+		}
+		if err != nil {
+			evpool.logger.Error("Failed to load light client attack evidence", "err", err, "key(height/hash)", key)
+			return false
+		}
+		var trustedPb tmproto.LightClientAttackEvidence
+		err = trustedPb.Unmarshal(evBytes)
+		if err != nil {
+			evpool.logger.Error("Failed to convert light client attack evidence from bytes",
+				"err", err, "key(height/hash)", key)
+			return false
+		}
+		trustedEv, err := types.LightClientAttackEvidenceFromProto(&trustedPb)
+		if err != nil {
+			evpool.logger.Error("Failed to convert light client attack evidence from protobuf",
+				"err", err, "key(height/hash)", key)
+			return false
+		}
+		// ensure that all the byzantine validators that the evidence pool has match the byzantine validators
+		// in this evidence
+		if trustedEv.ByzantineValidators == nil && lcae.ByzantineValidators != nil {
+			return false
+		}
 
-	// update the state
-	evpool.mtx.Lock()
-	evpool.state = state
-	evpool.mtx.Unlock()
+		if len(trustedEv.ByzantineValidators) != len(lcae.ByzantineValidators) {
+			return false
+		}
 
-	// remove evidence from pending and mark committed
-	evpool.MarkEvidenceAsCommitted(block.Height, block.Time, block.Evidence.Evidence)
-}
+		byzValsCopy := make([]*types.Validator, len(lcae.ByzantineValidators))
+		for i, v := range lcae.ByzantineValidators {
+			byzValsCopy[i] = v.Copy()
+		}
 
-// AddEvidence checks the evidence is valid and adds it to the pool.
-func (evpool *Pool) AddEvidence(evidence types.Evidence) error {
+		// ensure that both validator arrays are in the same order
+		sort.Sort(types.ValidatorsByVotingPower(byzValsCopy))
 
-	// check if evidence is already stored
-	if evpool.store.Has(evidence) {
-		return ErrEvidenceAlreadyStored{}
+		for idx, val := range trustedEv.ByzantineValidators {
+			if !bytes.Equal(byzValsCopy[idx].Address, val.Address) {
+				return false
+			}
+			if byzValsCopy[idx].VotingPower != val.VotingPower {
+				return false
+			}
+		}
+
+		return true
 	}
 
-	if err := sm.VerifyEvidence(evpool.stateDB, evpool.State(), evidence); err != nil {
-		return ErrInvalidEvidence{err}
+	// for all other evidence the evidence pool just checks if it is already in the pending db
+	return evpool.isPending(ev)
+}
+
+// isExpired checks whether evidence is expired by checking whether its height and time are
+// older than what the evidence consensus parameters allow. Evidence only expires once both
+// limits have been exceeded.
+func (evpool *Pool) isExpired(height int64, time time.Time) bool {
+	var (
+		params       = evpool.State().ConsensusParams.Evidence
+		ageDuration  = evpool.State().LastBlockTime.Sub(time)
+		ageNumBlocks = evpool.State().LastBlockHeight - height
+	)
+	return ageNumBlocks > params.MaxAgeNumBlocks &&
+		ageDuration > params.MaxAgeDuration
+}
+
+// isCommitted returns true if we have already seen this exact evidence and it is already marked as committed.
+func (evpool *Pool) isCommitted(evidence types.Evidence) bool {
+	key := keyCommitted(evidence)
+	ok, err := evpool.evidenceStore.Has(key)
+	if err != nil {
+		evpool.logger.Error("Unable to find committed evidence", "err", err)
 	}
+	return ok
+}
 
-	// fetch the validator and return its voting power as its priority
-	// TODO: something better ?
-	valset, err := sm.LoadValidators(evpool.stateDB, evidence.Height())
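Note that the expiry rule is a conjunction: evidence that is old by block count but recent by wall-clock time (or vice versa) is still retained. A small self-contained illustration of that AND semantics, with made-up parameter values rather than real consensus params:

```go
package main

import (
	"fmt"
	"time"
)

// expired mirrors the isExpired rule above: evidence only falls out of the
// pool when it is older than BOTH limits. The limit values are made up.
func expired(lastHeight, evHeight int64, lastTime, evTime time.Time) bool {
	const maxAgeNumBlocks = 20
	const maxAgeDuration = 20 * time.Minute
	return lastHeight-evHeight > maxAgeNumBlocks &&
		lastTime.Sub(evTime) > maxAgeDuration
}

func main() {
	now := time.Now()
	// 30 blocks old but only 5 minutes old: NOT expired, because the
	// duration limit has not also been exceeded.
	fmt.Println(expired(100, 70, now, now.Add(-5*time.Minute))) // false
	// 30 blocks old AND 30 minutes old: expired.
	fmt.Println(expired(100, 70, now, now.Add(-30*time.Minute))) // true
}
```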
+// isPending checks whether the evidence is already pending. DB errors are passed to the logger.
+func (evpool *Pool) isPending(evidence types.Evidence) bool {
+	key := keyPending(evidence)
+	ok, err := evpool.evidenceStore.Has(key)
 	if err != nil {
-		return err
+		evpool.logger.Error("Unable to find pending evidence", "err", err)
 	}
-	_, val := valset.GetByAddress(evidence.Address())
-	priority := val.VotingPower
+	return ok
+}
 
-	_, err = evpool.store.AddNewEvidence(evidence, priority)
+func (evpool *Pool) addPendingEvidence(ev types.Evidence) error {
+	evpb, err := types.EvidenceToProto(ev)
 	if err != nil {
-		return err
+		return fmt.Errorf("unable to convert to proto: %w", err)
 	}
 
-	evpool.logger.Info("Verified new evidence of byzantine behaviour", "evidence", evidence)
+	evBytes, err := evpb.Marshal()
+	if err != nil {
+		return fmt.Errorf("unable to marshal evidence: %w", err)
+	}
 
-	// add evidence to clist
-	evpool.evidenceList.PushBack(evidence)
+	key := keyPending(ev)
+	err = evpool.evidenceStore.Set(key, evBytes)
+	if err != nil {
+		return fmt.Errorf("can't persist evidence: %w", err)
+	}
+	atomic.AddUint32(&evpool.evidenceSize, 1)
 
 	return nil
 }
 
-// MarkEvidenceAsCommitted marks all the evidence as committed and removes it from the queue.
-func (evpool *Pool) MarkEvidenceAsCommitted(height int64, lastBlockTime time.Time, evidence []types.Evidence) {
-	// make a map of committed evidence to remove from the clist
-	blockEvidenceMap := make(map[string]struct{})
+func (evpool *Pool) removePendingEvidence(evidence types.Evidence) {
+	key := keyPending(evidence)
+	if err := evpool.evidenceStore.Delete(key); err != nil {
+		evpool.logger.Error("Unable to delete pending evidence", "err", err)
+	} else {
+		atomic.AddUint32(&evpool.evidenceSize, ^uint32(0))
+		evpool.logger.Info("Deleted pending evidence", "evidence", evidence)
+	}
+}
+
+// markEvidenceAsCommitted processes all the evidence in the block, marking it as
+// committed and removing it from the pending database.
+func (evpool *Pool) markEvidenceAsCommitted(evidence types.EvidenceList) {
+	blockEvidenceMap := make(map[string]struct{}, len(evidence))
 	for _, ev := range evidence {
-		evpool.store.MarkEvidenceAsCommitted(ev)
-		blockEvidenceMap[evMapKey(ev)] = struct{}{}
+		if evpool.isPending(ev) {
+			evpool.removePendingEvidence(ev)
+			blockEvidenceMap[evMapKey(ev)] = struct{}{}
+		}
+
+		// Add evidence to the committed list. As the evidence is stored in the block store
+		// we only need to record the height that it was saved at.
+		key := keyCommitted(ev)
+
+		h := gogotypes.Int64Value{Value: ev.Height()}
+		evBytes, err := proto.Marshal(&h)
+		if err != nil {
+			evpool.logger.Error("failed to marshal committed evidence", "err", err, "key(height/hash)", key)
+			continue
+		}
+
+		if err := evpool.evidenceStore.Set(key, evBytes); err != nil {
+			evpool.logger.Error("Unable to save committed evidence", "err", err, "key(height/hash)", key)
+		}
 	}
 
 	// remove committed evidence from the clist
-	evidenceParams := evpool.State().ConsensusParams.Evidence
-	evpool.removeEvidence(height, lastBlockTime, evidenceParams, blockEvidenceMap)
+	if len(blockEvidenceMap) != 0 {
+		evpool.removeEvidenceFromList(blockEvidenceMap)
+	}
 }
 
-// IsCommitted returns true if we have already seen this exact evidence and it is already marked as committed.
-func (evpool *Pool) IsCommitted(evidence types.Evidence) bool {
-	ei := evpool.store.getInfo(evidence)
-	return ei.Evidence != nil && ei.Committed
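Both the pending and committed records share one database, distinguished by a one-byte prefix; because the height is rendered as zero-padded big-endian hex, a plain prefix iteration visits pending evidence from oldest to newest. A sketch of that key layout (mirroring the keyPending/keyCommitted helpers defined at the end of this file):

```go
package main

import "fmt"

// pendingKey mirrors the scheme used below:
// <prefix byte><16-hex-digit height>/<hash hex>.
// Zero-padding the height makes lexicographic DB order equal height order.
func pendingKey(height int64, hash []byte) []byte {
	return append([]byte{0x01}, []byte(fmt.Sprintf("%0.16X/%X", height, hash))...)
}

func main() {
	fmt.Printf("%q\n", pendingKey(42, []byte{0xde, 0xad}))
	// keys for heights 9 and 10 still sort correctly:
	// "0000000000000009" < "000000000000000A"
}
```

+// listEvidence lists evidence from oldest to newest within maxBytes.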
+// If maxBytes is -1, there's no cap on the size of returned evidence.
+func (evpool *Pool) listEvidence(prefixKey byte, maxBytes int64) ([]types.Evidence, int64, error) {
+	var (
+		evSize    int64
+		totalSize int64
+		evidence  []types.Evidence
+		evList    tmproto.EvidenceList // used for calculating the bytes size
+	)
+
+	iter, err := dbm.IteratePrefix(evpool.evidenceStore, []byte{prefixKey})
+	if err != nil {
+		return nil, totalSize, fmt.Errorf("database error: %v", err)
+	}
+	defer iter.Close()
+	for ; iter.Valid(); iter.Next() {
+		var evpb tmproto.Evidence
+		err := evpb.Unmarshal(iter.Value())
+		if err != nil {
+			return evidence, totalSize, err
+		}
+		evList.Evidence = append(evList.Evidence, evpb)
+		evSize = int64(evList.Size())
+		if maxBytes != -1 && evSize > maxBytes {
+			if err := iter.Error(); err != nil {
+				return evidence, totalSize, err
+			}
+			return evidence, totalSize, nil
+		}
+
+		ev, err := types.EvidenceFromProto(&evpb)
+		if err != nil {
+			return nil, totalSize, err
+		}
+
+		totalSize = evSize
+		evidence = append(evidence, ev)
+	}
+
+	if err := iter.Error(); err != nil {
+		return evidence, totalSize, err
+	}
+	return evidence, totalSize, nil
+}
+
+func (evpool *Pool) removeExpiredPendingEvidence() (int64, time.Time) {
+	iter, err := dbm.IteratePrefix(evpool.evidenceStore, []byte{baseKeyPending})
+	if err != nil {
+		evpool.logger.Error("Unable to iterate over pending evidence", "err", err)
+		return evpool.State().LastBlockHeight, evpool.State().LastBlockTime
+	}
+	defer iter.Close()
+	blockEvidenceMap := make(map[string]struct{})
+	for ; iter.Valid(); iter.Next() {
+		ev, err := bytesToEv(iter.Value())
+		if err != nil {
+			evpool.logger.Error("Error converting evidence from protobuf", "err", err)
+			continue
+		}
+		if !evpool.isExpired(ev.Height(), ev.Time()) {
+			if len(blockEvidenceMap) != 0 {
+				evpool.removeEvidenceFromList(blockEvidenceMap)
+			}
+
+			// return the height and time at which this evidence will have expired so we know when to prune next
			return ev.Height() + evpool.State().ConsensusParams.Evidence.MaxAgeNumBlocks + 1,
+				ev.Time().Add(evpool.State().ConsensusParams.Evidence.MaxAgeDuration).Add(time.Second)
+		}
+		evpool.removePendingEvidence(ev)
+		blockEvidenceMap[evMapKey(ev)] = struct{}{}
+	}
+	// We either have no pending evidence or all evidence has expired
+	if len(blockEvidenceMap) != 0 {
+		evpool.removeEvidenceFromList(blockEvidenceMap)
+	}
+	return evpool.State().LastBlockHeight, evpool.State().LastBlockTime
 }
 
-func (evpool *Pool) removeEvidence(
-	height int64,
-	lastBlockTime time.Time,
-	params types.EvidenceParams,
+func (evpool *Pool) removeEvidenceFromList(
 	blockEvidenceMap map[string]struct{}) {
 
 	for e := evpool.evidenceList.Front(); e != nil; e = e.Next() {
-		var (
-			ev           = e.Value.(types.Evidence)
-			ageDuration  = lastBlockTime.Sub(ev.Time())
-			ageNumBlocks = height - ev.Height()
-		)
-
-		// Remove the evidence if it's already in a block or if it's now too old.
- if _, ok := blockEvidenceMap[evMapKey(ev)]; ok || - (ageDuration > params.MaxAgeDuration && ageNumBlocks > params.MaxAgeNumBlocks) { - // remove from clist + // Remove from clist + ev := e.Value.(types.Evidence) + if _, ok := blockEvidenceMap[evMapKey(ev)]; ok { evpool.evidenceList.Remove(e) e.DetachPrev() } } } +func (evpool *Pool) updateState(state sm.State) { + evpool.mtx.Lock() + defer evpool.mtx.Unlock() + evpool.state = state +} + +func bytesToEv(evBytes []byte) (types.Evidence, error) { + var evpb tmproto.Evidence + err := evpb.Unmarshal(evBytes) + if err != nil { + return &types.DuplicateVoteEvidence{}, err + } + + return types.EvidenceFromProto(&evpb) +} + func evMapKey(ev types.Evidence) string { return string(ev.Hash()) } + +// big endian padded hex +func bE(h int64) string { + return fmt.Sprintf("%0.16X", h) +} + +func keyCommitted(evidence types.Evidence) []byte { + return append([]byte{baseKeyCommitted}, keySuffix(evidence)...) +} + +func keyPending(evidence types.Evidence) []byte { + return append([]byte{baseKeyPending}, keySuffix(evidence)...) +} + +func keySuffix(evidence types.Evidence) []byte { + return []byte(fmt.Sprintf("%s/%X", bE(evidence.Height()), evidence.Hash())) +} diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 97694d1ff..046f4efc5 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -1,151 +1,449 @@ -package evidence +package evidence_test import ( "os" - "sync" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/evidence" + "github.com/tendermint/tendermint/evidence/mocks" + "github.com/tendermint/tendermint/libs/log" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmversion "github.com/tendermint/tendermint/proto/tendermint/version" sm "github.com/tendermint/tendermint/state" + smmocks "github.com/tendermint/tendermint/state/mocks" + "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" - tmtime "github.com/tendermint/tendermint/types/time" + "github.com/tendermint/tendermint/version" ) func TestMain(m *testing.M) { - types.RegisterMockEvidences(cdc) code := m.Run() os.Exit(code) } -func initializeValidatorState(valAddr []byte, height int64) dbm.DB { - stateDB := dbm.NewMemDB() - - // create validator set and state - valSet := &types.ValidatorSet{ - Validators: []*types.Validator{ - {Address: valAddr}, - }, - } - state := sm.State{ - LastBlockHeight: 0, - LastBlockTime: tmtime.Now(), - Validators: valSet, - NextValidators: valSet.CopyIncrementProposerPriority(1), - LastHeightValidatorsChanged: 1, - ConsensusParams: types.ConsensusParams{ - Evidence: types.EvidenceParams{ - MaxAgeNumBlocks: 10000, - MaxAgeDuration: 48 * time.Hour, - }, - }, - } +const evidenceChainID = "test_chain" - // save all states up to height - for i := int64(0); i < height; i++ { - state.LastBlockHeight = i - sm.SaveState(stateDB, state) - } +var ( + defaultEvidenceTime = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) + defaultEvidenceMaxBytes int64 = 1000 +) - return stateDB -} +func TestEvidencePoolBasic(t *testing.T) { + var ( + height = int64(1) + stateStore = &smmocks.Store{} + evidenceDB = dbm.NewMemDB() + blockStore = &mocks.BlockStore{} + ) -func TestEvidencePool(t *testing.T) { + valSet, privVals := types.RandValidatorSet(1, 10) - var ( - valAddr = []byte("val1") - height = int64(100002) - stateDB = initializeValidatorState(valAddr, height) - evidenceDB 
= dbm.NewMemDB() - pool = NewPool(stateDB, evidenceDB) - evidenceTime = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) + blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return( + &types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}}, ) + stateStore.On("LoadValidators", mock.AnythingOfType("int64")).Return(valSet, nil) + stateStore.On("Load").Return(createState(height+1, valSet), nil) - goodEvidence := types.NewMockEvidence(height, time.Now(), 0, valAddr) - badEvidence := types.NewMockEvidence(1, evidenceTime, 0, valAddr) + pool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) + require.NoError(t, err) + pool.SetLogger(log.TestingLogger()) - // bad evidence - err := pool.AddEvidence(badEvidence) - assert.Error(t, err) - // err: evidence created at 2019-01-01 00:00:00 +0000 UTC has expired. Evidence can not be older than: ... + // evidence not seen yet: + evs, size := pool.PendingEvidence(defaultEvidenceMaxBytes) + assert.Equal(t, 0, len(evs)) + assert.Zero(t, size) + + ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, privVals[0], evidenceChainID) - var wg sync.WaitGroup - wg.Add(1) + // good evidence + evAdded := make(chan struct{}) go func() { <-pool.EvidenceWaitChan() - wg.Done() + close(evAdded) }() - err = pool.AddEvidence(goodEvidence) - assert.NoError(t, err) - wg.Wait() + // evidence seen but not yet committed: + assert.NoError(t, pool.AddEvidence(ev)) - assert.Equal(t, 1, pool.evidenceList.Len()) + select { + case <-evAdded: + case <-time.After(5 * time.Second): + t.Fatal("evidence was not added to list after 5s") + } - // if we send it again, it shouldnt add and return an error - err = pool.AddEvidence(goodEvidence) - assert.Error(t, err) - assert.Equal(t, 1, pool.evidenceList.Len()) -} + next := pool.EvidenceFront() + assert.Equal(t, ev, next.Value.(types.Evidence)) -func TestEvidencePoolIsCommitted(t *testing.T) { - // Initialization: - var ( - valAddr = []byte("validator_address") - height = int64(42) - lastBlockTime = time.Now() - stateDB = initializeValidatorState(valAddr, height) - evidenceDB = dbm.NewMemDB() - pool = NewPool(stateDB, evidenceDB) - ) - - // evidence not seen yet: - evidence := types.NewMockEvidence(height, time.Now(), 0, valAddr) - assert.False(t, pool.IsCommitted(evidence)) + const evidenceBytes int64 = 372 + evs, size = pool.PendingEvidence(evidenceBytes) + assert.Equal(t, 1, len(evs)) + assert.Equal(t, evidenceBytes, size) // check that the size of the single evidence in bytes is correct - // evidence seen but not yet committed: - assert.NoError(t, pool.AddEvidence(evidence)) - assert.False(t, pool.IsCommitted(evidence)) + // shouldn't be able to add evidence twice + assert.NoError(t, pool.AddEvidence(ev)) + evs, _ = pool.PendingEvidence(defaultEvidenceMaxBytes) + assert.Equal(t, 1, len(evs)) - // evidence seen and committed: - pool.MarkEvidenceAsCommitted(height, lastBlockTime, []types.Evidence{evidence}) - assert.True(t, pool.IsCommitted(evidence)) } -func TestAddEvidence(t *testing.T) { - +// Tests inbound evidence for the right time and height +func TestAddExpiredEvidence(t *testing.T) { var ( - valAddr = []byte("val1") - height = int64(100002) - stateDB = initializeValidatorState(valAddr, height) - evidenceDB = dbm.NewMemDB() - pool = NewPool(stateDB, evidenceDB) - evidenceTime = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) + val = types.NewMockPV() + height = int64(30) + stateStore = initializeValidatorState(val, height) + evidenceDB = dbm.NewMemDB() + blockStore = &mocks.BlockStore{} 
+ expiredEvidenceTime = time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC) + expiredHeight = int64(2) ) + blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(func(h int64) *types.BlockMeta { + if h == height || h == expiredHeight { + return &types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}} + } + return &types.BlockMeta{Header: types.Header{Time: expiredEvidenceTime}} + }) + + pool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) + require.NoError(t, err) + testCases := []struct { evHeight int64 evTime time.Time expErr bool evDescription string }{ - {height, time.Now(), false, "valid evidence"}, - {height, evidenceTime, false, "valid evidence (despite old time)"}, - {int64(1), time.Now(), false, "valid evidence (despite old height)"}, - {int64(1), evidenceTime, true, + {height, defaultEvidenceTime, false, "valid evidence"}, + {expiredHeight, defaultEvidenceTime, false, "valid evidence (despite old height)"}, + {height - 1, expiredEvidenceTime, false, "valid evidence (despite old time)"}, + {expiredHeight - 1, expiredEvidenceTime, true, "evidence from height 1 (created at: 2019-01-01 00:00:00 +0000 UTC) is too old"}, + {height, defaultEvidenceTime.Add(1 * time.Minute), true, "evidence time and block time is different"}, } for _, tc := range testCases { tc := tc - ev := types.NewMockEvidence(tc.evHeight, tc.evTime, 0, valAddr) - err := pool.AddEvidence(ev) - if tc.expErr { - assert.Error(t, err) + t.Run(tc.evDescription, func(t *testing.T) { + ev := types.NewMockDuplicateVoteEvidenceWithValidator(tc.evHeight, tc.evTime, val, evidenceChainID) + err := pool.AddEvidence(ev) + if tc.expErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestAddEvidenceFromConsensus(t *testing.T) { + var height int64 = 10 + pool, val := defaultTestPool(height) + ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, val, evidenceChainID) + err := pool.AddEvidenceFromConsensus(ev) + assert.NoError(t, err) + next := pool.EvidenceFront() + assert.Equal(t, ev, next.Value.(types.Evidence)) + + // shouldn't be able to submit the same evidence twice + err = pool.AddEvidenceFromConsensus(ev) + assert.NoError(t, err) + evs, _ := pool.PendingEvidence(defaultEvidenceMaxBytes) + assert.Equal(t, 1, len(evs)) +} + +func TestEvidencePoolUpdate(t *testing.T) { + height := int64(21) + pool, val := defaultTestPool(height) + state := pool.State() + + // create new block (no need to save it to blockStore) + prunedEv := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultEvidenceTime.Add(1*time.Minute), + val, evidenceChainID) + err := pool.AddEvidence(prunedEv) + require.NoError(t, err) + ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime.Add(21*time.Minute), + val, evidenceChainID) + lastCommit := makeCommit(height, val.PrivKey.PubKey().Address()) + block := types.MakeBlock(height+1, []types.Tx{}, lastCommit, []types.Evidence{ev}) + // update state (partially) + state.LastBlockHeight = height + 1 + state.LastBlockTime = defaultEvidenceTime.Add(22 * time.Minute) + err = pool.CheckEvidence(types.EvidenceList{ev}) + require.NoError(t, err) + + pool.Update(state, block.Evidence.Evidence) + // a) Update marks evidence as committed so pending evidence should be empty + evList, evSize := pool.PendingEvidence(defaultEvidenceMaxBytes) + assert.Empty(t, evList) + assert.Zero(t, evSize) + + // b) If we try to check this evidence again it should fail because it has already been committed + err = 
pool.CheckEvidence(types.EvidenceList{ev}) + if assert.Error(t, err) { + assert.Equal(t, "evidence was already committed", err.(*types.ErrInvalidEvidence).Reason.Error()) + } +} + +func TestVerifyPendingEvidencePasses(t *testing.T) { + var height int64 = 1 + pool, val := defaultTestPool(height) + ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime.Add(1*time.Minute), + val, evidenceChainID) + err := pool.AddEvidence(ev) + require.NoError(t, err) + + err = pool.CheckEvidence(types.EvidenceList{ev}) + assert.NoError(t, err) +} + +func TestVerifyDuplicatedEvidenceFails(t *testing.T) { + var height int64 = 1 + pool, val := defaultTestPool(height) + ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime.Add(1*time.Minute), + val, evidenceChainID) + err := pool.CheckEvidence(types.EvidenceList{ev, ev}) + if assert.Error(t, err) { + assert.Equal(t, "duplicate evidence", err.(*types.ErrInvalidEvidence).Reason.Error()) + } +} + +// check that valid light client evidence is correctly validated and stored in +// evidence pool +func TestCheckEvidenceWithLightClientAttack(t *testing.T) { + var ( + nValidators = 5 + validatorPower int64 = 10 + height int64 = 10 + ) + conflictingVals, conflictingPrivVals := types.RandValidatorSet(nValidators, validatorPower) + trustedHeader := makeHeaderRandom(height) + trustedHeader.Time = defaultEvidenceTime + + conflictingHeader := makeHeaderRandom(height) + conflictingHeader.ValidatorsHash = conflictingVals.Hash() + + trustedHeader.ValidatorsHash = conflictingHeader.ValidatorsHash + trustedHeader.NextValidatorsHash = conflictingHeader.NextValidatorsHash + trustedHeader.ConsensusHash = conflictingHeader.ConsensusHash + trustedHeader.AppHash = conflictingHeader.AppHash + trustedHeader.LastResultsHash = conflictingHeader.LastResultsHash + + // for simplicity we are simulating a duplicate vote attack where all the validators in the + // conflictingVals set voted twice + blockID := makeBlockID(conflictingHeader.Hash(), 1000, []byte("partshash")) + voteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals) + commit, err := types.MakeCommit(blockID, height, 1, voteSet, conflictingPrivVals, defaultEvidenceTime) + require.NoError(t, err) + ev := &types.LightClientAttackEvidence{ + ConflictingBlock: &types.LightBlock{ + SignedHeader: &types.SignedHeader{ + Header: conflictingHeader, + Commit: commit, + }, + ValidatorSet: conflictingVals, + }, + CommonHeight: 10, + TotalVotingPower: int64(nValidators) * validatorPower, + ByzantineValidators: conflictingVals.Validators, + Timestamp: defaultEvidenceTime, + } + + trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) + trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals) + trustedCommit, err := types.MakeCommit(trustedBlockID, height, 1, trustedVoteSet, conflictingPrivVals, + defaultEvidenceTime) + require.NoError(t, err) + + state := sm.State{ + LastBlockTime: defaultEvidenceTime.Add(1 * time.Minute), + LastBlockHeight: 11, + ConsensusParams: *types.DefaultConsensusParams(), + } + stateStore := &smmocks.Store{} + stateStore.On("LoadValidators", height).Return(conflictingVals, nil) + stateStore.On("Load").Return(state, nil) + blockStore := &mocks.BlockStore{} + blockStore.On("LoadBlockMeta", height).Return(&types.BlockMeta{Header: *trustedHeader}) + blockStore.On("LoadBlockCommit", height).Return(trustedCommit) + + pool, err := evidence.NewPool(dbm.NewMemDB(), 
stateStore, blockStore)
+	require.NoError(t, err)
+	pool.SetLogger(log.TestingLogger())
+
+	err = pool.AddEvidence(ev)
+	assert.NoError(t, err)
+
+	err = pool.CheckEvidence(types.EvidenceList{ev})
+	assert.NoError(t, err)
+
+	// take away the last signature -> there are fewer validators than we have detected,
+	// hence this should fail
+	commit.Signatures = append(commit.Signatures[:nValidators-1], types.NewCommitSigAbsent())
+	err = pool.CheckEvidence(types.EvidenceList{ev})
+	assert.Error(t, err)
+}
+
+// Tests that restarting the evidence pool after a potential failure will recover the
+// pending evidence and continue to gossip it
+func TestRecoverPendingEvidence(t *testing.T) {
+	height := int64(10)
+	val := types.NewMockPV()
+	valAddress := val.PrivKey.PubKey().Address()
+	evidenceDB := dbm.NewMemDB()
+	stateStore := initializeValidatorState(val, height)
+	state, err := stateStore.Load()
+	require.NoError(t, err)
+	blockStore := initializeBlockStore(dbm.NewMemDB(), state, valAddress)
+	// create previous pool and populate it
+	pool, err := evidence.NewPool(evidenceDB, stateStore, blockStore)
+	require.NoError(t, err)
+	pool.SetLogger(log.TestingLogger())
+	goodEvidence := types.NewMockDuplicateVoteEvidenceWithValidator(height,
+		defaultEvidenceTime.Add(10*time.Minute), val, evidenceChainID)
+	expiredEvidence := types.NewMockDuplicateVoteEvidenceWithValidator(int64(1),
+		defaultEvidenceTime.Add(1*time.Minute), val, evidenceChainID)
+	err = pool.AddEvidence(goodEvidence)
+	require.NoError(t, err)
+	err = pool.AddEvidence(expiredEvidence)
+	require.NoError(t, err)
+
+	// now recover from the previous pool at a different time
+	newStateStore := &smmocks.Store{}
+	newStateStore.On("Load").Return(sm.State{
+		LastBlockTime:   defaultEvidenceTime.Add(25 * time.Minute),
+		LastBlockHeight: height + 15,
+		ConsensusParams: tmproto.ConsensusParams{
+			Block: tmproto.BlockParams{
+				MaxBytes: 22020096,
+				MaxGas:   -1,
+			},
+			Evidence: tmproto.EvidenceParams{
+				MaxAgeNumBlocks: 20,
+				MaxAgeDuration:  20 * time.Minute,
+				MaxBytes:        1000,
+			},
+		},
+	}, nil)
+	newPool, err := evidence.NewPool(evidenceDB, newStateStore, blockStore)
+	assert.NoError(t, err)
+	evList, _ := newPool.PendingEvidence(defaultEvidenceMaxBytes)
+	assert.Equal(t, 1, len(evList))
+	next := newPool.EvidenceFront()
+	assert.Equal(t, goodEvidence, next.Value.(types.Evidence))
+}
+
+func initializeStateFromValidatorSet(valSet *types.ValidatorSet, height int64) sm.Store {
+	stateDB := dbm.NewMemDB()
+	stateStore := sm.NewStore(stateDB)
+	state := sm.State{
+		ChainID:                     evidenceChainID,
+		InitialHeight:               1,
+		LastBlockHeight:             height,
+		LastBlockTime:               defaultEvidenceTime,
+		Validators:                  valSet,
+		NextValidators:              valSet.CopyIncrementProposerPriority(1),
+		LastValidators:              valSet,
+		LastHeightValidatorsChanged: 1,
+		ConsensusParams: tmproto.ConsensusParams{
+			Block: tmproto.BlockParams{
+				MaxBytes: 22020096,
+				MaxGas:   -1,
+			},
+			Evidence: tmproto.EvidenceParams{
+				MaxAgeNumBlocks: 20,
+				MaxAgeDuration:  20 * time.Minute,
+				MaxBytes:        1000,
+			},
+		},
+	}
+
+	// save all states up to height
+	for i := int64(0); i <= height; i++ {
+		state.LastBlockHeight = i
+		if err := stateStore.Save(state); err != nil {
+			panic(err)
+		}
+	}
+
+	return stateStore
+}
+
+func initializeValidatorState(privVal types.PrivValidator, height int64) sm.Store {
+
+	pubKey, _ := privVal.GetPubKey()
+	validator := &types.Validator{Address: pubKey.Address(), VotingPower: 10, PubKey: pubKey}
+
+	// create validator set and state
+	valSet := &types.ValidatorSet{
+		Validators: 
[]*types.Validator{validator},
+		Proposer:   validator,
+	}
+
+	return initializeStateFromValidatorSet(valSet, height)
+}
+
+// initializeBlockStore creates a block storage and populates it with a dummy
+// block at every height up to state.LastBlockHeight.
+func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) *store.BlockStore {
+	blockStore := store.NewBlockStore(db)
+
+	for i := int64(1); i <= state.LastBlockHeight; i++ {
+		lastCommit := makeCommit(i-1, valAddr)
+		block, _ := state.MakeBlock(i, []types.Tx{}, lastCommit, nil,
+			state.Validators.GetProposer().Address)
+		block.Header.Time = defaultEvidenceTime.Add(time.Duration(i) * time.Minute)
+		block.Header.Version = tmversion.Consensus{Block: version.BlockProtocol, App: 1}
+		const parts = 1
+		partSet := block.MakePartSet(parts)
+
+		seenCommit := makeCommit(i, valAddr)
+		blockStore.SaveBlock(block, partSet, seenCommit)
+	}
+
+	return blockStore
+}
+
+func makeCommit(height int64, valAddr []byte) *types.Commit {
+	commitSigs := []types.CommitSig{{
+		BlockIDFlag:      types.BlockIDFlagCommit,
+		ValidatorAddress: valAddr,
+		Timestamp:        defaultEvidenceTime,
+		Signature:        []byte("Signature"),
+	}}
+	return types.NewCommit(height, 0, types.BlockID{}, commitSigs)
+}
+
+func defaultTestPool(height int64) (*evidence.Pool, types.MockPV) {
+	val := types.NewMockPV()
+	valAddress := val.PrivKey.PubKey().Address()
+	evidenceDB := dbm.NewMemDB()
+	stateStore := initializeValidatorState(val, height)
+	state, _ := stateStore.Load()
+	blockStore := initializeBlockStore(dbm.NewMemDB(), state, valAddress)
+	pool, err := evidence.NewPool(evidenceDB, stateStore, blockStore)
+	if err != nil {
+		panic("test evidence pool could not be created")
+	}
+	pool.SetLogger(log.TestingLogger())
+	return pool, val
+}
+
+func createState(height int64, valSet *types.ValidatorSet) sm.State {
+	return sm.State{
+		ChainID:         evidenceChainID,
+		LastBlockHeight: height,
+		LastBlockTime:   defaultEvidenceTime,
+		Validators:      valSet,
+		ConsensusParams: *types.DefaultConsensusParams(),
+	}
 }
diff --git a/evidence/reactor.go b/evidence/reactor.go
index 26343638a..951d64d71 100644
--- a/evidence/reactor.go
+++ b/evidence/reactor.go
@@ -2,14 +2,12 @@ package evidence
 
 import (
 	"fmt"
-	"reflect"
 	"time"
 
-	amino "github.com/tendermint/go-amino"
-
 	clist "github.com/tendermint/tendermint/libs/clist"
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/p2p"
+	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 	"github.com/tendermint/tendermint/types"
 )
 
@@ -18,8 +16,13 @@ const (
 	maxMsgSize = 1048576 // 1MB TODO make it configurable
 
-	broadcastEvidenceIntervalS = 60  // broadcast uncommitted evidence this often
-	peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount
+	// broadcast all uncommitted evidence this often. This sets when the reactor
+	// goes back to the start of the list and begins sending the evidence again.
+	// Most evidence should be committed in the very next block, which is why we
+	// wait just over the block production rate before sending evidence again.
+	broadcastEvidenceIntervalS = 10
+	// If a message fails, wait this long before sending it again
+	peerRetryMessageIntervalMS = 100
 )
 
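The pacing implied by these constants can be summarised as: send, then wait roughly one broadcast interval on success, or a short retry delay on a failed send. A stripped-down sketch of that loop (the send function is a stand-in; the real routine, shown later in this diff, walks a clist of evidence):

```go
package main

import (
	"fmt"
	"time"
)

// gossip sketches the pacing used by broadcastEvidenceRoutine: a failed send
// is retried quickly, a successful one waits a full broadcast interval.
func gossip(send func() bool, stop <-chan struct{}) {
	const (
		broadcastInterval = 10 * time.Second
		retryInterval     = 100 * time.Millisecond
	)
	for {
		if !send() {
			time.Sleep(retryInterval) // peer busy: retry soon
			continue
		}
		select {
		case <-time.After(broadcastInterval): // evidence usually commits within ~1 block
		case <-stop:
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	go gossip(func() bool { fmt.Println("sent"); return true }, stop)
	time.Sleep(50 * time.Millisecond)
	close(stop)
}
```

 // Reactor handles evpool evidence broadcasting amongst peers.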
@@ -49,8 +52,9 @@ func (evR *Reactor) SetLogger(l log.Logger) {
 func (evR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
 	return []*p2p.ChannelDescriptor{
 		{
-			ID:       EvidenceChannel,
-			Priority: 5,
+			ID:                  EvidenceChannel,
+			Priority:            5,
+			RecvMessageCapacity: maxMsgSize,
 		},
 	}
 }
@@ -62,42 +66,29 @@ func (evR *Reactor) AddPeer(peer p2p.Peer) {
 
 // Receive implements Reactor.
 // It adds any received evidence to the evpool.
+// XXX: do not call any methods that can block or incur heavy processing.
+// https://github.com/tendermint/tendermint/issues/2888
 func (evR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-	msg, err := decodeMsg(msgBytes)
+	evis, err := decodeMsg(msgBytes)
 	if err != nil {
-		evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
+		evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err, "bytes", msgBytes)
 		evR.Switch.StopPeerForError(src, err)
 		return
 	}
 
-	if err = msg.ValidateBasic(); err != nil {
-		evR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
-		evR.Switch.StopPeerForError(src, err)
-		return
-	}
-
-	evR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
-
-	switch msg := msg.(type) {
-	case *ListMessage:
-		for _, ev := range msg.Evidence {
-			err := evR.evpool.AddEvidence(ev)
-			switch err.(type) {
-			case ErrInvalidEvidence:
-				evR.Logger.Error("Evidence is not valid", "evidence", msg.Evidence, "err", err)
-				// punish peer
-				evR.Switch.StopPeerForError(src, err)
-				return
-			case ErrEvidenceAlreadyStored:
-				evR.Logger.Debug("Evidence already exists", "evidence", msg.Evidence)
-			case nil:
-			default:
-				evR.Logger.Error("Evidence has not been added", "evidence", msg.Evidence, "err", err)
-				return
-			}
+	for _, ev := range evis {
+		err := evR.evpool.AddEvidence(ev)
+		switch err.(type) {
+		case *types.ErrInvalidEvidence:
+			evR.Logger.Error(err.Error())
+			// punish peer
+			evR.Switch.StopPeerForError(src, err)
+			return
+		case nil:
+		default:
+			// continue to the next piece of evidence
+			evR.Logger.Error("Evidence has not been added", "evidence", evis, "err", err)
 		}
-	default:
-		evR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
 	}
 }
 
@@ -132,15 +123,18 @@ func (evR *Reactor) broadcastEvidenceRoutine(peer p2p.Peer) {
 		}
 
 		ev := next.Value.(types.Evidence)
-		msg, retry := evR.checkSendEvidenceMessage(peer, ev)
-		if msg != nil {
-			success := peer.Send(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
-			retry = !success
-		}
-
-		if retry {
-			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
-			continue
+		evis := evR.prepareEvidenceMessage(peer, ev)
+		if len(evis) > 0 {
+			msgBytes, err := encodeMsg(evis)
+			if err != nil {
+				panic(err)
+			}
+			evR.Logger.Debug("Gossiping evidence to peer", "ev", ev, "peer", peer.ID())
+			success := peer.Send(EvidenceChannel, msgBytes)
+			if !success {
+				time.Sleep(peerRetryMessageIntervalMS * time.Millisecond)
+				continue
+			}
 		}
 
 		afterCh := time.After(time.Second * broadcastEvidenceIntervalS)
@@ -160,12 +154,12 @@ func (evR *Reactor) broadcastEvidenceRoutine(peer p2p.Peer) {
 	}
 }
 
-// Returns the message to send the peer, or nil if the evidence is invalid for the peer.
-// If message is nil, return true if we should sleep and try again.
-func (evR Reactor) checkSendEvidenceMessage(
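The gating that prepareEvidenceMessage applies (peer height and evidence age) reduces to a simple window. A small self-contained restatement of that condition, with an illustrative helper name that is not part of the diff:

```go
package main

import "fmt"

// shouldSend restates the gating applied in prepareEvidenceMessage: evidence
// goes out only while evHeight < peerHeight <= evHeight + maxAgeNumBlocks.
func shouldSend(peerHeight, evHeight, maxAgeNumBlocks int64) bool {
	if peerHeight <= evHeight { // peer hasn't reached the evidence height yet
		return false
	}
	return peerHeight-evHeight <= maxAgeNumBlocks // otherwise too old relative to the peer
}

func main() {
	fmt.Println(shouldSend(10, 9, 20))  // true
	fmt.Println(shouldSend(9, 9, 20))   // false: peer is behind, retry later
	fmt.Println(shouldSend(100, 9, 20)) // false: evidence too old for this peer
}
```

+// Returns the evidence to send to the peer, or nil if the evidence is invalid or stale for the peer.
+// If nil is returned, the caller should sleep and try again.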
+func (evR Reactor) prepareEvidenceMessage(
 	peer p2p.Peer,
 	ev types.Evidence,
-) (msg Message, retry bool) {
+) (evis []types.Evidence) {
 
 	// make sure the peer is up to date
 	evHeight := ev.Height()
@@ -176,26 +170,20 @@ func (evR Reactor) checkSendEvidenceMessage(
 		// different every time due to us using a map. Sometimes other reactors
 		// will be initialized before the consensus reactor. We should wait a few
 		// milliseconds and retry.
-		return nil, true
+		return nil
 	}
 
 	// NOTE: We only send evidence to peers where
 	// peerHeight - maxAge < evidenceHeight < peerHeight
-	// and
-	// lastBlockTime - maxDuration < evidenceTime
 	var (
-		peerHeight = peerState.GetHeight()
-
-		params = evR.evpool.State().ConsensusParams.Evidence
-
-		ageDuration = evR.evpool.State().LastBlockTime.Sub(ev.Time())
+		peerHeight   = peerState.GetHeight()
+		params       = evR.evpool.State().ConsensusParams.Evidence
 		ageNumBlocks = peerHeight - evHeight
 	)
 
-	if peerHeight < evHeight { // peer is behind. sleep while he catches up
-		return nil, true
-	} else if ageNumBlocks > params.MaxAgeNumBlocks &&
-		ageDuration > params.MaxAgeDuration { // evidence is too old, skip
+	if peerHeight <= evHeight { // peer is behind. sleep while it catches up
+		return nil
+	} else if ageNumBlocks > params.MaxAgeNumBlocks { // evidence is too old relative to the peer, skip
 
 		// NOTE: if evidence is too old for an honest peer, then we're behind and
 		// either it already got committed or it never will!
@@ -204,17 +192,15 @@ func (evR Reactor) checkSendEvidenceMessage(
 			"evHeight", evHeight,
 			"maxAgeNumBlocks", params.MaxAgeNumBlocks,
 			"lastBlockTime", evR.evpool.State().LastBlockTime,
-			"evTime", ev.Time(),
 			"maxAgeDuration", params.MaxAgeDuration,
 			"peer", peer,
 		)
 
-		return nil, false
+		return nil
 	}
 
 	// send evidence
-	msg = &ListMessage{[]types.Evidence{ev}}
-	return msg, false
+	return []types.Evidence{ev}
 }
 
 // PeerState describes the state of a peer.
@@ -222,46 +208,46 @@ type PeerState interface {
 	GetHeight() int64
 }
 
-//-----------------------------------------------------------------------------
-// Messages
-
-// Message is a message sent or received by the Reactor.
-type Message interface {
-	ValidateBasic() error
-}
+// encodeMsg takes an array of evidence and
+// returns the byte encoding of the evidence list message
+func encodeMsg(evis []types.Evidence) ([]byte, error) {
+	evi := make([]tmproto.Evidence, len(evis))
+	for i := 0; i < len(evis); i++ {
+		ev, err := types.EvidenceToProto(evis[i])
+		if err != nil {
+			return nil, err
+		}
+		evi[i] = *ev
+	}
+	epl := tmproto.EvidenceList{
+		Evidence: evi,
+	}
 
-func RegisterMessages(cdc *amino.Codec) {
-	cdc.RegisterInterface((*Message)(nil), nil)
-	cdc.RegisterConcrete(&ListMessage{},
-		"tendermint/evidence/ListMessage", nil)
+	return epl.Marshal()
 }
 
-func decodeMsg(bz []byte) (msg Message, err error) {
-	if len(bz) > maxMsgSize {
-		return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
+// decodeMsg takes an array of bytes and
+// returns an array of evidence
+func decodeMsg(bz []byte) (evis []types.Evidence, err error) {
+	lm := tmproto.EvidenceList{}
+	if err := lm.Unmarshal(bz); err != nil {
+		return nil, err
 	}
-	err = cdc.UnmarshalBinaryBare(bz, &msg)
-	return
-}
-
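With the amino codec gone, the wire format is now a protobuf tmproto.EvidenceList. A hedged sketch of the full encode/decode roundtrip these two functions implement, using only the conversion helpers that appear in this diff (roundTrip itself is an illustrative name):

```go
package main

import (
	"fmt"

	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	"github.com/tendermint/tendermint/types"
)

// roundTrip sketches the wire path of encodeMsg/decodeMsg: domain evidence
// -> tmproto.EvidenceList -> bytes -> back to domain evidence.
func roundTrip(evis []types.Evidence) ([]types.Evidence, error) {
	pb := make([]tmproto.Evidence, len(evis))
	for i, ev := range evis {
		p, err := types.EvidenceToProto(ev)
		if err != nil {
			return nil, err
		}
		pb[i] = *p
	}
	bz, err := (&tmproto.EvidenceList{Evidence: pb}).Marshal()
	if err != nil {
		return nil, err
	}

	var lm tmproto.EvidenceList
	if err := lm.Unmarshal(bz); err != nil {
		return nil, err
	}
	out := make([]types.Evidence, len(lm.Evidence))
	for i := range lm.Evidence {
		if out[i], err = types.EvidenceFromProto(&lm.Evidence[i]); err != nil {
			return nil, err
		}
	}
	fmt.Printf("%d bytes on the wire\n", len(bz))
	return out, nil
}

func main() {
	if _, err := roundTrip(nil); err != nil {
		panic(err)
	}
}
```

-//-------------------------------------
-// ListMessage contains a list of evidence.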
-type ListMessage struct { - Evidence []types.Evidence -} + evis = make([]types.Evidence, len(lm.Evidence)) + for i := 0; i < len(lm.Evidence); i++ { + ev, err := types.EvidenceFromProto(&lm.Evidence[i]) + if err != nil { + return nil, err + } + evis[i] = ev + } -// ValidateBasic performs basic validation. -func (m *ListMessage) ValidateBasic() error { - for i, ev := range m.Evidence { + for i, ev := range evis { if err := ev.ValidateBasic(); err != nil { - return fmt.Errorf("invalid evidence (#%d): %v", i, err) + return nil, fmt.Errorf("invalid evidence (#%d): %v", i, err) } } - return nil -} -// String returns a string representation of the ListMessage. -func (m *ListMessage) String() string { - return fmt.Sprintf("[ListMessage %v]", m.Evidence) + return evis, nil } diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index 135c191da..170b45348 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -1,6 +1,7 @@ -package evidence +package evidence_test import ( + "encoding/hex" "fmt" "sync" "testing" @@ -8,16 +9,182 @@ import ( "github.com/go-kit/kit/log/term" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto/secp256k1" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/evidence" + "github.com/tendermint/tendermint/evidence/mocks" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) +var ( + numEvidence = 10 + timeout = 120 * time.Second // ridiculously high because CircleCI is slow +) + +// We have N evidence reactors connected to one another. The first reactor +// receives a number of evidence at varying heights. We test that all +// other reactors receive the evidence and add it to their own respective +// evidence pools. +func TestReactorBroadcastEvidence(t *testing.T) { + config := cfg.TestConfig() + N := 7 + + // create statedb for everyone + stateDBs := make([]sm.Store, N) + val := types.NewMockPV() + // we need validators saved for heights at least as high as we have evidence for + height := int64(numEvidence) + 10 + for i := 0; i < N; i++ { + stateDBs[i] = initializeValidatorState(val, height) + } + + // make reactors from statedb + reactors, pools := makeAndConnectReactorsAndPools(config, stateDBs) + + // set the peer height on each reactor + for _, r := range reactors { + for _, peer := range r.Switch.Peers().List() { + ps := peerState{height} + peer.Set(types.PeerStateKey, ps) + } + } + + // send a bunch of valid evidence to the first reactor's evpool + // and wait for them all to be received in the others + evList := sendEvidence(t, pools[0], val, numEvidence) + waitForEvidence(t, evList, pools) +} + +// We have two evidence reactors connected to one another but are at different heights. +// Reactor 1 which is ahead receives a number of evidence. It should only send the evidence +// that is below the height of the peer to that peer. 
+func TestReactorSelectiveBroadcast(t *testing.T) {
+	config := cfg.TestConfig()
+
+	val := types.NewMockPV()
+	height1 := int64(numEvidence) + 10
+	height2 := int64(numEvidence) / 2
+
+	// DB1 is ahead of DB2
+	stateDB1 := initializeValidatorState(val, height1)
+	stateDB2 := initializeValidatorState(val, height2)
+
+	// make reactors from statedb
+	reactors, pools := makeAndConnectReactorsAndPools(config, []sm.Store{stateDB1, stateDB2})
+
+	// set the peer height on each reactor
+	for _, r := range reactors {
+		for _, peer := range r.Switch.Peers().List() {
+			ps := peerState{height1}
+			peer.Set(types.PeerStateKey, ps)
+		}
+	}
+
+	// update the first reactor peer's height to be very small
+	peer := reactors[0].Switch.Peers().List()[0]
+	ps := peerState{height2}
+	peer.Set(types.PeerStateKey, ps)
+
+	// send a bunch of valid evidence to the first reactor's evpool
+	evList := sendEvidence(t, pools[0], val, numEvidence)
+
+	// only ones less than the peer's height should make it through
+	waitForEvidence(t, evList[:numEvidence/2-1], []*evidence.Pool{pools[1]})
+
+	// peers should still be connected
+	peers := reactors[1].Switch.Peers().List()
+	assert.Equal(t, 1, len(peers))
+}
+
+// This test aims to ensure that reactors don't send evidence that they have committed or that is
+// not ready for the peer, through three scenarios.
+// First, committed evidence to a newly connected peer.
+// Second, evidence to a peer that is behind.
+// Third, evidence that was pending and became committed just before the peer caught up.
+func TestReactorsGossipNoCommittedEvidence(t *testing.T) {
+	config := cfg.TestConfig()
+
+	val := types.NewMockPV()
+	var height int64 = 10
+
+	// DB1 is ahead of DB2
+	stateDB1 := initializeValidatorState(val, height-1)
+	stateDB2 := initializeValidatorState(val, height-2)
+	state, err := stateDB1.Load()
+	require.NoError(t, err)
+	state.LastBlockHeight++
+
+	// make reactors from statedb
+	reactors, pools := makeAndConnectReactorsAndPools(config, []sm.Store{stateDB1, stateDB2})
+
+	evList := sendEvidence(t, pools[0], val, 2)
+	pools[0].Update(state, evList)
+	require.EqualValues(t, uint32(0), pools[0].Size())
+
+	time.Sleep(100 * time.Millisecond)
+
+	peer := reactors[0].Switch.Peers().List()[0]
+	ps := peerState{height - 2}
+	peer.Set(types.PeerStateKey, ps)
+
+	peer = reactors[1].Switch.Peers().List()[0]
+	ps = peerState{height}
+	peer.Set(types.PeerStateKey, ps)
+
+	// wait to see that no evidence comes through
+	time.Sleep(300 * time.Millisecond)
+
+	// the second pool should not have received any evidence because it has already been committed
+	assert.Equal(t, uint32(0), pools[1].Size(), "second reactor should not have received evidence")
+
+	// the first reactor receives three more pieces of evidence
+	evList = make([]types.Evidence, 3)
+	for i := 0; i < 3; i++ {
+		ev := types.NewMockDuplicateVoteEvidenceWithValidator(height-3+int64(i),
+			time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), val, state.ChainID)
+		err := pools[0].AddEvidence(ev)
+		require.NoError(t, err)
+		evList[i] = ev
+	}
+
+	// wait to see that only one piece of evidence is sent
+	time.Sleep(300 * time.Millisecond)
+
+	// the second pool should only have received the first evidence because it is behind
+	peerEv, _ := pools[1].PendingEvidence(10000)
+	assert.EqualValues(t, []types.Evidence{evList[0]}, peerEv)
+
+	// the last evidence is committed and the second reactor catches up in state to the first
+	// reactor. 
We therefore expect that the second reactor only receives one more evidence, the + // one that is still pending and not the evidence that has already been committed. + state.LastBlockHeight++ + pools[0].Update(state, []types.Evidence{evList[2]}) + // the first reactor should have the two remaining pending evidence + require.EqualValues(t, uint32(2), pools[0].Size()) + + // now update the state of the second reactor + pools[1].Update(state, types.EvidenceList{}) + peer = reactors[0].Switch.Peers().List()[0] + ps = peerState{height} + peer.Set(types.PeerStateKey, ps) + + // wait to see that only two evidence is sent + time.Sleep(300 * time.Millisecond) + + peerEv, _ = pools[1].PendingEvidence(1000) + assert.EqualValues(t, []types.Evidence{evList[0], evList[1]}, peerEv) +} + // evidenceLogger is a TestingLogger which uses a different // color for each validator ("validator" key must exist). func evidenceLogger() log.Logger { @@ -32,15 +199,27 @@ func evidenceLogger() log.Logger { } // connect N evidence reactors through N switches -func makeAndConnectReactors(config *cfg.Config, stateDBs []dbm.DB) []*Reactor { - N := len(stateDBs) - reactors := make([]*Reactor, N) +func makeAndConnectReactorsAndPools(config *cfg.Config, stateStores []sm.Store) ([]*evidence.Reactor, + []*evidence.Pool) { + N := len(stateStores) + + reactors := make([]*evidence.Reactor, N) + pools := make([]*evidence.Pool, N) logger := evidenceLogger() - for i := 0; i < N; i++ { + evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) + for i := 0; i < N; i++ { evidenceDB := dbm.NewMemDB() - pool := NewPool(stateDBs[i], evidenceDB) - reactors[i] = NewReactor(pool) + blockStore := &mocks.BlockStore{} + blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return( + &types.BlockMeta{Header: types.Header{Time: evidenceTime}}, + ) + pool, err := evidence.NewPool(evidenceDB, stateStores[i], blockStore) + if err != nil { + panic(err) + } + pools[i] = pool + reactors[i] = evidence.NewReactor(pool) reactors[i].SetLogger(logger.With("validator", i)) } @@ -49,16 +228,17 @@ func makeAndConnectReactors(config *cfg.Config, stateDBs []dbm.DB) []*Reactor { return s }, p2p.Connect2Switches) - return reactors + + return reactors, pools } // wait for all evidence on all reactors -func waitForEvidence(t *testing.T, evs types.EvidenceList, reactors []*Reactor) { +func waitForEvidence(t *testing.T, evs types.EvidenceList, pools []*evidence.Pool) { // wait for the evidence in all evpools wg := new(sync.WaitGroup) - for i := 0; i < len(reactors); i++ { + for i := 0; i < len(pools); i++ { wg.Add(1) - go _waitForEvidence(t, wg, evs, i, reactors) + go _waitForEvidence(t, wg, evs, i, pools) } done := make(chan struct{}) @@ -67,7 +247,7 @@ func waitForEvidence(t *testing.T, evs types.EvidenceList, reactors []*Reactor) close(done) }() - timer := time.After(Timeout) + timer := time.After(timeout) select { case <-timer: t.Fatal("Timed out waiting for evidence") @@ -80,76 +260,45 @@ func _waitForEvidence( t *testing.T, wg *sync.WaitGroup, evs types.EvidenceList, - reactorIdx int, - reactors []*Reactor, + poolIdx int, + pools []*evidence.Pool, ) { - evpool := reactors[reactorIdx].evpool - for len(evpool.PendingEvidence(-1)) != len(evs) { + evpool := pools[poolIdx] + var evList []types.Evidence + currentPoolSize := 0 + for currentPoolSize != len(evs) { + evList, _ = evpool.PendingEvidence(int64(len(evs) * 500)) // each evidence should not be more than 500 bytes + currentPoolSize = len(evList) time.Sleep(time.Millisecond * 100) } - reapedEv := 
evpool.PendingEvidence(-1) // put the reaped evidence in a map so we can quickly check we got everything evMap := make(map[string]types.Evidence) - for _, e := range reapedEv { + for _, e := range evList { evMap[string(e.Hash())] = e } for i, expectedEv := range evs { gotEv := evMap[string(expectedEv.Hash())] assert.Equal(t, expectedEv, gotEv, - fmt.Sprintf("evidence at index %d on reactor %d don't match: %v vs %v", - i, reactorIdx, expectedEv, gotEv)) + fmt.Sprintf("evidence at index %d on pool %d don't match: %v vs %v", + i, poolIdx, expectedEv, gotEv)) } wg.Done() } -func sendEvidence(t *testing.T, evpool *Pool, valAddr []byte, n int) types.EvidenceList { +func sendEvidence(t *testing.T, evpool *evidence.Pool, val types.PrivValidator, n int) types.EvidenceList { evList := make([]types.Evidence, n) for i := 0; i < n; i++ { - ev := types.NewMockEvidence(int64(i+1), time.Now().UTC(), 0, valAddr) + ev := types.NewMockDuplicateVoteEvidenceWithValidator(int64(i+1), + time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), val, evidenceChainID) err := evpool.AddEvidence(ev) - assert.Nil(t, err) + require.NoError(t, err) evList[i] = ev } return evList } -var ( - NumEvidence = 10 - Timeout = 120 * time.Second // ridiculously high because CircleCI is slow -) - -func TestReactorBroadcastEvidence(t *testing.T) { - config := cfg.TestConfig() - N := 7 - - // create statedb for everyone - stateDBs := make([]dbm.DB, N) - valAddr := []byte("myval") - // we need validators saved for heights at least as high as we have evidence for - height := int64(NumEvidence) + 10 - for i := 0; i < N; i++ { - stateDBs[i] = initializeValidatorState(valAddr, height) - } - - // make reactors from statedb - reactors := makeAndConnectReactors(config, stateDBs) - - // set the peer height on each reactor - for _, r := range reactors { - for _, peer := range r.Switch.Peers().List() { - ps := peerState{height} - peer.Set(types.PeerStateKey, ps) - } - } - - // send a bunch of valid evidence to the first reactor's evpool - // and wait for them all to be received in the others - evList := sendEvidence(t, reactors[0].evpool, valAddr, NumEvidence) - waitForEvidence(t, evList, reactors) -} - type peerState struct { height int64 } @@ -158,68 +307,73 @@ func (ps peerState) GetHeight() int64 { return ps.height } -func TestReactorSelectiveBroadcast(t *testing.T) { - config := cfg.TestConfig() - - valAddr := []byte("myval") - height1 := int64(NumEvidence) + 10 - height2 := int64(NumEvidence) / 2 - - // DB1 is ahead of DB2 - stateDB1 := initializeValidatorState(valAddr, height1) - stateDB2 := initializeValidatorState(valAddr, height2) - - // make reactors from statedb - reactors := makeAndConnectReactors(config, []dbm.DB{stateDB1, stateDB2}) +func exampleVote(t byte) *types.Vote { + var stamp, err = time.Parse(types.TimeFormat, "2017-12-25T03:00:01.234Z") + if err != nil { + panic(err) + } - // set the peer height on each reactor - for _, r := range reactors { - for _, peer := range r.Switch.Peers().List() { - ps := peerState{height1} - peer.Set(types.PeerStateKey, ps) - } + return &types.Vote{ + Type: tmproto.SignedMsgType(t), + Height: 3, + Round: 2, + Timestamp: stamp, + BlockID: types.BlockID{ + Hash: tmhash.Sum([]byte("blockID_hash")), + PartSetHeader: types.PartSetHeader{ + Total: 1000000, + Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + }, + }, + ValidatorAddress: crypto.AddressHash([]byte("validator_address")), + ValidatorIndex: 56789, } +} - // update the first reactor peer's height to be very small - peer := 
reactors[0].Switch.Peers().List()[0] - ps := peerState{height2} - peer.Set(types.PeerStateKey, ps) +// nolint:lll //ignore line length for tests +func TestEvidenceVectors(t *testing.T) { - // send a bunch of valid evidence to the first reactor's evpool - evList := sendEvidence(t, reactors[0].evpool, valAddr, NumEvidence) + val := &types.Validator{ + Address: crypto.AddressHash([]byte("validator_address")), + VotingPower: 10, + } - // only ones less than the peers height should make it through - waitForEvidence(t, evList[:NumEvidence/2], reactors[1:2]) + valSet := types.NewValidatorSet([]*types.Validator{val}) - // peers should still be connected - peers := reactors[1].Switch.Peers().List() - assert.Equal(t, 1, len(peers)) -} -func TestListMessageValidationBasic(t *testing.T) { + dupl := types.NewDuplicateVoteEvidence( + exampleVote(1), + exampleVote(2), + defaultEvidenceTime, + valSet, + ) testCases := []struct { - testName string - malleateEvListMsg func(*ListMessage) - expectErr bool + testName string + evidenceList []types.Evidence + expBytes string }{ - {"Good ListMessage", func(evList *ListMessage) {}, false}, - {"Invalid ListMessage", func(evList *ListMessage) { - evList.Evidence = append(evList.Evidence, - &types.DuplicateVoteEvidence{PubKey: secp256k1.GenPrivKey().PubKey()}) - }, true}, + {"DuplicateVoteEvidence", []types.Evidence{dupl}, "0a85020a82020a79080210031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb031279080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb03180a200a2a060880dbaae105"}, } + for _, tc := range testCases { tc := tc - t.Run(tc.testName, func(t *testing.T) { - evListMsg := &ListMessage{} - n := 3 - valAddr := []byte("myval") - evListMsg.Evidence = make([]types.Evidence, n) - for i := 0; i < n; i++ { - evListMsg.Evidence[i] = types.NewMockEvidence(int64(i+1), time.Now(), 0, valAddr) - } - tc.malleateEvListMsg(evListMsg) - assert.Equal(t, tc.expectErr, evListMsg.ValidateBasic() != nil, "Validate Basic had an unexpected result") - }) + + evi := make([]tmproto.Evidence, len(tc.evidenceList)) + for i := 0; i < len(tc.evidenceList); i++ { + ev, err := types.EvidenceToProto(tc.evidenceList[i]) + require.NoError(t, err, tc.testName) + evi[i] = *ev + } + + epl := tmproto.EvidenceList{ + Evidence: evi, + } + + bz, err := epl.Marshal() + require.NoError(t, err, tc.testName) + + require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) + } + } diff --git a/evidence/services.go b/evidence/services.go new file mode 100644 index 000000000..5e15b38b4 --- /dev/null +++ b/evidence/services.go @@ -0,0 +1,12 @@ +package evidence + +import ( + "github.com/tendermint/tendermint/types" +) + +//go:generate mockery --case underscore --name BlockStore + +type BlockStore interface { + LoadBlockMeta(height int64) *types.BlockMeta + LoadBlockCommit(height int64) *types.Commit +} diff --git a/evidence/store.go b/evidence/store.go deleted file mode 100644 index f01e9de5f..000000000 --- a/evidence/store.go +++ /dev/null @@ -1,222 +0,0 @@ -package evidence - -import ( - "fmt" - - dbm "github.com/tendermint/tm-db" - - "github.com/tendermint/tendermint/types" -) - -/* -Requirements: - - Valid new evidence must be 
persisted immediately and never forgotten - - Uncommitted evidence must be continuously broadcast - - Uncommitted evidence has a partial order, the evidence's priority - -Impl: - - First commit atomically in outqueue, pending, lookup. - - Once broadcast, remove from outqueue. No need to sync - - Once committed, atomically remove from pending and update lookup. - -Schema for indexing evidence (note you need both height and hash to find a piece of evidence): - -"evidence-lookup"// -> Info -"evidence-outqueue"/// -> Info -"evidence-pending"// -> Info -*/ - -type Info struct { - Committed bool - Priority int64 - Evidence types.Evidence -} - -const ( - baseKeyLookup = "evidence-lookup" // all evidence - baseKeyOutqueue = "evidence-outqueue" // not-yet broadcast - baseKeyPending = "evidence-pending" // broadcast but not committed -) - -func keyLookup(evidence types.Evidence) []byte { - return keyLookupFromHeightAndHash(evidence.Height(), evidence.Hash()) -} - -// big endian padded hex -func bE(h int64) string { - return fmt.Sprintf("%0.16X", h) -} - -func keyLookupFromHeightAndHash(height int64, hash []byte) []byte { - return _key("%s/%s/%X", baseKeyLookup, bE(height), hash) -} - -func keyOutqueue(evidence types.Evidence, priority int64) []byte { - return _key("%s/%s/%s/%X", baseKeyOutqueue, bE(priority), bE(evidence.Height()), evidence.Hash()) -} - -func keyPending(evidence types.Evidence) []byte { - return _key("%s/%s/%X", baseKeyPending, bE(evidence.Height()), evidence.Hash()) -} - -func _key(format string, o ...interface{}) []byte { - return []byte(fmt.Sprintf(format, o...)) -} - -// Store is a store of all the evidence we've seen, including -// evidence that has been committed, evidence that has been verified but not broadcast, -// and evidence that has been broadcast but not yet committed. -type Store struct { - db dbm.DB -} - -func NewStore(db dbm.DB) *Store { - return &Store{ - db: db, - } -} - -// PriorityEvidence returns the evidence from the outqueue, sorted by highest priority. -func (store *Store) PriorityEvidence() (evidence []types.Evidence) { - // reverse the order so highest priority is first - l := store.listEvidence(baseKeyOutqueue, -1) - for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 { - l[i], l[j] = l[j], l[i] - } - - return l -} - -// PendingEvidence returns up to maxNum known, uncommitted evidence. -// If maxNum is -1, all evidence is returned. -func (store *Store) PendingEvidence(maxNum int64) (evidence []types.Evidence) { - return store.listEvidence(baseKeyPending, maxNum) -} - -// listEvidence lists up to maxNum pieces of evidence for the given prefix key. -// It is wrapped by PriorityEvidence and PendingEvidence for convenience. -// If maxNum is -1, there's no cap on the size of returned evidence. -func (store *Store) listEvidence(prefixKey string, maxNum int64) (evidence []types.Evidence) { - var count int64 - iter, err := dbm.IteratePrefix(store.db, []byte(prefixKey)) - if err != nil { - panic(err) - } - defer iter.Close() - for ; iter.Valid(); iter.Next() { - val := iter.Value() - - if count == maxNum { - return evidence - } - count++ - - var ei Info - err := cdc.UnmarshalBinaryBare(val, &ei) - if err != nil { - panic(err) - } - evidence = append(evidence, ei.Evidence) - } - return evidence -} - -// GetInfo fetches the Info with the given height and hash. -// If not found, ei.Evidence is nil. 
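One detail of the removed schema worth keeping in mind before GetInfo below: the zero-padded big-endian hex encoding produced by bE is what makes plain byte-wise prefix iteration in listEvidence come back in height order. A minimal standalone sketch of that property (standard library only; not part of this diff):

```go
package main

import "fmt"

// bE mirrors the removed store's big-endian padded hex encoding. Fixed-width
// keys make lexicographic (byte-wise) DB iteration agree with numeric order.
func bE(h int64) string {
	return fmt.Sprintf("%0.16X", h)
}

func main() {
	// Unpadded keys would sort "10" before "9"; the 16-hex-digit padding
	// keeps key order and height order in sync.
	fmt.Println(bE(9) < bE(10))   // true: "0000000000000009" < "000000000000000A"
	fmt.Println(bE(99) < bE(100)) // true: "...63" < "...64"
}
```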
-func (store *Store) GetInfo(height int64, hash []byte) Info { - key := keyLookupFromHeightAndHash(height, hash) - val, err := store.db.Get(key) - if err != nil { - panic(err) - } - if len(val) == 0 { - return Info{} - } - var ei Info - err = cdc.UnmarshalBinaryBare(val, &ei) - if err != nil { - panic(err) - } - return ei -} - -// Has checks if the evidence is already stored -func (store *Store) Has(evidence types.Evidence) bool { - key := keyLookup(evidence) - ok, _ := store.db.Has(key) - return ok -} - -// AddNewEvidence adds the given evidence to the database. -// It returns false if the evidence is already stored. -func (store *Store) AddNewEvidence(evidence types.Evidence, priority int64) (bool, error) { - // check if we already have seen it - if store.Has(evidence) { - return false, nil - } - - ei := Info{ - Committed: false, - Priority: priority, - Evidence: evidence, - } - eiBytes := cdc.MustMarshalBinaryBare(ei) - - // add it to the store - var err error - key := keyOutqueue(evidence, priority) - if err = store.db.Set(key, eiBytes); err != nil { - return false, err - } - - key = keyPending(evidence) - if err = store.db.Set(key, eiBytes); err != nil { - return false, err - } - - key = keyLookup(evidence) - if err = store.db.SetSync(key, eiBytes); err != nil { - return false, err - } - - return true, nil -} - -// MarkEvidenceAsBroadcasted removes evidence from Outqueue. -func (store *Store) MarkEvidenceAsBroadcasted(evidence types.Evidence) { - ei := store.getInfo(evidence) - if ei.Evidence == nil { - // nothing to do; we did not store the evidence yet (AddNewEvidence): - return - } - // remove from the outqueue - key := keyOutqueue(evidence, ei.Priority) - store.db.Delete(key) -} - -// MarkEvidenceAsCommitted removes evidence from pending and outqueue and sets the state to committed. -func (store *Store) MarkEvidenceAsCommitted(evidence types.Evidence) { - // if its committed, its been broadcast - store.MarkEvidenceAsBroadcasted(evidence) - - pendingKey := keyPending(evidence) - store.db.Delete(pendingKey) - - // committed Info doens't need priority - ei := Info{ - Committed: true, - Evidence: evidence, - Priority: 0, - } - - lookupKey := keyLookup(evidence) - store.db.SetSync(lookupKey, cdc.MustMarshalBinaryBare(ei)) -} - -//--------------------------------------------------- -// utils - -// getInfo is convenience for calling GetInfo if we have the full evidence. 
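For orientation, the bucket lifecycle that AddNewEvidence, MarkEvidenceAsBroadcasted and MarkEvidenceAsCommitted implement above reduces to a small in-memory model. The bucket names mirror the removed schema, but the type and helpers here are illustrative only, not the diff's code:

```go
package main

import "fmt"

// Toy model of the removed store's three buckets, keyed by evidence hash.
type buckets struct {
	outqueue map[string]bool // verified but not yet broadcast
	pending  map[string]bool // broadcast but not yet committed
	lookup   map[string]bool // all seen evidence; value = committed?
}

func (b *buckets) add(h string)       { b.outqueue[h], b.pending[h], b.lookup[h] = true, true, false }
func (b *buckets) broadcast(h string) { delete(b.outqueue, h) }
func (b *buckets) commit(h string) {
	delete(b.outqueue, h) // committing implies broadcast
	delete(b.pending, h)
	b.lookup[h] = true
}

func main() {
	b := &buckets{map[string]bool{}, map[string]bool{}, map[string]bool{}}
	b.add("ev1")
	b.broadcast("ev1")
	fmt.Println(len(b.outqueue), len(b.pending)) // 0 1: still pending until committed
	b.commit("ev1")
	fmt.Println(len(b.pending), b.lookup["ev1"]) // 0 true
}
```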
-func (store *Store) getInfo(evidence types.Evidence) Info { - return store.GetInfo(evidence.Height(), evidence.Hash()) -} diff --git a/evidence/store_test.go b/evidence/store_test.go deleted file mode 100644 index 1d45f09a1..000000000 --- a/evidence/store_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package evidence - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - - "github.com/tendermint/tendermint/types" -) - -//------------------------------------------- - -func TestStoreAddDuplicate(t *testing.T) { - assert := assert.New(t) - - db := dbm.NewMemDB() - store := NewStore(db) - - priority := int64(10) - ev := types.NewMockEvidence(2, time.Now().UTC(), 1, []byte("val1")) - - added, err := store.AddNewEvidence(ev, priority) - require.NoError(t, err) - assert.True(added) - - // cant add twice - added, err = store.AddNewEvidence(ev, priority) - require.NoError(t, err) - assert.False(added) -} - -func TestStoreCommitDuplicate(t *testing.T) { - assert := assert.New(t) - - db := dbm.NewMemDB() - store := NewStore(db) - - priority := int64(10) - ev := types.NewMockEvidence(2, time.Now().UTC(), 1, []byte("val1")) - - store.MarkEvidenceAsCommitted(ev) - - added, err := store.AddNewEvidence(ev, priority) - require.NoError(t, err) - assert.False(added) -} - -func TestStoreMark(t *testing.T) { - assert := assert.New(t) - - db := dbm.NewMemDB() - store := NewStore(db) - - // before we do anything, priority/pending are empty - priorityEv := store.PriorityEvidence() - pendingEv := store.PendingEvidence(-1) - assert.Equal(0, len(priorityEv)) - assert.Equal(0, len(pendingEv)) - - priority := int64(10) - ev := types.NewMockEvidence(2, time.Now().UTC(), 1, []byte("val1")) - - added, err := store.AddNewEvidence(ev, priority) - require.NoError(t, err) - assert.True(added) - - // get the evidence. verify. 
should be uncommitted - ei := store.GetInfo(ev.Height(), ev.Hash()) - assert.Equal(ev, ei.Evidence) - assert.Equal(priority, ei.Priority) - assert.False(ei.Committed) - - // new evidence should be returns in priority/pending - priorityEv = store.PriorityEvidence() - pendingEv = store.PendingEvidence(-1) - assert.Equal(1, len(priorityEv)) - assert.Equal(1, len(pendingEv)) - - // priority is now empty - store.MarkEvidenceAsBroadcasted(ev) - priorityEv = store.PriorityEvidence() - pendingEv = store.PendingEvidence(-1) - assert.Equal(0, len(priorityEv)) - assert.Equal(1, len(pendingEv)) - - // priority and pending are now empty - store.MarkEvidenceAsCommitted(ev) - priorityEv = store.PriorityEvidence() - pendingEv = store.PendingEvidence(-1) - assert.Equal(0, len(priorityEv)) - assert.Equal(0, len(pendingEv)) - - // evidence should show committed - newPriority := int64(0) - ei = store.GetInfo(ev.Height(), ev.Hash()) - assert.Equal(ev, ei.Evidence) - assert.Equal(newPriority, ei.Priority) - assert.True(ei.Committed) -} - -func TestStorePriority(t *testing.T) { - assert := assert.New(t) - - db := dbm.NewMemDB() - store := NewStore(db) - - // sorted by priority and then height - cases := []struct { - ev types.MockEvidence - priority int64 - }{ - {types.NewMockEvidence(2, time.Now().UTC(), 1, []byte("val1")), 17}, - {types.NewMockEvidence(5, time.Now().UTC(), 2, []byte("val2")), 15}, - {types.NewMockEvidence(10, time.Now().UTC(), 2, []byte("val2")), 13}, - {types.NewMockEvidence(100, time.Now().UTC(), 2, []byte("val2")), 11}, - {types.NewMockEvidence(90, time.Now().UTC(), 2, []byte("val2")), 11}, - {types.NewMockEvidence(80, time.Now().UTC(), 2, []byte("val2")), 11}, - } - - for _, c := range cases { - added, err := store.AddNewEvidence(c.ev, c.priority) - require.NoError(t, err) - assert.True(added) - } - - evList := store.PriorityEvidence() - for i, ev := range evList { - assert.Equal(ev, cases[i].ev) - } -} diff --git a/evidence/verify.go b/evidence/verify.go new file mode 100644 index 000000000..0721ade9a --- /dev/null +++ b/evidence/verify.go @@ -0,0 +1,252 @@ +package evidence + +import ( + "bytes" + "errors" + "fmt" + "sort" + "time" + + "github.com/tendermint/tendermint/light" + "github.com/tendermint/tendermint/types" +) + +// verify verifies the evidence fully by checking: +// - It has not already been committed +// - it is sufficiently recent (MaxAge) +// - it is from a key who was a validator at the given height +// - it is internally consistent with state +// - it was properly signed by the alleged equivocator and meets the individual evidence verification requirements +func (evpool *Pool) verify(evidence types.Evidence) error { + var ( + state = evpool.State() + height = state.LastBlockHeight + evidenceParams = state.ConsensusParams.Evidence + ageNumBlocks = height - evidence.Height() + ) + + // verify the time of the evidence + blockMeta := evpool.blockStore.LoadBlockMeta(evidence.Height()) + if blockMeta == nil { + return fmt.Errorf("don't have header #%d", evidence.Height()) + } + evTime := blockMeta.Header.Time + if evidence.Time() != evTime { + return fmt.Errorf("evidence has a different time to the block it is associated with (%v != %v)", + evidence.Time(), evTime) + } + ageDuration := state.LastBlockTime.Sub(evTime) + + // check that the evidence hasn't expired + if ageDuration > evidenceParams.MaxAgeDuration && ageNumBlocks > evidenceParams.MaxAgeNumBlocks { + return fmt.Errorf( + "evidence from height %d (created at: %v) is too old; min height is %d and evidence can not be 
older than %v", + evidence.Height(), + evTime, + height-evidenceParams.MaxAgeNumBlocks, + state.LastBlockTime.Add(evidenceParams.MaxAgeDuration), + ) + } + + // apply the evidence-specific verification logic + switch ev := evidence.(type) { + case *types.DuplicateVoteEvidence: + valSet, err := evpool.stateDB.LoadValidators(evidence.Height()) + if err != nil { + return err + } + return VerifyDuplicateVote(ev, state.ChainID, valSet) + + case *types.LightClientAttackEvidence: + commonHeader, err := getSignedHeader(evpool.blockStore, evidence.Height()) + if err != nil { + return err + } + commonVals, err := evpool.stateDB.LoadValidators(evidence.Height()) + if err != nil { + return err + } + trustedHeader := commonHeader + // in the case of lunatic the trusted header is different to the common header + if evidence.Height() != ev.ConflictingBlock.Height { + trustedHeader, err = getSignedHeader(evpool.blockStore, ev.ConflictingBlock.Height) + if err != nil { + return err + } + } + + err = VerifyLightClientAttack(ev, commonHeader, trustedHeader, commonVals, state.LastBlockTime, + state.ConsensusParams.Evidence.MaxAgeDuration) + if err != nil { + return err + } + // find out what type of attack this was and thus extract the malicious validators. Note in the case of an + // Amnesia attack we don't have any malicious validators. + validators := ev.GetByzantineValidators(commonVals, trustedHeader) + // ensure this matches the validators that are listed in the evidence. They should be ordered based on power. + if validators == nil && ev.ByzantineValidators != nil { + return fmt.Errorf("expected nil validators from an amnesia light client attack but got %d", + len(ev.ByzantineValidators)) + } + + if exp, got := len(validators), len(ev.ByzantineValidators); exp != got { + return fmt.Errorf("expected %d byzantine validators from evidence but got %d", + exp, got) + } + + // ensure that both validator arrays are in the same order + sort.Sort(types.ValidatorsByVotingPower(ev.ByzantineValidators)) + + for idx, val := range validators { + if !bytes.Equal(ev.ByzantineValidators[idx].Address, val.Address) { + return fmt.Errorf("evidence contained a different byzantine validator address to the one we were expecting."+ + "Expected %v, got %v", val.Address, ev.ByzantineValidators[idx].Address) + } + if ev.ByzantineValidators[idx].VotingPower != val.VotingPower { + return fmt.Errorf("evidence contained a byzantine validator with a different power to the one we were expecting."+ + "Expected %d, got %d", val.VotingPower, ev.ByzantineValidators[idx].VotingPower) + } + } + + return nil + default: + return fmt.Errorf("unrecognized evidence type: %T", evidence) + } + +} + +// VerifyLightClientAttack verifies LightClientAttackEvidence against the state of the full node. 
This involves +// the following checks: +// - the common header from the full node has at least 1/3 voting power which is also present in +// the conflicting header's commit +// - the nodes trusted header at the same height as the conflicting header has a different hash +func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, trustedHeader *types.SignedHeader, + commonVals *types.ValidatorSet, now time.Time, trustPeriod time.Duration) error { + // In the case of lunatic attack we need to perform a single verification jump between the + // common header and the conflicting one + if commonHeader.Height != trustedHeader.Height { + err := light.Verify(commonHeader, commonVals, e.ConflictingBlock.SignedHeader, e.ConflictingBlock.ValidatorSet, + trustPeriod, now, 0*time.Second, light.DefaultTrustLevel) + if err != nil { + return fmt.Errorf("skipping verification from common to conflicting header failed: %w", err) + } + } else { + // in the case of equivocation and amnesia we expect some header hashes to be correctly derived + if isInvalidHeader(trustedHeader.Header, e.ConflictingBlock.Header) { + return errors.New("common height is the same as conflicting block height so expected the conflicting" + + " block to be correctly derived yet it wasn't") + } + // ensure that 2/3 of the validator set did vote for this block + if err := e.ConflictingBlock.ValidatorSet.VerifyCommitLight(trustedHeader.ChainID, e.ConflictingBlock.Commit.BlockID, + e.ConflictingBlock.Height, e.ConflictingBlock.Commit); err != nil { + return fmt.Errorf("invalid commit from conflicting block: %w", err) + } + } + + if evTotal, valsTotal := e.TotalVotingPower, commonVals.TotalVotingPower(); evTotal != valsTotal { + return fmt.Errorf("total voting power from the evidence and our validator set does not match (%d != %d)", + evTotal, valsTotal) + } + + if bytes.Equal(trustedHeader.Hash(), e.ConflictingBlock.Hash()) { + return fmt.Errorf("trusted header hash matches the evidence's conflicting header hash: %X", + trustedHeader.Hash()) + } + + return nil +} + +// VerifyDuplicateVote verifies DuplicateVoteEvidence against the state of full node. 
This involves the
+// following checks:
+// - the validator is in the validator set at the height of the evidence
+// - the height, round, type and validator address of the votes must be the same
+// - the block IDs must be different
+// - the signatures must both be valid
+func VerifyDuplicateVote(e *types.DuplicateVoteEvidence, chainID string, valSet *types.ValidatorSet) error {
+	_, val := valSet.GetByAddress(e.VoteA.ValidatorAddress)
+	if val == nil {
+		return fmt.Errorf("address %X was not a validator at height %d", e.VoteA.ValidatorAddress, e.Height())
+	}
+	pubKey := val.PubKey
+
+	// H/R/S must be the same
+	if e.VoteA.Height != e.VoteB.Height ||
+		e.VoteA.Round != e.VoteB.Round ||
+		e.VoteA.Type != e.VoteB.Type {
+		return fmt.Errorf("h/r/s does not match: %d/%d/%v vs %d/%d/%v",
+			e.VoteA.Height, e.VoteA.Round, e.VoteA.Type,
+			e.VoteB.Height, e.VoteB.Round, e.VoteB.Type)
+	}
+
+	// Address must be the same
+	if !bytes.Equal(e.VoteA.ValidatorAddress, e.VoteB.ValidatorAddress) {
+		return fmt.Errorf("validator addresses do not match: %X vs %X",
+			e.VoteA.ValidatorAddress,
+			e.VoteB.ValidatorAddress,
+		)
+	}
+
+	// BlockIDs must be different
+	if e.VoteA.BlockID.Equals(e.VoteB.BlockID) {
+		return fmt.Errorf(
+			"block IDs are the same (%v) - not a real duplicate vote",
+			e.VoteA.BlockID,
+		)
+	}
+
+	// pubkey must match address (this should already be true, sanity check)
+	addr := e.VoteA.ValidatorAddress
+	if !bytes.Equal(pubKey.Address(), addr) {
+		return fmt.Errorf("address (%X) doesn't match pubkey (%v - %X)",
+			addr, pubKey, pubKey.Address())
+	}
+
+	// validator voting power and total voting power must match
+	if val.VotingPower != e.ValidatorPower {
+		return fmt.Errorf("validator power from evidence and our validator set does not match (%d != %d)",
+			e.ValidatorPower, val.VotingPower)
+	}
+	if valSet.TotalVotingPower() != e.TotalVotingPower {
+		return fmt.Errorf("total voting power from the evidence and our validator set does not match (%d != %d)",
+			e.TotalVotingPower, valSet.TotalVotingPower())
+	}
+
+	va := e.VoteA.ToProto()
+	vb := e.VoteB.ToProto()
+	// Signatures must be valid
+	if !pubKey.VerifySignature(types.VoteSignBytes(chainID, va), e.VoteA.Signature) {
+		return fmt.Errorf("verifying VoteA: %w", types.ErrVoteInvalidSignature)
+	}
+	if !pubKey.VerifySignature(types.VoteSignBytes(chainID, vb), e.VoteB.Signature) {
+		return fmt.Errorf("verifying VoteB: %w", types.ErrVoteInvalidSignature)
+	}
+
+	return nil
+}
+
+func getSignedHeader(blockStore BlockStore, height int64) (*types.SignedHeader, error) {
+	blockMeta := blockStore.LoadBlockMeta(height)
+	if blockMeta == nil {
+		return nil, fmt.Errorf("don't have header at height #%d", height)
+	}
+	commit := blockStore.LoadBlockCommit(height)
+	if commit == nil {
+		return nil, fmt.Errorf("don't have commit at height #%d", height)
+	}
+	return &types.SignedHeader{
+		Header: &blockMeta.Header,
+		Commit: commit,
+	}, nil
+}
+
+// isInvalidHeader takes a trusted header and matches it against a conflicting header
+// to determine whether the conflicting header was the product of a valid state transition
+// or not. If it is, then all the deterministic fields of the header should be the same.
+// If not, it is an invalid header and constitutes a lunatic attack.
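Before the function itself, a self-contained illustration of the check it performs: a conflicting header that really was produced by a valid state transition must reproduce every deterministic field of the trusted header, so any mismatch means the attacker fabricated state. The struct below is illustrative only, not the real types.Header:

```go
package main

import (
	"bytes"
	"fmt"
)

// header carries only the deterministic fields compared by the real check.
type header struct {
	ValidatorsHash, NextValidatorsHash, ConsensusHash, AppHash, LastResultsHash []byte
}

// invalid reports whether any deterministic field diverges from the trusted header.
func invalid(trusted, conflicting header) bool {
	return !bytes.Equal(trusted.ValidatorsHash, conflicting.ValidatorsHash) ||
		!bytes.Equal(trusted.NextValidatorsHash, conflicting.NextValidatorsHash) ||
		!bytes.Equal(trusted.ConsensusHash, conflicting.ConsensusHash) ||
		!bytes.Equal(trusted.AppHash, conflicting.AppHash) ||
		!bytes.Equal(trusted.LastResultsHash, conflicting.LastResultsHash)
}

func main() {
	a := header{[]byte{1}, []byte{2}, []byte{3}, []byte{4}, []byte{5}}
	b := a
	fmt.Println(invalid(a, b)) // false: same deterministic fields (equivocation/amnesia path)
	b.AppHash = []byte{9}
	fmt.Println(invalid(a, b)) // true: fabricated app state marks a lunatic attack
}
```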
+func isInvalidHeader(trusted, conflicting *types.Header) bool { + return !bytes.Equal(trusted.ValidatorsHash, conflicting.ValidatorsHash) || + !bytes.Equal(trusted.NextValidatorsHash, conflicting.NextValidatorsHash) || + !bytes.Equal(trusted.ConsensusHash, conflicting.ConsensusHash) || + !bytes.Equal(trusted.AppHash, conflicting.AppHash) || + !bytes.Equal(trusted.LastResultsHash, conflicting.LastResultsHash) +} diff --git a/evidence/verify_test.go b/evidence/verify_test.go new file mode 100644 index 000000000..0e72582b2 --- /dev/null +++ b/evidence/verify_test.go @@ -0,0 +1,447 @@ +package evidence_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/evidence" + "github.com/tendermint/tendermint/evidence/mocks" + "github.com/tendermint/tendermint/libs/log" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmversion "github.com/tendermint/tendermint/proto/tendermint/version" + sm "github.com/tendermint/tendermint/state" + smmocks "github.com/tendermint/tendermint/state/mocks" + "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" +) + +func TestVerifyLightClientAttack_Lunatic(t *testing.T) { + commonVals, commonPrivVals := types.RandValidatorSet(2, 10) + + newVal, newPrivVal := types.RandValidator(false, 9) + + conflictingVals, err := types.ValidatorSetFromExistingValidators(append(commonVals.Validators, newVal)) + require.NoError(t, err) + conflictingPrivVals := append(commonPrivVals, newPrivVal) + + commonHeader := makeHeaderRandom(4) + commonHeader.Time = defaultEvidenceTime + trustedHeader := makeHeaderRandom(10) + + conflictingHeader := makeHeaderRandom(10) + conflictingHeader.Time = defaultEvidenceTime.Add(1 * time.Hour) + conflictingHeader.ValidatorsHash = conflictingVals.Hash() + + // we are simulating a lunatic light client attack + blockID := makeBlockID(conflictingHeader.Hash(), 1000, []byte("partshash")) + voteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) + commit, err := types.MakeCommit(blockID, 10, 1, voteSet, conflictingPrivVals, defaultEvidenceTime) + require.NoError(t, err) + ev := &types.LightClientAttackEvidence{ + ConflictingBlock: &types.LightBlock{ + SignedHeader: &types.SignedHeader{ + Header: conflictingHeader, + Commit: commit, + }, + ValidatorSet: conflictingVals, + }, + CommonHeight: 4, + TotalVotingPower: 20, + ByzantineValidators: commonVals.Validators, + Timestamp: defaultEvidenceTime, + } + + commonSignedHeader := &types.SignedHeader{ + Header: commonHeader, + Commit: &types.Commit{}, + } + trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) + vals, privVals := types.RandValidatorSet(3, 8) + trustedVoteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), vals) + trustedCommit, err := types.MakeCommit(trustedBlockID, 10, 1, trustedVoteSet, privVals, defaultEvidenceTime) + require.NoError(t, err) + trustedSignedHeader := &types.SignedHeader{ + Header: trustedHeader, + Commit: trustedCommit, + } + + // good pass -> no error + err = evidence.VerifyLightClientAttack(ev, commonSignedHeader, trustedSignedHeader, commonVals, + defaultEvidenceTime.Add(2*time.Hour), 3*time.Hour) + assert.NoError(t, err) + + // trusted and conflicting hashes are the same -> an error should be returned + err = 
evidence.VerifyLightClientAttack(ev, commonSignedHeader, ev.ConflictingBlock.SignedHeader, commonVals, + defaultEvidenceTime.Add(2*time.Hour), 3*time.Hour) + assert.Error(t, err) + + // evidence with different total validator power should fail + ev.TotalVotingPower = 1 + err = evidence.VerifyLightClientAttack(ev, commonSignedHeader, trustedSignedHeader, commonVals, + defaultEvidenceTime.Add(2*time.Hour), 3*time.Hour) + assert.Error(t, err) + ev.TotalVotingPower = 20 + + state := sm.State{ + LastBlockTime: defaultEvidenceTime.Add(2 * time.Hour), + LastBlockHeight: 11, + ConsensusParams: *types.DefaultConsensusParams(), + } + stateStore := &smmocks.Store{} + stateStore.On("LoadValidators", int64(4)).Return(commonVals, nil) + stateStore.On("Load").Return(state, nil) + blockStore := &mocks.BlockStore{} + blockStore.On("LoadBlockMeta", int64(4)).Return(&types.BlockMeta{Header: *commonHeader}) + blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader}) + blockStore.On("LoadBlockCommit", int64(4)).Return(commit) + blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit) + + pool, err := evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore) + require.NoError(t, err) + pool.SetLogger(log.TestingLogger()) + + evList := types.EvidenceList{ev} + err = pool.CheckEvidence(evList) + assert.NoError(t, err) + + pendingEvs, _ := pool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) + assert.Equal(t, 1, len(pendingEvs)) + + // if we submit evidence only against a single byzantine validator when we see there are more validators then this + // should return an error + ev.ByzantineValidators = []*types.Validator{commonVals.Validators[0]} + err = pool.CheckEvidence(evList) + assert.Error(t, err) + ev.ByzantineValidators = commonVals.Validators // restore evidence + + // If evidence is submitted with an altered timestamp it should return an error + ev.Timestamp = defaultEvidenceTime.Add(1 * time.Minute) + err = pool.CheckEvidence(evList) + assert.Error(t, err) + +} + +func TestVerifyLightClientAttack_Equivocation(t *testing.T) { + conflictingVals, conflictingPrivVals := types.RandValidatorSet(5, 10) + trustedHeader := makeHeaderRandom(10) + + conflictingHeader := makeHeaderRandom(10) + conflictingHeader.ValidatorsHash = conflictingVals.Hash() + + trustedHeader.ValidatorsHash = conflictingHeader.ValidatorsHash + trustedHeader.NextValidatorsHash = conflictingHeader.NextValidatorsHash + trustedHeader.ConsensusHash = conflictingHeader.ConsensusHash + trustedHeader.AppHash = conflictingHeader.AppHash + trustedHeader.LastResultsHash = conflictingHeader.LastResultsHash + + // we are simulating a duplicate vote attack where all the validators in the conflictingVals set + // except the last validator vote twice + blockID := makeBlockID(conflictingHeader.Hash(), 1000, []byte("partshash")) + voteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) + commit, err := types.MakeCommit(blockID, 10, 1, voteSet, conflictingPrivVals[:4], defaultEvidenceTime) + require.NoError(t, err) + ev := &types.LightClientAttackEvidence{ + ConflictingBlock: &types.LightBlock{ + SignedHeader: &types.SignedHeader{ + Header: conflictingHeader, + Commit: commit, + }, + ValidatorSet: conflictingVals, + }, + CommonHeight: 10, + ByzantineValidators: conflictingVals.Validators[:4], + TotalVotingPower: 50, + Timestamp: defaultEvidenceTime, + } + + trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) + trustedVoteSet := 
types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals)
+	trustedCommit, err := types.MakeCommit(trustedBlockID, 10, 1, trustedVoteSet, conflictingPrivVals, defaultEvidenceTime)
+	require.NoError(t, err)
+	trustedSignedHeader := &types.SignedHeader{
+		Header: trustedHeader,
+		Commit: trustedCommit,
+	}
+
+	// good pass -> no error
+	err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, conflictingVals,
+		defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour)
+	assert.NoError(t, err)
+
+	// trusted and conflicting hashes are the same -> an error should be returned
+	err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, ev.ConflictingBlock.SignedHeader, conflictingVals,
+		defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour)
+	assert.Error(t, err)
+
+	// conflicting header has a different next validators hash, which should have been correctly derived from
+	// the previous round
+	ev.ConflictingBlock.Header.NextValidatorsHash = crypto.CRandBytes(tmhash.Size)
+	err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, nil,
+		defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour)
+	assert.Error(t, err)
+	// revert next validators hash
+	ev.ConflictingBlock.Header.NextValidatorsHash = trustedHeader.NextValidatorsHash
+
+	state := sm.State{
+		LastBlockTime:   defaultEvidenceTime.Add(1 * time.Minute),
+		LastBlockHeight: 11,
+		ConsensusParams: *types.DefaultConsensusParams(),
+	}
+	stateStore := &smmocks.Store{}
+	stateStore.On("LoadValidators", int64(10)).Return(conflictingVals, nil)
+	stateStore.On("Load").Return(state, nil)
+	blockStore := &mocks.BlockStore{}
+	blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader})
+	blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit)
+
+	pool, err := evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore)
+	require.NoError(t, err)
+	pool.SetLogger(log.TestingLogger())
+
+	evList := types.EvidenceList{ev}
+	err = pool.CheckEvidence(evList)
+	assert.NoError(t, err)
+
+	pendingEvs, _ := pool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes)
+	assert.Equal(t, 1, len(pendingEvs))
+}
+
+func TestVerifyLightClientAttack_Amnesia(t *testing.T) {
+	conflictingVals, conflictingPrivVals := types.RandValidatorSet(5, 10)
+
+	conflictingHeader := makeHeaderRandom(10)
+	conflictingHeader.ValidatorsHash = conflictingVals.Hash()
+	trustedHeader := makeHeaderRandom(10)
+	trustedHeader.ValidatorsHash = conflictingHeader.ValidatorsHash
+	trustedHeader.NextValidatorsHash = conflictingHeader.NextValidatorsHash
+	trustedHeader.AppHash = conflictingHeader.AppHash
+	trustedHeader.ConsensusHash = conflictingHeader.ConsensusHash
+	trustedHeader.LastResultsHash = conflictingHeader.LastResultsHash
+
+	// we are simulating an amnesia attack where all the validators in the conflictingVals set
+	// vote twice, but this time the commits are from different rounds.
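The three light-client-attack tests here differ only in how the conflicting block relates to the trusted one, which a rough decision table makes easier to keep apart. This is a deliberate simplification for orientation; the real classification lives in the evidence type and GetByzantineValidators:

```go
package main

import "fmt"

// classify sketches how the three attack flavours exercised by these tests
// differ; it is illustrative only, not the diff's logic.
func classify(commonHeight, conflictingHeight int64, trustedRound, conflictingRound int32) string {
	switch {
	case commonHeight != conflictingHeight:
		// common header is older: verification jumps forward (lunatic)
		return "lunatic"
	case trustedRound == conflictingRound:
		// same height and round, two blocks: validators double-signed
		return "equivocation"
	default:
		// same height, different rounds: amnesia (no byzantine validators
		// are reported in the evidence)
		return "amnesia"
	}
}

func main() {
	fmt.Println(classify(4, 10, 1, 1))  // lunatic
	fmt.Println(classify(10, 10, 1, 1)) // equivocation
	fmt.Println(classify(10, 10, 1, 0)) // amnesia
}
```

The arguments in the println calls mirror the fixtures used in these tests: common height 4 against a conflicting height 10 for lunatic, and a round-1 trusted commit against a round-0 conflicting commit for amnesia.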
+ blockID := makeBlockID(conflictingHeader.Hash(), 1000, []byte("partshash")) + voteSet := types.NewVoteSet(evidenceChainID, 10, 0, tmproto.SignedMsgType(2), conflictingVals) + commit, err := types.MakeCommit(blockID, 10, 0, voteSet, conflictingPrivVals, defaultEvidenceTime) + require.NoError(t, err) + ev := &types.LightClientAttackEvidence{ + ConflictingBlock: &types.LightBlock{ + SignedHeader: &types.SignedHeader{ + Header: conflictingHeader, + Commit: commit, + }, + ValidatorSet: conflictingVals, + }, + CommonHeight: 10, + ByzantineValidators: nil, // with amnesia evidence no validators are submitted as abci evidence + TotalVotingPower: 50, + Timestamp: defaultEvidenceTime, + } + + trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) + trustedVoteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) + trustedCommit, err := types.MakeCommit(trustedBlockID, 10, 1, trustedVoteSet, conflictingPrivVals, defaultEvidenceTime) + require.NoError(t, err) + trustedSignedHeader := &types.SignedHeader{ + Header: trustedHeader, + Commit: trustedCommit, + } + + // good pass -> no error + err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, trustedSignedHeader, conflictingVals, + defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) + assert.NoError(t, err) + + // trusted and conflicting hashes are the same -> an error should be returned + err = evidence.VerifyLightClientAttack(ev, trustedSignedHeader, ev.ConflictingBlock.SignedHeader, conflictingVals, + defaultEvidenceTime.Add(1*time.Minute), 2*time.Hour) + assert.Error(t, err) + + state := sm.State{ + LastBlockTime: defaultEvidenceTime.Add(1 * time.Minute), + LastBlockHeight: 11, + ConsensusParams: *types.DefaultConsensusParams(), + } + stateStore := &smmocks.Store{} + stateStore.On("LoadValidators", int64(10)).Return(conflictingVals, nil) + stateStore.On("Load").Return(state, nil) + blockStore := &mocks.BlockStore{} + blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader}) + blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit) + + pool, err := evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore) + require.NoError(t, err) + pool.SetLogger(log.TestingLogger()) + + evList := types.EvidenceList{ev} + err = pool.CheckEvidence(evList) + assert.NoError(t, err) + + pendingEvs, _ := pool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) + assert.Equal(t, 1, len(pendingEvs)) +} + +type voteData struct { + vote1 *types.Vote + vote2 *types.Vote + valid bool +} + +func TestVerifyDuplicateVoteEvidence(t *testing.T) { + val := types.NewMockPV() + val2 := types.NewMockPV() + valSet := types.NewValidatorSet([]*types.Validator{val.ExtractIntoValidator(1)}) + + blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) + blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) + blockID3 := makeBlockID([]byte("blockhash"), 10000, []byte("partshash")) + blockID4 := makeBlockID([]byte("blockhash"), 10000, []byte("partshash2")) + + const chainID = "mychain" + + vote1 := makeVote(t, val, chainID, 0, 10, 2, 1, blockID, defaultEvidenceTime) + v1 := vote1.ToProto() + err := val.SignVote(chainID, v1) + require.NoError(t, err) + badVote := makeVote(t, val, chainID, 0, 10, 2, 1, blockID, defaultEvidenceTime) + bv := badVote.ToProto() + err = val2.SignVote(chainID, bv) + require.NoError(t, err) + + vote1.Signature = v1.Signature + badVote.Signature = bv.Signature + + cases := []voteData{ + {vote1, makeVote(t, val, 
chainID, 0, 10, 2, 1, blockID2, defaultEvidenceTime), true}, // different block ids
+		{vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID3, defaultEvidenceTime), true},
+		{vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID4, defaultEvidenceTime), true},
+		{vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID, defaultEvidenceTime), false}, // same block id, so not a duplicate vote
+		{vote1, makeVote(t, val, "mychain2", 0, 10, 2, 1, blockID2, defaultEvidenceTime), false}, // wrong chain id
+		{vote1, makeVote(t, val, chainID, 0, 11, 2, 1, blockID2, defaultEvidenceTime), false}, // wrong height
+		{vote1, makeVote(t, val, chainID, 0, 10, 3, 1, blockID2, defaultEvidenceTime), false}, // wrong round
+		{vote1, makeVote(t, val, chainID, 0, 10, 2, 2, blockID2, defaultEvidenceTime), false}, // wrong step
+		{vote1, makeVote(t, val2, chainID, 0, 10, 2, 1, blockID2, defaultEvidenceTime), false}, // wrong validator
+		// a different vote time doesn't matter
+		{vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID2, time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)), true},
+		{vote1, badVote, false}, // signed by wrong key
+	}
+
+	require.NoError(t, err)
+	for _, c := range cases {
+		ev := &types.DuplicateVoteEvidence{
+			VoteA:            c.vote1,
+			VoteB:            c.vote2,
+			ValidatorPower:   1,
+			TotalVotingPower: 1,
+			Timestamp:        defaultEvidenceTime,
+		}
+		if c.valid {
+			assert.Nil(t, evidence.VerifyDuplicateVote(ev, chainID, valSet), "evidence should be valid")
+		} else {
+			assert.NotNil(t, evidence.VerifyDuplicateVote(ev, chainID, valSet), "evidence should be invalid")
+		}
+	}
+
+	// create good evidence and correct validator power
+	goodEv := types.NewMockDuplicateVoteEvidenceWithValidator(10, defaultEvidenceTime, val, chainID)
+	goodEv.ValidatorPower = 1
+	goodEv.TotalVotingPower = 1
+	badEv := types.NewMockDuplicateVoteEvidenceWithValidator(10, defaultEvidenceTime, val, chainID)
+	badTimeEv := types.NewMockDuplicateVoteEvidenceWithValidator(10, defaultEvidenceTime.Add(1*time.Minute), val, chainID)
+	badTimeEv.ValidatorPower = 1
+	badTimeEv.TotalVotingPower = 1
+	state := sm.State{
+		ChainID:         chainID,
+		LastBlockTime:   defaultEvidenceTime.Add(1 * time.Minute),
+		LastBlockHeight: 11,
+		ConsensusParams: *types.DefaultConsensusParams(),
+	}
+	stateStore := &smmocks.Store{}
+	stateStore.On("LoadValidators", int64(10)).Return(valSet, nil)
+	stateStore.On("Load").Return(state, nil)
+	blockStore := &mocks.BlockStore{}
+	blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}})
+
+	pool, err := evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore)
+	require.NoError(t, err)
+
+	evList := types.EvidenceList{goodEv}
+	err = pool.CheckEvidence(evList)
+	assert.NoError(t, err)
+
+	// evidence with a different validator power should fail
+	evList = types.EvidenceList{badEv}
+	err = pool.CheckEvidence(evList)
+	assert.Error(t, err)
+
+	// evidence with a different timestamp should fail
+	evList = types.EvidenceList{badTimeEv}
+	err = pool.CheckEvidence(evList)
+	assert.Error(t, err)
+}
+
+func makeVote(
+	t *testing.T, val types.PrivValidator, chainID string, valIndex int32, height int64,
+	round int32, step int, blockID types.BlockID, time time.Time) *types.Vote {
+	pubKey, err := val.GetPubKey()
+	require.NoError(t, err)
+	v := &types.Vote{
+		ValidatorAddress: pubKey.Address(),
+		ValidatorIndex:   valIndex,
+		Height:           height,
+		Round:            round,
+		Type:             tmproto.SignedMsgType(step),
+		BlockID:          blockID,
+		Timestamp:        time,
+	}
+
+	vpb := v.ToProto()
+	err = val.SignVote(chainID, vpb)
+	if err != nil {
+
panic(err) + } + v.Signature = vpb.Signature + return v +} + +func makeHeaderRandom(height int64) *types.Header { + return &types.Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol, App: 1}, + ChainID: evidenceChainID, + Height: height, + Time: defaultEvidenceTime, + LastBlockID: makeBlockID([]byte("headerhash"), 1000, []byte("partshash")), + LastCommitHash: crypto.CRandBytes(tmhash.Size), + DataHash: crypto.CRandBytes(tmhash.Size), + ValidatorsHash: crypto.CRandBytes(tmhash.Size), + NextValidatorsHash: crypto.CRandBytes(tmhash.Size), + ConsensusHash: crypto.CRandBytes(tmhash.Size), + AppHash: crypto.CRandBytes(tmhash.Size), + LastResultsHash: crypto.CRandBytes(tmhash.Size), + EvidenceHash: crypto.CRandBytes(tmhash.Size), + ProposerAddress: crypto.CRandBytes(crypto.AddressSize), + } +} + +func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.BlockID { + var ( + h = make([]byte, tmhash.Size) + psH = make([]byte, tmhash.Size) + ) + copy(h, hash) + copy(psH, partSetHash) + return types.BlockID{ + Hash: h, + PartSetHeader: types.PartSetHeader{ + Total: partSetSize, + Hash: psH, + }, + } +} diff --git a/go.mod b/go.mod index 2b1d5fcb3..30037fb88 100644 --- a/go.mod +++ b/go.mod @@ -1,32 +1,36 @@ module github.com/tendermint/tendermint -go 1.13 +go 1.15 require ( + github.com/BurntSushi/toml v0.3.1 github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d github.com/Workiva/go-datastructures v1.0.52 - github.com/btcsuite/btcd v0.20.1-beta + github.com/btcsuite/btcd v0.21.0-beta github.com/btcsuite/btcutil v1.0.2 + github.com/confio/ics23/go v0.6.3 + github.com/cosmos/iavl v0.15.0-rc5 github.com/fortytw2/leaktest v1.3.0 github.com/go-kit/kit v0.10.0 github.com/go-logfmt/logfmt v0.5.0 github.com/gogo/protobuf v1.3.1 - github.com/golang/protobuf v1.4.0 + github.com/golang/protobuf v1.4.3 github.com/gorilla/websocket v1.4.2 github.com/gtank/merlin v0.1.1 + github.com/hdevalence/ed25519consensus v0.0.0-20200813231810-1694d75e712a github.com/libp2p/go-buffer-pool v0.0.2 - github.com/magiconair/properties v1.8.1 + github.com/minio/highwayhash v1.0.1 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.5.1 + github.com/prometheus/client_golang v1.8.0 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/rs/cors v1.7.0 + github.com/sasha-s/go-deadlock v0.2.0 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa - github.com/spf13/cobra v1.0.0 - github.com/spf13/viper v1.6.3 - github.com/stretchr/testify v1.5.1 - github.com/tendermint/go-amino v0.14.1 - github.com/tendermint/tm-db v0.5.1 - golang.org/x/crypto v0.0.0-20200406173513-056763e48d71 - golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e - google.golang.org/grpc v1.28.1 + github.com/spf13/cobra v1.1.1 + github.com/spf13/viper v1.7.1 + github.com/stretchr/testify v1.6.1 + github.com/tendermint/tm-db v0.6.3 + golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 + golang.org/x/net v0.0.0-20200822124328-c89045814202 + google.golang.org/grpc v1.33.2 ) diff --git a/go.sum b/go.sum index bde49f9fb..1358bfb40 100644 --- a/go.sum +++ b/go.sum @@ -1,10 +1,25 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod 
h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= +github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= +github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= @@ -12,13 +27,14 @@ github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrd github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/Workiva/go-datastructures v1.0.52 h1:PLSK6pwn8mYdaoaCZEMsXBpBotr4HHn9abU0yMQt0NI= github.com/Workiva/go-datastructures v1.0.52/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA= -github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf 
v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -37,23 +53,20 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= +github.com/btcsuite/btcd v0.21.0-beta h1:At9hIZdJW0s9E/fAz28nrz6AmcNlSVucCH796ZteX1M= +github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= -github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts= github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= -github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= -github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd h1:qdGvebPBDuYDPGi1WCPjy1tGyMpmDK8IEapSsszn7HE= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= -github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723 h1:ZA/jbKoGcVAnER6pCHPEkGdZOV7U1oLUedErBHCUMs0= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= -github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -67,16 +80,28 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/confio/ics23/go 
v0.0.0-20200817220745-f173e6211efb h1:+7FsS1gZ1Km5LRjGV2hztpier/5i6ngNjvNpxbWP5I0= +github.com/confio/ics23/go v0.0.0-20200817220745-f173e6211efb/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg= +github.com/confio/ics23/go v0.6.3 h1:PuGK2V1NJWZ8sSkNDq91jgT/cahFEW9RGp4Y5jxulf0= +github.com/confio/ics23/go v0.6.3/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d h1:49RLWk1j44Xu4fjHb6JFYmeUnDORVwHNkDxaQ0ctCVU= github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= +github.com/cosmos/iavl v0.15.0-rc3.0.20201009144442-230e9bdf52cd h1:K3bmPkMDnd2KVQ7xoGmgp+pxoXcBW58vMWaMl9ZWx3c= +github.com/cosmos/iavl v0.15.0-rc3.0.20201009144442-230e9bdf52cd/go.mod h1:3xOIaNNX19p0QrX0VqWa6voPRoJRGGYtny+DH8NEPvE= +github.com/cosmos/iavl v0.15.0-rc5 h1:AMKgaAjXwGANWv56NL4q4hV+a0puSkLYD6cCQAv3i44= +github.com/cosmos/iavl v0.15.0-rc5/go.mod h1:WqoPL9yPTQ85QBMT45OOUzPxG/U/JcJoN7uMjgxke/I= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= @@ -84,9 +109,20 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/dgraph-io/badger/v2 v2.2007.1 h1:t36VcBCpo4SsmAD5M8wVv1ieVzcALyGfaJ92z4ccULM= +github.com/dgraph-io/badger/v2 v2.2007.1/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= +github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= +github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod 
h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= @@ -109,7 +145,11 @@ github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVB github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= @@ -125,6 +165,7 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/gateway v1.1.0/go.mod h1:S7rR8FRQyG3QFESeSv4l2WnsyzlCLG0CzBbUUo/mbic= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -138,6 +179,8 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= 
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= @@ -149,6 +192,11 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -162,10 +210,18 @@ github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= @@ -178,16 +234,25 @@ github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0U github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware 
v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.1/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.14.7 h1:Nk5kuHrnWUTf/0GL1a/vchH/om9Ap2/HnVna+jYZgTY= +github.com/grpc-ecosystem/grpc-gateway v1.14.7/go.mod h1:oYZKL012gGh6LMyg/xA7Q2yq6j8bu0wa+9w14EEthWU= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f h1:8N8XWLZelZNibkhM1FuF+3Ad3YIbgirjdMiVA0eUkaM= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -209,33 +274,38 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hdevalence/ed25519consensus v0.0.0-20200813231810-1694d75e712a h1:H7I/CTwAupJEX4g8AesPYRKQY0wbGZxQBlg842dGK3k= +github.com/hdevalence/ed25519consensus v0.0.0-20200813231810-1694d75e712a/go.mod h1:V0zo781scjlo5OzNQb2GI8wMt6CD4vs7y1beXtxZEhM= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89 
h1:12K8AlpT0/6QUXSfV0yi4Q0jkbq8NDtIKFtF61AoqV0= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= @@ -260,6 +330,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643 h1:hLDRPB66XQT/8+wG9WsDpiCvZf1yKO7sz7scAjSlBa0= github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0= +github.com/minio/highwayhash v1.0.1/go.mod 
h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -275,6 +347,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= @@ -282,6 +355,10 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= @@ -289,8 +366,15 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod 
h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= @@ -306,6 +390,8 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -325,8 +411,10 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= -github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.8.0 h1:zvJNkoCFAnYFNC24FV8nW4JdRJ3GIFcLbg65lL/JDcw= +github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -345,8 +433,10 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.14.0 h1:RHRyE8UocrbjU+6UvRzwi6HjiDfxrrBU91TtbKzkGp4= +github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ 
-355,22 +445,31 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y= +github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= @@ -380,22 +479,31 @@ github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPH github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.6.3 h1:pDDu1OyEDTKzpJwdq4TiuLyMsUgRa/BT5cn5O62NoHs= -github.com/spf13/viper v1.6.3/go.mod h1:jUMtyi0/lB5yZH/FjyGAoH7IMNrIhlBf6pXZmbMDvzw= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -407,21 +515,24 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= 
-github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs= -github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= +github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk= +github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= -github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6offJMk= -github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso= -github.com/tendermint/tm-db v0.5.1 h1:H9HDq8UEA7Eeg13kdYckkgwwkQLBnJGgX4PgLJRhieY= -github.com/tendermint/tm-db v0.5.1/go.mod h1:g92zWjHpCYlEvQXvy9M168Su8V1IBEeawpXVVBaK4f4= +github.com/tendermint/tendermint v0.34.0-rc4/go.mod h1:yotsojf2C1QBOw4dZrTcxbyxmPUrT4hNuOQWX9XUwB4= +github.com/tendermint/tendermint v0.34.0-rc6/go.mod h1:ugzyZO5foutZImv0Iyx/gOFCX6mjJTgbLHTwi17VDVg= +github.com/tendermint/tm-db v0.6.2 h1:DOn8jwCdjJblrCFJbtonEIPD1IuJWpbRUUdR8GWE4RM= +github.com/tendermint/tm-db v0.6.2/go.mod h1:GYtQ67SUvATOcoY8/+x6ylk8Qo02BQyLrAs+yAcLvGI= +github.com/tendermint/tm-db v0.6.3 h1:ZkhQcKnB8/2jr5EaZwGndN4owkPsGezW2fSisS9zGbg= +github.com/tendermint/tm-db v0.6.3/go.mod h1:lfA1dL9/Y/Y8wwyPp2NMLyn5P5Ptr/gvDFNWtrCWSf8= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= @@ -429,9 +540,14 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic 
v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -444,24 +560,45 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200406173513-056763e48d71 h1:DOmugCavvUtnUD114C1Wh+UgTgQZ4pMLzXxi1pSt+/Y= -golang.org/x/crypto v0.0.0-20200406173513-056763e48d71/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 h1:pLI5jrR7OSLijeIDcmRxNmw2api+jEfxLoykJVice/E= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint 
v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= @@ -475,18 +612,26 @@ golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc h1:zK/HqS5bZxDptfPJNq8v7vJfXtkU7r9TLIoSr1bXaP4= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -502,24 +647,48 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f h1:68K/z8GLUxV76xGSqwTWw2gyk/jwn79LUL43rES2g8o= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed h1:J22ig1FUekjjkmZUM7pTKixYm8DvrYsvrBZdunYeIuQ= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211 h1:9UQO31fZ+0aKQOFldThf7BKPMJTiBfWycGh/u3UoO88= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -529,48 +698,96 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto 
v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 h1:PDIOdWxZ8eRizhKa1AAvY53xsvLB1cWorMjslvY3VA8= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201111145450-ac7456db90a6 h1:iRN4+t0lvZX/l9gH14ARF9i58tsVa5a97k6aH95rC3Y= +google.golang.org/genproto v0.0.0-20201111145450-ac7456db90a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.28.1 h1:C1QC6KzgSiLyBabDi87BbjaGreoRgGUF5nOyvfrAZ1k= 
-google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -578,6 +795,10 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= 
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= @@ -594,13 +815,22 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/libs/async/async.go b/libs/async/async.go index e2487661a..e716821b6 100644 --- a/libs/async/async.go +++ b/libs/async/async.go @@ -1,9 +1,9 @@ package async import ( + "fmt" + "runtime" "sync/atomic" - - "github.com/pkg/errors" ) //---------------------------------------- @@ -144,7 +144,10 @@ func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) { if pnk := recover(); pnk != nil { atomic.AddInt32(numPanics, 1) // Send panic to taskResultCh. - taskResultCh <- TaskResult{nil, errors.Errorf("panic in task %v", pnk)} + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + taskResultCh <- TaskResult{nil, fmt.Errorf("panic in task %v : %s", pnk, buf)} // Closing taskResultCh lets trs.Wait() work. close(taskResultCh) // Decrement waitgroup. diff --git a/libs/async/async_test.go b/libs/async/async_test.go index ed85827b8..4faead444 100644 --- a/libs/async/async_test.go +++ b/libs/async/async_test.go @@ -1,12 +1,12 @@ package async import ( + "errors" "fmt" "sync/atomic" "testing" "time" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) @@ -125,7 +125,7 @@ func TestParallelRecover(t *testing.T) { // Verify task #0, #1, #2. 
checkResult(t, taskResultSet, 0, 0, nil, nil) checkResult(t, taskResultSet, 1, 1, errors.New("some error"), nil) - checkResult(t, taskResultSet, 2, nil, nil, errors.Errorf("panic in task %v", 2).Error()) + checkResult(t, taskResultSet, 2, nil, nil, fmt.Errorf("panic in task %v", 2).Error()) } // Wait for result @@ -139,7 +139,7 @@ func checkResult(t *testing.T, taskResultSet *TaskResultSet, index int, case err != nil: assert.Equal(t, err.Error(), taskResult.Error.Error(), taskName) case pnk != nil: - assert.Equal(t, pnk, taskResult.Error.Error(), taskName) + assert.Contains(t, taskResult.Error.Error(), pnk, taskName) default: assert.Nil(t, taskResult.Error, taskName) } diff --git a/libs/autofile/autofile.go b/libs/autofile/autofile.go index 061726f7d..10cc04a28 100644 --- a/libs/autofile/autofile.go +++ b/libs/autofile/autofile.go @@ -79,7 +79,7 @@ func OpenAutoFile(path string) (*AutoFile, error) { signal.Notify(af.hupc, syscall.SIGHUP) go func() { for range af.hupc { - af.closeFile() + _ = af.closeFile() } }() @@ -103,7 +103,7 @@ func (af *AutoFile) closeFileRoutine() { for { select { case <-af.closeTicker.C: - af.closeFile() + _ = af.closeFile() case <-af.closeTickerStopc: return } diff --git a/libs/autofile/autofile_test.go b/libs/autofile/autofile_test.go index 24ca6343d..c2442a56f 100644 --- a/libs/autofile/autofile_test.go +++ b/libs/autofile/autofile_test.go @@ -10,21 +10,24 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - tmos "github.com/tendermint/tendermint/libs/os" ) func TestSIGHUP(t *testing.T) { origDir, err := os.Getwd() require.NoError(t, err) - defer os.Chdir(origDir) + t.Cleanup(func() { + if err := os.Chdir(origDir); err != nil { + t.Error(err) + } + }) // First, create a temporary directory and move into it dir, err := ioutil.TempDir("", "sighup_test") require.NoError(t, err) - defer os.RemoveAll(dir) - err = os.Chdir(dir) - require.NoError(t, err) + t.Cleanup(func() { + _ = os.RemoveAll(dir) + }) + require.NoError(t, os.Chdir(dir)) // Create an AutoFile in the temporary directory name := "sighup_test" @@ -39,18 +42,16 @@ func TestSIGHUP(t *testing.T) { require.NoError(t, err) // Move the file over - err = os.Rename(name, name+"_old") - require.NoError(t, err) + require.NoError(t, os.Rename(name, name+"_old")) // Move into a different temporary directory otherDir, err := ioutil.TempDir("", "sighup_test_other") require.NoError(t, err) - defer os.RemoveAll(otherDir) - err = os.Chdir(otherDir) - require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(otherDir) }) + require.NoError(t, os.Chdir(otherDir)) // Send SIGHUP to self. - syscall.Kill(syscall.Getpid(), syscall.SIGHUP) + require.NoError(t, syscall.Kill(syscall.Getpid(), syscall.SIGHUP)) // Wait a bit... signals are not handled synchronously. 
time.Sleep(time.Millisecond * 10) @@ -60,14 +61,13 @@ func TestSIGHUP(t *testing.T) { require.NoError(t, err) _, err = af.Write([]byte("Line 4\n")) require.NoError(t, err) - err = af.Close() - require.NoError(t, err) + require.NoError(t, af.Close()) // Both files should exist - if body := tmos.MustReadFile(filepath.Join(dir, name+"_old")); string(body) != "Line 1\nLine 2\n" { + if body := mustReadFile(t, filepath.Join(dir, name+"_old")); string(body) != "Line 1\nLine 2\n" { t.Errorf("unexpected body %s", body) } - if body := tmos.MustReadFile(filepath.Join(dir, name)); string(body) != "Line 3\nLine 4\n" { + if body := mustReadFile(t, filepath.Join(dir, name)); string(body) != "Line 3\nLine 4\n" { t.Errorf("unexpected body %s", body) } @@ -108,8 +108,7 @@ func TestAutoFileSize(t *testing.T) { // First, create an AutoFile writing to a tempfile dir f, err := ioutil.TempFile("", "sighup_test") require.NoError(t, err) - err = f.Close() - require.NoError(t, err) + require.NoError(t, f.Close()) // Here is the actual AutoFile. af, err := OpenAutoFile(f.Name()) @@ -129,14 +128,19 @@ func TestAutoFileSize(t *testing.T) { require.NoError(t, err) // 3. Not existing file - err = af.Close() - require.NoError(t, err) - err = os.Remove(f.Name()) - require.NoError(t, err) + require.NoError(t, af.Close()) + require.NoError(t, os.Remove(f.Name())) size, err = af.Size() require.EqualValues(t, 0, size, "Expected a new file to be empty") require.NoError(t, err) // Cleanup - _ = os.Remove(f.Name()) + t.Cleanup(func() { os.Remove(f.Name()) }) +} + +func mustReadFile(t *testing.T, filePath string) []byte { + fileBytes, err := ioutil.ReadFile(filePath) + require.NoError(t, err) + + return fileBytes } diff --git a/libs/autofile/cmd/logjack.go b/libs/autofile/cmd/logjack.go index ca970ffa2..46eaf71ee 100644 --- a/libs/autofile/cmd/logjack.go +++ b/libs/autofile/cmd/logjack.go @@ -23,7 +23,10 @@ func parseFlags() (headPath string, chopSize int64, limitSize int64, version boo flagSet.StringVar(&chopSizeStr, "chop", "100M", "Move file if greater than this") flagSet.StringVar(&limitSizeStr, "limit", "10G", "Only keep this much (for each specified file). 
Remove old files.") flagSet.BoolVar(&version, "version", false, "Version") - flagSet.Parse(os.Args[1:]) + if err := flagSet.Parse(os.Args[1:]); err != nil { + fmt.Printf("err parsing flag: %v\n", err) + os.Exit(1) + } chopSize = parseBytesize(chopSizeStr) limitSize = parseBytesize(limitSizeStr) return @@ -59,8 +62,7 @@ func main() { os.Exit(1) } - err = group.Start() - if err != nil { + if err = group.Start(); err != nil { fmt.Printf("logjack couldn't start with file %v\n", headPath) os.Exit(1) } @@ -69,10 +71,11 @@ func main() { buf := make([]byte, readBufferSize) for { n, err := os.Stdin.Read(buf) - group.Write(buf[:n]) - group.FlushAndSync() if err != nil { - group.Stop() + if err := group.Stop(); err != nil { + fmt.Fprintf(os.Stderr, "logjack stopped with error %v\n", headPath) + os.Exit(1) + } if err == io.EOF { os.Exit(0) } else { @@ -80,6 +83,15 @@ func main() { os.Exit(1) } } + _, err = group.Write(buf[:n]) + if err != nil { + fmt.Fprintf(os.Stderr, "logjack failed write with error %v\n", headPath) + os.Exit(1) + } + if err := group.FlushAndSync(); err != nil { + fmt.Fprintf(os.Stderr, "logjack flushsync fail with error %v\n", headPath) + os.Exit(1) + } } } diff --git a/libs/autofile/group.go b/libs/autofile/group.go index 5d5822ebc..7848ef131 100644 --- a/libs/autofile/group.go +++ b/libs/autofile/group.go @@ -145,7 +145,9 @@ func (g *Group) OnStart() error { // NOTE: g.Head must be closed separately using Close. func (g *Group) OnStop() { g.ticker.Stop() - g.FlushAndSync() + if err := g.FlushAndSync(); err != nil { + g.Logger.Error("Error flushin to disk", "err", err) + } } // Wait blocks until all internal goroutines are finished. Supposed to be @@ -157,7 +159,9 @@ func (g *Group) Wait() { // Close closes the head file. The group must be stopped by this moment. func (g *Group) Close() { - g.FlushAndSync() + if err := g.FlushAndSync(); err != nil { + g.Logger.Error("Error flushin to disk", "err", err) + } g.mtx.Lock() _ = g.Head.closeFile() diff --git a/libs/autofile/group_test.go b/libs/autofile/group_test.go index de29a0fba..0981923eb 100644 --- a/libs/autofile/group_test.go +++ b/libs/autofile/group_test.go @@ -53,7 +53,8 @@ func TestCheckHeadSizeLimit(t *testing.T) { err := g.WriteLine(tmrand.Str(999)) require.NoError(t, err, "Error appending to head") } - g.FlushAndSync() + err := g.FlushAndSync() + require.NoError(t, err) assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) // Even calling checkHeadSizeLimit manually won't rotate it. @@ -61,9 +62,10 @@ func TestCheckHeadSizeLimit(t *testing.T) { assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) // Write 1000 more bytes. - err := g.WriteLine(tmrand.Str(999)) + err = g.WriteLine(tmrand.Str(999)) require.NoError(t, err, "Error appending to head") - g.FlushAndSync() + err = g.FlushAndSync() + require.NoError(t, err) // Calling checkHeadSizeLimit this time rolls it. g.checkHeadSizeLimit() @@ -72,7 +74,8 @@ func TestCheckHeadSizeLimit(t *testing.T) { // Write 1000 more bytes. err = g.WriteLine(tmrand.Str(999)) require.NoError(t, err, "Error appending to head") - g.FlushAndSync() + err = g.FlushAndSync() + require.NoError(t, err) // Calling checkHeadSizeLimit does nothing. 
g.checkHeadSizeLimit() @@ -83,7 +86,8 @@ func TestCheckHeadSizeLimit(t *testing.T) { err = g.WriteLine(tmrand.Str(999)) require.NoError(t, err, "Error appending to head") } - g.FlushAndSync() + err = g.FlushAndSync() + require.NoError(t, err) assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2000000, 1000000) // Calling checkHeadSizeLimit rolls it again. @@ -93,7 +97,8 @@ func TestCheckHeadSizeLimit(t *testing.T) { // Write 1000 more bytes. _, err = g.Head.Write([]byte(tmrand.Str(999) + "\n")) require.NoError(t, err, "Error appending to head") - g.FlushAndSync() + err = g.FlushAndSync() + require.NoError(t, err) assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000) // Calling checkHeadSizeLimit does nothing. @@ -111,7 +116,11 @@ func TestRotateFile(t *testing.T) { // relative paths are resolved at Group creation origDir, err := os.Getwd() require.NoError(t, err) - defer os.Chdir(origDir) + defer func() { + if err := os.Chdir(origDir); err != nil { + t.Error(err) + } + }() dir, err := ioutil.TempDir("", "rotate_test") require.NoError(t, err) @@ -123,15 +132,23 @@ func TestRotateFile(t *testing.T) { require.True(t, filepath.IsAbs(g.Dir)) // Create and rotate files - g.WriteLine("Line 1") - g.WriteLine("Line 2") - g.WriteLine("Line 3") - g.FlushAndSync() + err = g.WriteLine("Line 1") + require.NoError(t, err) + err = g.WriteLine("Line 2") + require.NoError(t, err) + err = g.WriteLine("Line 3") + require.NoError(t, err) + err = g.FlushAndSync() + require.NoError(t, err) g.RotateFile() - g.WriteLine("Line 4") - g.WriteLine("Line 5") - g.WriteLine("Line 6") - g.FlushAndSync() + err = g.WriteLine("Line 4") + require.NoError(t, err) + err = g.WriteLine("Line 5") + require.NoError(t, err) + err = g.WriteLine("Line 6") + require.NoError(t, err) + err = g.FlushAndSync() + require.NoError(t, err) // Read g.Head.Path+"000" body1, err := ioutil.ReadFile(g.Head.Path + ".000") @@ -160,8 +177,10 @@ func TestWrite(t *testing.T) { g := createTestGroupWithHeadSizeLimit(t, 0) written := []byte("Medusa") - g.Write(written) - g.FlushAndSync() + _, err := g.Write(written) + require.NoError(t, err) + err = g.FlushAndSync() + require.NoError(t, err) read := make([]byte, len(written)) gr, err := g.NewReader(0) @@ -181,12 +200,16 @@ func TestGroupReaderRead(t *testing.T) { g := createTestGroupWithHeadSizeLimit(t, 0) professor := []byte("Professor Monster") - g.Write(professor) - g.FlushAndSync() + _, err := g.Write(professor) + require.NoError(t, err) + err = g.FlushAndSync() + require.NoError(t, err) g.RotateFile() frankenstein := []byte("Frankenstein's Monster") - g.Write(frankenstein) - g.FlushAndSync() + _, err = g.Write(frankenstein) + require.NoError(t, err) + err = g.FlushAndSync() + require.NoError(t, err) totalWrittenLength := len(professor) + len(frankenstein) read := make([]byte, totalWrittenLength) @@ -210,13 +233,17 @@ func TestGroupReaderRead2(t *testing.T) { g := createTestGroupWithHeadSizeLimit(t, 0) professor := []byte("Professor Monster") - g.Write(professor) - g.FlushAndSync() + _, err := g.Write(professor) + require.NoError(t, err) + err = g.FlushAndSync() + require.NoError(t, err) g.RotateFile() frankenstein := []byte("Frankenstein's Monster") frankensteinPart := []byte("Frankenstein") - g.Write(frankensteinPart) // note writing only a part - g.FlushAndSync() + _, err = g.Write(frankensteinPart) // note writing only a part + require.NoError(t, err) + err = g.FlushAndSync() + require.NoError(t, err) totalLength := len(professor) + len(frankenstein) read := make([]byte, totalLength) @@ -251,8 
+278,10 @@ func TestMaxIndex(t *testing.T) { assert.Zero(t, g.MaxIndex(), "MaxIndex should be zero at the beginning") - g.WriteLine("Line 1") - g.FlushAndSync() + err := g.WriteLine("Line 1") + require.NoError(t, err) + err = g.FlushAndSync() + require.NoError(t, err) g.RotateFile() assert.Equal(t, 1, g.MaxIndex(), "MaxIndex should point to the last file") diff --git a/libs/bech32/bech32.go b/libs/bech32/bech32.go deleted file mode 100644 index a4db86d5f..000000000 --- a/libs/bech32/bech32.go +++ /dev/null @@ -1,29 +0,0 @@ -package bech32 - -import ( - "github.com/btcsuite/btcutil/bech32" - "github.com/pkg/errors" -) - -//ConvertAndEncode converts from a base64 encoded byte string to base32 encoded byte string and then to bech32 -func ConvertAndEncode(hrp string, data []byte) (string, error) { - converted, err := bech32.ConvertBits(data, 8, 5, true) - if err != nil { - return "", errors.Wrap(err, "encoding bech32 failed") - } - return bech32.Encode(hrp, converted) - -} - -//DecodeAndConvert decodes a bech32 encoded string and converts to base64 encoded bytes -func DecodeAndConvert(bech string) (string, []byte, error) { - hrp, data, err := bech32.Decode(bech) - if err != nil { - return "", nil, errors.Wrap(err, "decoding bech32 failed") - } - converted, err := bech32.ConvertBits(data, 5, 8, false) - if err != nil { - return "", nil, errors.Wrap(err, "decoding bech32 failed") - } - return hrp, converted, nil -} diff --git a/libs/bech32/bech32_test.go b/libs/bech32/bech32_test.go deleted file mode 100644 index 830942061..000000000 --- a/libs/bech32/bech32_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package bech32_test - -import ( - "bytes" - "crypto/sha256" - "testing" - - "github.com/tendermint/tendermint/libs/bech32" -) - -func TestEncodeAndDecode(t *testing.T) { - - sum := sha256.Sum256([]byte("hello world\n")) - - bech, err := bech32.ConvertAndEncode("shasum", sum[:]) - - if err != nil { - t.Error(err) - } - hrp, data, err := bech32.DecodeAndConvert(bech) - - if err != nil { - t.Error(err) - } - if hrp != "shasum" { - t.Error("Invalid hrp") - } - if !bytes.Equal(data, sum[:]) { - t.Error("Invalid decode") - } -} diff --git a/libs/bits/bit_array.go b/libs/bits/bit_array.go index 81740336e..9d6901460 100644 --- a/libs/bits/bit_array.go +++ b/libs/bits/bit_array.go @@ -9,6 +9,7 @@ import ( tmmath "github.com/tendermint/tendermint/libs/math" tmrand "github.com/tendermint/tendermint/libs/rand" + tmprotobits "github.com/tendermint/tendermint/proto/tendermint/libs/bits" ) // BitArray is a thread-safe implementation of a bit array. @@ -354,14 +355,12 @@ func (bA *BitArray) Update(o *BitArray) { if bA == nil || o == nil { return } + bA.mtx.Lock() o.mtx.Lock() - defer func() { - bA.mtx.Unlock() - o.mtx.Unlock() - }() - copy(bA.Elems, o.Elems) + o.mtx.Unlock() + bA.mtx.Unlock() } // MarshalJSON implements json.Marshaler interface by marshaling bit array @@ -418,3 +417,28 @@ func (bA *BitArray) UnmarshalJSON(bz []byte) error { *bA = *bA2 //nolint:govet return nil } + +// ToProto converts BitArray to protobuf +func (bA *BitArray) ToProto() *tmprotobits.BitArray { + if bA == nil || len(bA.Elems) == 0 { + return nil + } + + return &tmprotobits.BitArray{ + Bits: int64(bA.Bits), + Elems: bA.Elems, + } +} + +// FromProto sets a protobuf BitArray to the given pointer. 
+func (bA *BitArray) FromProto(protoBitArray *tmprotobits.BitArray) { + if protoBitArray == nil { + bA = nil + return + } + + bA.Bits = int(protoBitArray.Bits) + if len(protoBitArray.Elems) > 0 { + bA.Elems = protoBitArray.Elems + } +} diff --git a/libs/bits/bit_array_test.go b/libs/bits/bit_array_test.go index 449b5a6ef..e4306ecf2 100644 --- a/libs/bits/bit_array_test.go +++ b/libs/bits/bit_array_test.go @@ -265,3 +265,26 @@ func TestJSONMarshalUnmarshal(t *testing.T) { }) } } + +func TestBitArrayProtoBuf(t *testing.T) { + testCases := []struct { + msg string + bA1 *BitArray + expPass bool + }{ + {"success empty", &BitArray{}, true}, + {"success", NewBitArray(1), true}, + {"success", NewBitArray(2), true}, + {"negative", NewBitArray(-1), false}, + } + for _, tc := range testCases { + protoBA := tc.bA1.ToProto() + ba := new(BitArray) + ba.FromProto(protoBA) + if tc.expPass { + require.Equal(t, tc.bA1, ba, tc.msg) + } else { + require.NotEqual(t, tc.bA1, ba, tc.msg) + } + } +} diff --git a/libs/bytes/bytes.go b/libs/bytes/bytes.go index d7682437b..fd27cf33f 100644 --- a/libs/bytes/bytes.go +++ b/libs/bytes/bytes.go @@ -1,7 +1,9 @@ package bytes import ( + "bytes" "encoding/hex" + "encoding/json" "fmt" "strings" ) @@ -9,6 +11,11 @@ import ( // The main purpose of HexBytes is to enable HEX-encoding for json/encoding. type HexBytes []byte +var ( + _ json.Marshaler = HexBytes{} + _ json.Unmarshaler = &HexBytes{} +) + // Marshal needed for protobuf compatibility func (bz HexBytes) Marshal() ([]byte, error) { return bz, nil @@ -20,26 +27,34 @@ func (bz *HexBytes) Unmarshal(data []byte) error { return nil } -// This is the point of Bytes. +// MarshalJSON implements the json.Marshaler interface. The hex bytes are +// encoded as a quoted hexadecimal string. func (bz HexBytes) MarshalJSON() ([]byte, error) { s := strings.ToUpper(hex.EncodeToString(bz)) jbz := make([]byte, len(s)+2) jbz[0] = '"' - copy(jbz[1:], []byte(s)) + copy(jbz[1:], s) jbz[len(jbz)-1] = '"' return jbz, nil } -// This is the point of Bytes. +// UnmarshalJSON implements the json.Unmarshaler interface.
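// A round trip through the ToProto/FromProto pair added above: a minimal
// sketch using only the exported API visible in this patch (NewBitArray and
// the Bits/Elems fields); the import path is taken from the hunks:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/libs/bits"
)

func main() {
	bA := bits.NewBitArray(2)

	// ToProto returns nil for a nil or empty bit array, otherwise a protobuf
	// message carrying Bits and Elems.
	pb := bA.ToProto()

	// FromProto copies the protobuf fields into an existing BitArray. Note
	// that for a nil message it cannot nil out the receiver: the `bA = nil`
	// assignment inside FromProto only rebinds the local pointer.
	bA2 := new(bits.BitArray)
	bA2.FromProto(pb)

	fmt.Println(bA2.Bits == bA.Bits, len(bA2.Elems) == len(bA.Elems)) // true true
}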
func (bz *HexBytes) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, []byte("null")) { + return nil + } + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { return fmt.Errorf("invalid hex string: %s", data) } + bz2, err := hex.DecodeString(string(data[1 : len(data)-1])) if err != nil { return err } + *bz = bz2 + return nil } diff --git a/libs/cli/flags/log_level.go b/libs/cli/flags/log_level.go index ce88ab835..d96ad3f47 100644 --- a/libs/cli/flags/log_level.go +++ b/libs/cli/flags/log_level.go @@ -1,11 +1,10 @@ package flags import ( + "errors" "fmt" "strings" - "github.com/pkg/errors" - "github.com/tendermint/tendermint/libs/log" ) @@ -51,7 +50,7 @@ func ParseLogLevel(lvl string, logger log.Logger, defaultLogLevelValue string) ( if module == defaultLogLevelKey { option, err = log.AllowLevel(level) if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("Failed to parse default log level (pair %s, list %s)", item, l)) + return nil, fmt.Errorf("failed to parse default log level (pair %s, list %s): %w", item, l, err) } options = append(options, option) isDefaultLogLevelSet = true diff --git a/libs/cli/helper.go b/libs/cli/helper.go index a5014c16c..4b87bd60b 100644 --- a/libs/cli/helper.go +++ b/libs/cli/helper.go @@ -19,7 +19,7 @@ func WriteConfigVals(dir string, vals map[string]string) error { data += fmt.Sprintf("%s = \"%s\"\n", k, v) } cfile := filepath.Join(dir, "config.toml") - return ioutil.WriteFile(cfile, []byte(data), 0666) + return ioutil.WriteFile(cfile, []byte(data), 0600) } // RunWithArgs executes the given command with the specified command line args @@ -69,7 +69,7 @@ func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (s go func() { var buf bytes.Buffer // io.Copy will end when we call reader.Close() below - io.Copy(&buf, reader) + io.Copy(&buf, reader) //nolint:errcheck //ignore error stdC <- buf.String() }() return &stdC diff --git a/libs/cli/setup.go b/libs/cli/setup.go index 6a379c4ad..e4955dcf4 100644 --- a/libs/cli/setup.go +++ b/libs/cli/setup.go @@ -4,9 +4,9 @@ import ( "fmt" "os" "path/filepath" + "runtime" "strings" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -89,7 +89,10 @@ func (e Executor) Execute() error { err := e.Command.Execute() if err != nil { if viper.GetBool(TraceFlag) { - fmt.Fprintf(os.Stderr, "ERROR: %+v\n", err) + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + fmt.Fprintf(os.Stderr, "ERROR: %v\n%s\n", err, buf) } else { fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) } @@ -151,7 +154,7 @@ func validateOutput(cmd *cobra.Command, args []string) error { switch output { case "text", "json": default: - return errors.Errorf("unsupported output format: %s", output) + return fmt.Errorf("unsupported output format: %s", output) } return nil } diff --git a/libs/cli/setup_test.go b/libs/cli/setup_test.go index 04209e493..0cb322344 100644 --- a/libs/cli/setup_test.go +++ b/libs/cli/setup_test.go @@ -7,7 +7,6 @@ import ( "strings" "testing" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/stretchr/testify/assert" @@ -201,10 +200,10 @@ func TestSetupTrace(t *testing.T) { long bool expected string }{ - {nil, nil, false, "Trace flag = false"}, - {[]string{"--trace"}, nil, true, "Trace flag = true"}, + {nil, nil, false, "trace flag = false"}, + {[]string{"--trace"}, nil, true, "trace flag = true"}, {[]string{"--no-such-flag"}, nil, false, "unknown flag: --no-such-flag"}, - {nil, 
map[string]string{"DBG_TRACE": "true"}, true, "Trace flag = true"}, + {nil, map[string]string{"DBG_TRACE": "true"}, true, "trace flag = true"}, } for idx, tc := range cases { @@ -213,7 +212,7 @@ func TestSetupTrace(t *testing.T) { trace := &cobra.Command{ Use: "trace", RunE: func(cmd *cobra.Command, args []string) error { - return errors.Errorf("Trace flag = %t", viper.GetBool(TraceFlag)) + return fmt.Errorf("trace flag = %t", viper.GetBool(TraceFlag)) }, } cmd := PrepareBaseCmd(trace, "DBG", "/qwerty/asdfgh") // some missing dir.. @@ -228,10 +227,11 @@ func TestSetupTrace(t *testing.T) { msg := strings.Split(stderr, "\n") desired := fmt.Sprintf("ERROR: %s", tc.expected) assert.Equal(t, desired, msg[0], i) + t.Log(msg) if tc.long && assert.True(t, len(msg) > 2, i) { // the next line starts the stack trace... - assert.Contains(t, msg[1], "TestSetupTrace", i) - assert.Contains(t, msg[2], "setup_test.go", i) + assert.Contains(t, stderr, "TestSetupTrace", i) + assert.Contains(t, stderr, "setup_test.go", i) } } } diff --git a/libs/clist/clist.go b/libs/clist/clist.go index 393bdf73f..5579b1d0f 100644 --- a/libs/clist/clist.go +++ b/libs/clist/clist.go @@ -14,6 +14,8 @@ to ensure garbage collection of removed elements. import ( "fmt" "sync" + + tmsync "github.com/tendermint/tendermint/libs/sync" ) // MaxLength is the max allowed number of elements a linked list is @@ -42,7 +44,7 @@ waiting on NextWait() (since it's just a read operation). */ type CElement struct { - mtx sync.RWMutex + mtx tmsync.RWMutex prev *CElement prevWg *sync.WaitGroup prevWaitCh chan struct{} @@ -218,7 +220,7 @@ func (e *CElement) SetRemoved() { // Operations are goroutine-safe. // Panics if length grows beyond the max. type CList struct { - mtx sync.RWMutex + mtx tmsync.RWMutex wg *sync.WaitGroup waitCh chan struct{} head *CElement // first element diff --git a/libs/clist/clist_test.go b/libs/clist/clist_test.go index 14b7e37c0..d10a1e5ae 100644 --- a/libs/clist/clist_test.go +++ b/libs/clist/clist_test.go @@ -33,21 +33,21 @@ func TestSmall(t *testing.T) { t.Error("Expected len 3, got ", l.Len()) } - //fmt.Printf("%p %v\n", el1, el1) - //fmt.Printf("%p %v\n", el2, el2) - //fmt.Printf("%p %v\n", el3, el3) + // fmt.Printf("%p %v\n", el1, el1) + // fmt.Printf("%p %v\n", el2, el2) + // fmt.Printf("%p %v\n", el3, el3) r1 := l.Remove(el1) - //fmt.Printf("%p %v\n", el1, el1) - //fmt.Printf("%p %v\n", el2, el2) - //fmt.Printf("%p %v\n", el3, el3) + // fmt.Printf("%p %v\n", el1, el1) + // fmt.Printf("%p %v\n", el2, el2) + // fmt.Printf("%p %v\n", el3, el3) r2 := l.Remove(el2) - //fmt.Printf("%p %v\n", el1, el1) - //fmt.Printf("%p %v\n", el2, el2) - //fmt.Printf("%p %v\n", el3, el3) + // fmt.Printf("%p %v\n", el1, el1) + // fmt.Printf("%p %v\n", el2, el2) + // fmt.Printf("%p %v\n", el3, el3) r3 := l.Remove(el3) @@ -97,10 +97,10 @@ func _TestGCFifo(t *testing.T) { for el := l.Front(); el != nil; { l.Remove(el) - //oldEl := el + // oldEl := el el = el.Next() - //oldEl.DetachPrev() - //oldEl.DetachNext() + // oldEl.DetachPrev() + // oldEl.DetachNext() } runtime.GC() @@ -211,7 +211,7 @@ func TestScanRightDeleteRandom(t *testing.T) { // Remove it l.Remove(rmEl) - //fmt.Print(".") + // fmt.Print(".") // Insert a new element newEl := l.PushBack(-1*i - 1) diff --git a/libs/cmap/cmap.go b/libs/cmap/cmap.go index da2275d7d..d16631d31 100644 --- a/libs/cmap/cmap.go +++ b/libs/cmap/cmap.go @@ -1,11 +1,13 @@ package cmap -import "sync" +import ( + tmsync "github.com/tendermint/tendermint/libs/sync" +) // CMap is a goroutine-safe map type CMap 
struct { m map[string]interface{} - l sync.Mutex + l tmsync.Mutex } func NewCMap() *CMap { diff --git a/libs/cmap/cmap_test.go b/libs/cmap/cmap_test.go index b6ea0117d..bab78da96 100644 --- a/libs/cmap/cmap_test.go +++ b/libs/cmap/cmap_test.go @@ -22,7 +22,7 @@ func TestIterateKeysWithValues(t *testing.T) { // Iterating Keys, checking for matching Value for _, key := range cmap.Keys() { - val := strings.Replace(key, "key", "value", -1) + val := strings.ReplaceAll(key, "key", "value") assert.Equal(t, val, cmap.Get(key)) } @@ -60,10 +60,10 @@ func TestContains(t *testing.T) { func BenchmarkCMapHas(b *testing.B) { m := NewCMap() for i := 0; i < 1000; i++ { - m.Set(string(i), i) + m.Set(string(rune(i)), i) } b.ResetTimer() for i := 0; i < b.N; i++ { - m.Has(string(i)) + m.Has(string(rune(i))) } } diff --git a/libs/events/README.md b/libs/events/README.md index 9f6908a7f..9c4d7dc55 100644 --- a/libs/events/README.md +++ b/libs/events/README.md @@ -1,31 +1,32 @@ # events + `import "github.com/tendermint/tendermint/libs/events"` * [Overview](#pkg-overview) * [Index](#pkg-index) -## Overview -Pub-Sub in go with event caching - +## Overview +Pub-Sub in go with event caching +## Index -## Index * [type EventCache](#EventCache) - * [func NewEventCache(evsw Fireable) *EventCache](#NewEventCache) - * [func (evc *EventCache) FireEvent(event string, data EventData)](#EventCache.FireEvent) - * [func (evc *EventCache) Flush()](#EventCache.Flush) + * [func NewEventCache(evsw Fireable) *EventCache](#NewEventCache) + * [func (evc *EventCache) FireEvent(event string, data EventData)](#EventCache.FireEvent) + * [func (evc *EventCache) Flush()](#EventCache.Flush) * [type EventCallback](#EventCallback) * [type EventData](#EventData) * [type EventSwitch](#EventSwitch) - * [func NewEventSwitch() EventSwitch](#NewEventSwitch) + * [func NewEventSwitch() EventSwitch](#NewEventSwitch) * [type Eventable](#Eventable) * [type Fireable](#Fireable) -#### Package files +### Package files + [event_cache.go](/src/github.com/tendermint/tendermint/libs/events/event_cache.go) [events.go](/src/github.com/tendermint/tendermint/libs/events/events.go) @@ -33,12 +34,14 @@ Pub-Sub in go with event caching -## type [EventCache](/src/target/event_cache.go?s=116:179#L5) +## Type [EventCache](/src/target/event_cache.go?s=116:179#L5) + ``` go type EventCache struct { // contains filtered or unexported fields } ``` + An EventCache buffers events for a Fireable All events are cached. Filtering happens on Flush @@ -48,36 +51,43 @@ All events are cached. Filtering happens on Flush -### func [NewEventCache](/src/target/event_cache.go?s=239:284#L11) +### func [NewEventCache](/src/target/event_cache.go?s=239:284#L11) + ``` go func NewEventCache(evsw Fireable) *EventCache ``` + Create a new EventCache with an EventSwitch as backend -### func (\*EventCache) [FireEvent](/src/target/event_cache.go?s=449:511#L24) +### func (\*EventCache) [FireEvent](/src/target/event_cache.go?s=449:511#L24) + ``` go func (evc *EventCache) FireEvent(event string, data EventData) ``` + Cache an event to be fired upon finality. -### func (\*EventCache) [Flush](/src/target/event_cache.go?s=735:765#L31) +### func (\*EventCache) [Flush](/src/target/event_cache.go?s=735:765#L31) + ``` go func (evc *EventCache) Flush() ``` + Fire events by running evsw.FireEvent on all cached events. Blocks. 
Clears cached events -## type [EventCallback](/src/target/events.go?s=4201:4240#L185) +## Type [EventCallback](/src/target/events.go?s=4201:4240#L185) + ``` go type EventCallback func(data EventData) ``` @@ -90,11 +100,13 @@ type EventCallback func(data EventData) -## type [EventData](/src/target/events.go?s=243:294#L14) +## Type [EventData](/src/target/events.go?s=243:294#L14) + ``` go type EventData interface { } ``` + Generic event data can be typed and registered with tendermint/go-amino via concrete implementation of this interface @@ -107,7 +119,8 @@ via concrete implementation of this interface -## type [EventSwitch](/src/target/events.go?s=560:771#L29) +## Type [EventSwitch](/src/target/events.go?s=560:771#L29) + ``` go type EventSwitch interface { service.Service @@ -124,7 +137,8 @@ type EventSwitch interface { -### func [NewEventSwitch](/src/target/events.go?s=917:950#L46) +### func [NewEventSwitch](/src/target/events.go?s=917:950#L46) + ``` go func NewEventSwitch() EventSwitch ``` @@ -132,12 +146,14 @@ func NewEventSwitch() EventSwitch -## type [Eventable](/src/target/events.go?s=378:440#L20) +## Type [Eventable](/src/target/events.go?s=378:440#L20) + ``` go type Eventable interface { SetEventSwitch(evsw EventSwitch) } ``` + reactors and other modules should export this interface to become eventable @@ -150,12 +166,14 @@ this interface to become eventable -## type [Fireable](/src/target/events.go?s=490:558#L25) +## Type [Fireable](/src/target/events.go?s=490:558#L25) + ``` go type Fireable interface { FireEvent(event string, data EventData) } ``` + an event switch or cache implements fireable diff --git a/libs/events/event_cache_test.go b/libs/events/event_cache_test.go index ab321da3a..342fe8c65 100644 --- a/libs/events/event_cache_test.go +++ b/libs/events/event_cache_test.go @@ -9,23 +9,29 @@ import ( func TestEventCache_Flush(t *testing.T) { evsw := NewEventSwitch() - evsw.Start() - evsw.AddListenerForEvent("nothingness", "", func(data EventData) { + err := evsw.Start() + require.NoError(t, err) + + err = evsw.AddListenerForEvent("nothingness", "", func(data EventData) { // Check we are not initialising an empty buffer full of zeroed eventInfos in the EventCache require.FailNow(t, "We should never receive a message on this switch since none are fired") }) + require.NoError(t, err) + evc := NewEventCache(evsw) evc.Flush() // Check after reset evc.Flush() fail := true pass := false - evsw.AddListenerForEvent("somethingness", "something", func(data EventData) { + err = evsw.AddListenerForEvent("somethingness", "something", func(data EventData) { if fail { require.FailNow(t, "Shouldn't see a message until flushed") } pass = true }) + require.NoError(t, err) + evc.FireEvent("something", struct{ int }{1}) evc.FireEvent("something", struct{ int }{2}) evc.FireEvent("something", struct{ int }{3}) diff --git a/libs/events/events.go b/libs/events/events.go index 2468e4838..ffc06cc02 100644 --- a/libs/events/events.go +++ b/libs/events/events.go @@ -3,9 +3,9 @@ package events import ( "fmt" - "sync" "github.com/tendermint/tendermint/libs/service" + tmsync "github.com/tendermint/tendermint/libs/sync" ) // ErrListenerWasRemoved is returned by AddEvent if the listener was removed. 
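// With these changes every AddListenerForEvent call returns an error that
// callers are expected to check. A minimal usage sketch built only from the
// API exercised in the tests below:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/libs/events"
)

func main() {
	evsw := events.NewEventSwitch()
	if err := evsw.Start(); err != nil {
		panic(err)
	}
	defer func() { _ = evsw.Stop() }()

	done := make(chan events.EventData, 1)

	// AddListenerForEvent can fail (e.g. ErrListenerWasRemoved, per the
	// comment above), so its error must now be checked.
	if err := evsw.AddListenerForEvent("listener", "event", func(data events.EventData) {
		done <- data
	}); err != nil {
		panic(err)
	}

	evsw.FireEvent("event", "data")
	fmt.Println(<-done) // data
}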
@@ -54,7 +54,7 @@ type EventSwitch interface { type eventSwitch struct { service.BaseService - mtx sync.RWMutex + mtx tmsync.RWMutex eventCells map[string]*eventCell listeners map[string]*eventListener } @@ -162,7 +162,7 @@ func (evsw *eventSwitch) FireEvent(event string, data EventData) { // eventCell handles keeping track of listener callbacks for a given event. type eventCell struct { - mtx sync.RWMutex + mtx tmsync.RWMutex listeners map[string]EventCallback } @@ -206,7 +206,7 @@ type EventCallback func(data EventData) type eventListener struct { id string - mtx sync.RWMutex + mtx tmsync.RWMutex removed bool events []string } diff --git a/libs/events/events_test.go b/libs/events/events_test.go index c7b2b9d8b..9e21e0235 100644 --- a/libs/events/events_test.go +++ b/libs/events/events_test.go @@ -17,15 +17,20 @@ func TestAddListenerForEventFireOnce(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() require.NoError(t, err) - defer evsw.Stop() + t.Cleanup(func() { + if err := evsw.Stop(); err != nil { + t.Error(err) + } + }) messages := make(chan EventData) - evsw.AddListenerForEvent("listener", "event", + err = evsw.AddListenerForEvent("listener", "event", func(data EventData) { // test there's no deadlock if we remove the listener inside a callback evsw.RemoveListener("listener") messages <- data }) + require.NoError(t, err) go evsw.FireEvent("event", "data") received := <-messages if received != "data" { @@ -39,16 +44,21 @@ func TestAddListenerForEventFireMany(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() require.NoError(t, err) - defer evsw.Stop() + t.Cleanup(func() { + if err := evsw.Stop(); err != nil { + t.Error(err) + } + }) doneSum := make(chan uint64) doneSending := make(chan uint64) numbers := make(chan uint64, 4) // subscribe one listener for one event - evsw.AddListenerForEvent("listener", "event", + err = evsw.AddListenerForEvent("listener", "event", func(data EventData) { numbers <- data.(uint64) }) + require.NoError(t, err) // collect received events go sumReceivedNumbers(numbers, doneSum) // go fire events @@ -68,7 +78,11 @@ func TestAddListenerForDifferentEvents(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() require.NoError(t, err) - defer evsw.Stop() + t.Cleanup(func() { + if err := evsw.Stop(); err != nil { + t.Error(err) + } + }) doneSum := make(chan uint64) doneSending1 := make(chan uint64) @@ -76,18 +90,21 @@ func TestAddListenerForDifferentEvents(t *testing.T) { doneSending3 := make(chan uint64) numbers := make(chan uint64, 4) // subscribe one listener to three events - evsw.AddListenerForEvent("listener", "event1", + err = evsw.AddListenerForEvent("listener", "event1", func(data EventData) { numbers <- data.(uint64) }) - evsw.AddListenerForEvent("listener", "event2", + require.NoError(t, err) + err = evsw.AddListenerForEvent("listener", "event2", func(data EventData) { numbers <- data.(uint64) }) - evsw.AddListenerForEvent("listener", "event3", + require.NoError(t, err) + err = evsw.AddListenerForEvent("listener", "event3", func(data EventData) { numbers <- data.(uint64) }) + require.NoError(t, err) // collect received events go sumReceivedNumbers(numbers, doneSum) // go fire events @@ -113,7 +130,12 @@ func TestAddDifferentListenerForDifferentEvents(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() require.NoError(t, err) - defer evsw.Stop() + + t.Cleanup(func() { + if err := evsw.Stop(); err != nil { + t.Error(err) + } + }) doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) @@ -123,26 +145,31 @@ func 
TestAddDifferentListenerForDifferentEvents(t *testing.T) { numbers1 := make(chan uint64, 4) numbers2 := make(chan uint64, 4) // subscribe two listener to three events - evsw.AddListenerForEvent("listener1", "event1", + err = evsw.AddListenerForEvent("listener1", "event1", func(data EventData) { numbers1 <- data.(uint64) }) - evsw.AddListenerForEvent("listener1", "event2", + require.NoError(t, err) + err = evsw.AddListenerForEvent("listener1", "event2", func(data EventData) { numbers1 <- data.(uint64) }) - evsw.AddListenerForEvent("listener1", "event3", + require.NoError(t, err) + err = evsw.AddListenerForEvent("listener1", "event3", func(data EventData) { numbers1 <- data.(uint64) }) - evsw.AddListenerForEvent("listener2", "event2", + require.NoError(t, err) + err = evsw.AddListenerForEvent("listener2", "event2", func(data EventData) { numbers2 <- data.(uint64) }) - evsw.AddListenerForEvent("listener2", "event3", + require.NoError(t, err) + err = evsw.AddListenerForEvent("listener2", "event3", func(data EventData) { numbers2 <- data.(uint64) }) + require.NoError(t, err) // collect received events for listener1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for listener2 @@ -175,7 +202,11 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() require.NoError(t, err) - defer evsw.Stop() + t.Cleanup(func() { + if err := evsw.Stop(); err != nil { + t.Error(err) + } + }) done1 := make(chan struct{}) done2 := make(chan struct{}) @@ -183,23 +214,25 @@ func TestAddAndRemoveListenerConcurrency(t *testing.T) { // Must be executed concurrently to uncover the data race. // 1. RemoveListener go func() { + defer close(done1) for i := 0; i < roundCount; i++ { evsw.RemoveListener("listener") } - close(done1) }() // 2. 
AddListenerForEvent go func() { + defer close(done2) for i := 0; i < roundCount; i++ { index := i - evsw.AddListenerForEvent("listener", fmt.Sprintf("event%d", index), + // we explicitly ignore errors here, since the listener will sometimes be removed + // (that's what we're testing) + _ = evsw.AddListenerForEvent("listener", fmt.Sprintf("event%d", index), func(data EventData) { t.Errorf("should not run callback for %d.\n", index) stopInputEvent = true }) } - close(done2) }() <-done1 @@ -219,7 +252,11 @@ func TestAddAndRemoveListener(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() require.NoError(t, err) - defer evsw.Stop() + t.Cleanup(func() { + if err := evsw.Stop(); err != nil { + t.Error(err) + } + }) doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) @@ -228,14 +265,16 @@ func TestAddAndRemoveListener(t *testing.T) { numbers1 := make(chan uint64, 4) numbers2 := make(chan uint64, 4) // subscribe two listener to three events - evsw.AddListenerForEvent("listener", "event1", + err = evsw.AddListenerForEvent("listener", "event1", func(data EventData) { numbers1 <- data.(uint64) }) - evsw.AddListenerForEvent("listener", "event2", + require.NoError(t, err) + err = evsw.AddListenerForEvent("listener", "event2", func(data EventData) { numbers2 <- data.(uint64) }) + require.NoError(t, err) // collect received events for event1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for event2 @@ -264,19 +303,27 @@ func TestRemoveListener(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() require.NoError(t, err) - defer evsw.Stop() + t.Cleanup(func() { + if err := evsw.Stop(); err != nil { + t.Error(err) + } + }) count := 10 sum1, sum2 := 0, 0 // add some listeners and make sure they work - evsw.AddListenerForEvent("listener", "event1", + err = evsw.AddListenerForEvent("listener", "event1", func(data EventData) { sum1++ }) - evsw.AddListenerForEvent("listener", "event2", + require.NoError(t, err) + + err = evsw.AddListenerForEvent("listener", "event2", func(data EventData) { sum2++ }) + require.NoError(t, err) + for i := 0; i < count; i++ { evsw.FireEvent("event1", true) evsw.FireEvent("event2", true) @@ -317,7 +364,11 @@ func TestRemoveListenersAsync(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() require.NoError(t, err) - defer evsw.Stop() + t.Cleanup(func() { + if err := evsw.Stop(); err != nil { + t.Error(err) + } + }) doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) @@ -327,30 +378,36 @@ func TestRemoveListenersAsync(t *testing.T) { numbers1 := make(chan uint64, 4) numbers2 := make(chan uint64, 4) // subscribe two listener to three events - evsw.AddListenerForEvent("listener1", "event1", + err = evsw.AddListenerForEvent("listener1", "event1", func(data EventData) { numbers1 <- data.(uint64) }) - evsw.AddListenerForEvent("listener1", "event2", + require.NoError(t, err) + err = evsw.AddListenerForEvent("listener1", "event2", func(data EventData) { numbers1 <- data.(uint64) }) - evsw.AddListenerForEvent("listener1", "event3", + require.NoError(t, err) + err = evsw.AddListenerForEvent("listener1", "event3", func(data EventData) { numbers1 <- data.(uint64) }) - evsw.AddListenerForEvent("listener2", "event1", + require.NoError(t, err) + err = evsw.AddListenerForEvent("listener2", "event1", func(data EventData) { numbers2 <- data.(uint64) }) - evsw.AddListenerForEvent("listener2", "event2", + require.NoError(t, err) + err = evsw.AddListenerForEvent("listener2", "event2", func(data EventData) { numbers2 <- data.(uint64) }) - 
evsw.AddListenerForEvent("listener2", "event3", + require.NoError(t, err) + err = evsw.AddListenerForEvent("listener2", "event3", func(data EventData) { numbers2 <- data.(uint64) }) + require.NoError(t, err) // collect received events for event1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for event2 @@ -361,7 +418,7 @@ func TestRemoveListenersAsync(t *testing.T) { for k := uint16(0); k < 400; k++ { listenerNumber := r1.Intn(100) + 3 eventNumber := r1.Intn(3) + 1 - go evsw.AddListenerForEvent(fmt.Sprintf("listener%v", listenerNumber), + go evsw.AddListenerForEvent(fmt.Sprintf("listener%v", listenerNumber), //nolint:errcheck // ignore for tests fmt.Sprintf("event%v", eventNumber), func(_ EventData) {}) } diff --git a/libs/fail/fail.go b/libs/fail/fail.go index 03f4feda0..03a2ca668 100644 --- a/libs/fail/fail.go +++ b/libs/fail/fail.go @@ -23,7 +23,7 @@ func envSet() int { } // Fail when FAIL_TEST_INDEX == callIndex -var callIndex int //indexes Fail calls +var callIndex int // indexes Fail calls func Fail() { callIndexToFail := envSet() @@ -32,16 +32,9 @@ func Fail() { } if callIndex == callIndexToFail { - Exit() + fmt.Printf("*** fail-test %d ***\n", callIndex) + os.Exit(1) } callIndex++ } - -func Exit() { - fmt.Printf("*** fail-test %d ***\n", callIndex) - os.Exit(1) - // proc, _ := os.FindProcess(os.Getpid()) - // proc.Signal(os.Interrupt) - // panic(fmt.Sprintf("*** fail-test %d ***", callIndex)) -} diff --git a/libs/flowrate/README.md b/libs/flowrate/README.md index db428090c..caed79aa3 100644 --- a/libs/flowrate/README.md +++ b/libs/flowrate/README.md @@ -7,4 +7,4 @@ go get github.com/mxk/go-flowrate/flowrate The documentation is available at: -http://godoc.org/github.com/mxk/go-flowrate/flowrate + diff --git a/libs/flowrate/flowrate.go b/libs/flowrate/flowrate.go index 2a053805c..c7ba93282 100644 --- a/libs/flowrate/flowrate.go +++ b/libs/flowrate/flowrate.go @@ -8,13 +8,14 @@ package flowrate import ( "math" - "sync" "time" + + tmsync "github.com/tendermint/tendermint/libs/sync" ) // Monitor monitors and limits the transfer rate of a data stream. type Monitor struct { - mu sync.Mutex // Mutex guarding access to all internal fields + mu tmsync.Mutex // Mutex guarding access to all internal fields active bool // Flag indicating an active transfer start time.Duration // Transfer start time (clock() value) bytes int64 // Total number of bytes transferred diff --git a/libs/json/decoder.go b/libs/json/decoder.go new file mode 100644 index 000000000..86ff27d39 --- /dev/null +++ b/libs/json/decoder.go @@ -0,0 +1,278 @@ +package json + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" +) + +// Unmarshal unmarshals JSON into the given value, using Amino-compatible JSON encoding (strings +// for 64-bit numbers, and type wrappers for registered types). +func Unmarshal(bz []byte, v interface{}) error { + return decode(bz, v) +} + +func decode(bz []byte, v interface{}) error { + if len(bz) == 0 { + return errors.New("cannot decode empty bytes") + } + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return errors.New("must decode into a pointer") + } + rv = rv.Elem() + + // If this is a registered type, defer to interface decoder regardless of whether the input is + // an interface or a bare value. This retains Amino's behavior, but is inconsistent with + // behavior in structs where an interface field will get the type wrapper while a bare value + // field will not. 
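// The comment block above describes the dispatch rule for registered types:
// they always go through the interface decoder and carry a
// {"type":...,"value":...} wrapper, even when decoded as bare values. A
// minimal round-trip sketch, reusing the Car/Vehicle registration shown in
// doc.go further down:

package main

import (
	"fmt"

	tmjson "github.com/tendermint/tendermint/libs/json"
)

type Vehicle interface{ Drive() error }

type Car struct{ Wheels int8 }

func (c *Car) Drive() error { return nil }

func main() {
	// Registered as a pointer, so interface values decode to *Car.
	tmjson.RegisterType(&Car{}, "vehicle/car")

	bz, err := tmjson.Marshal(Vehicle(&Car{Wheels: 4}))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bz)) // {"type":"vehicle/car","value":{"Wheels":4}}

	var v Vehicle
	if err := tmjson.Unmarshal(bz, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", v) // &main.Car{Wheels:4}
}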
+ if typeRegistry.name(rv.Type()) != "" { + return decodeReflectInterface(bz, rv) + } + + return decodeReflect(bz, rv) +} + +func decodeReflect(bz []byte, rv reflect.Value) error { + if !rv.CanAddr() { + return errors.New("value is not addressable") + } + + // Handle null for slices, interfaces, and pointers + if bytes.Equal(bz, []byte("null")) { + rv.Set(reflect.Zero(rv.Type())) + return nil + } + + // Dereference-and-construct pointers, to handle nested pointers. + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + + // Times must be UTC and end with Z + if rv.Type() == timeType { + switch { + case len(bz) < 2 || bz[0] != '"' || bz[len(bz)-1] != '"': + return fmt.Errorf("JSON time must be an RFC3339 string, but got %q", bz) + case bz[len(bz)-2] != 'Z': + return fmt.Errorf("JSON time must be UTC and end with 'Z', but got %q", bz) + } + } + + // If value implements json.Unmarshaler, call it. + if rv.Addr().Type().Implements(jsonUnmarshalerType) { + return rv.Addr().Interface().(json.Unmarshaler).UnmarshalJSON(bz) + } + + switch rv.Type().Kind() { + // Decode complex types recursively. + case reflect.Slice, reflect.Array: + return decodeReflectList(bz, rv) + + case reflect.Map: + return decodeReflectMap(bz, rv) + + case reflect.Struct: + return decodeReflectStruct(bz, rv) + + case reflect.Interface: + return decodeReflectInterface(bz, rv) + + // For 64-bit integers, unwrap expected string and defer to stdlib for integer decoding. + case reflect.Int64, reflect.Int, reflect.Uint64, reflect.Uint: + if bz[0] != '"' || bz[len(bz)-1] != '"' { + return fmt.Errorf("invalid 64-bit integer encoding %q, expected string", string(bz)) + } + bz = bz[1 : len(bz)-1] + fallthrough + + // Anything else we defer to the stdlib. + default: + return decodeStdlib(bz, rv) + } +} + +func decodeReflectList(bz []byte, rv reflect.Value) error { + if !rv.CanAddr() { + return errors.New("list value is not addressable") + } + + switch rv.Type().Elem().Kind() { + // Decode base64-encoded bytes using stdlib decoder, via byte slice for arrays. + case reflect.Uint8: + if rv.Type().Kind() == reflect.Array { + var buf []byte + if err := json.Unmarshal(bz, &buf); err != nil { + return err + } + if len(buf) != rv.Len() { + return fmt.Errorf("got %v bytes, expected %v", len(buf), rv.Len()) + } + reflect.Copy(rv, reflect.ValueOf(buf)) + + } else if err := decodeStdlib(bz, rv); err != nil { + return err + } + + // Decode anything else into a raw JSON slice, and decode values recursively. + default: + var rawSlice []json.RawMessage + if err := json.Unmarshal(bz, &rawSlice); err != nil { + return err + } + if rv.Type().Kind() == reflect.Slice { + rv.Set(reflect.MakeSlice(reflect.SliceOf(rv.Type().Elem()), len(rawSlice), len(rawSlice))) + } + if rv.Len() != len(rawSlice) { // arrays of wrong size + return fmt.Errorf("got list of %v elements, expected %v", len(rawSlice), rv.Len()) + } + for i, bz := range rawSlice { + if err := decodeReflect(bz, rv.Index(i)); err != nil { + return err + } + } + } + + // Replace empty slices with nil slices, for Amino compatibility + if rv.Type().Kind() == reflect.Slice && rv.Len() == 0 { + rv.Set(reflect.Zero(rv.Type())) + } + + return nil +} + +func decodeReflectMap(bz []byte, rv reflect.Value) error { + if !rv.CanAddr() { + return errors.New("map value is not addressable") + } + + // Decode into a raw JSON map, using string keys.
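// decodeReflectMap, continued below, decodes through a string-keyed raw map
// and rejects any other key type. A short sketch of the visible behavior,
// mirroring the decoder test cases:

package main

import (
	"fmt"

	tmjson "github.com/tendermint/tendermint/libs/json"
)

func main() {
	// 64-bit values arrive as strings; keys must be strings.
	var m map[string]int64
	if err := tmjson.Unmarshal([]byte(`{"a":"1","b":"2"}`), &m); err != nil {
		panic(err)
	}
	fmt.Println(m["a"], m["b"]) // 1 2

	// Non-string keys fail regardless of the payload.
	var bad map[int]int
	err := tmjson.Unmarshal([]byte(`{}`), &bad)
	fmt.Println(err != nil) // true
}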
+ rawMap := make(map[string]json.RawMessage) + if err := json.Unmarshal(bz, &rawMap); err != nil { + return err + } + if rv.Type().Key().Kind() != reflect.String { + return fmt.Errorf("map keys must be strings, got %v", rv.Type().Key().String()) + } + + // Recursively decode values. + rv.Set(reflect.MakeMapWithSize(rv.Type(), len(rawMap))) + for key, bz := range rawMap { + value := reflect.New(rv.Type().Elem()).Elem() + if err := decodeReflect(bz, value); err != nil { + return err + } + rv.SetMapIndex(reflect.ValueOf(key), value) + } + return nil +} + +func decodeReflectStruct(bz []byte, rv reflect.Value) error { + if !rv.CanAddr() { + return errors.New("struct value is not addressable") + } + sInfo := makeStructInfo(rv.Type()) + + // Decode raw JSON values into a string-keyed map. + rawMap := make(map[string]json.RawMessage) + if err := json.Unmarshal(bz, &rawMap); err != nil { + return err + } + for i, fInfo := range sInfo.fields { + if !fInfo.hidden { + frv := rv.Field(i) + bz := rawMap[fInfo.jsonName] + if len(bz) > 0 { + if err := decodeReflect(bz, frv); err != nil { + return err + } + } else if !fInfo.omitEmpty { + frv.Set(reflect.Zero(frv.Type())) + } + } + } + + return nil +} + +func decodeReflectInterface(bz []byte, rv reflect.Value) error { + if !rv.CanAddr() { + return errors.New("interface value not addressable") + } + + // Decode the interface wrapper. + wrapper := interfaceWrapper{} + if err := json.Unmarshal(bz, &wrapper); err != nil { + return err + } + if wrapper.Type == "" { + return errors.New("interface type cannot be empty") + } + if len(wrapper.Value) == 0 { + return errors.New("interface value cannot be empty") + } + + // Dereference-and-construct pointers, to handle nested pointers. + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + + // Look up the interface type, and construct a concrete value. + rt, returnPtr := typeRegistry.lookup(wrapper.Type) + if rt == nil { + return fmt.Errorf("unknown type %q", wrapper.Type) + } + + cptr := reflect.New(rt) + crv := cptr.Elem() + if err := decodeReflect(wrapper.Value, crv); err != nil { + return err + } + + // This makes sure interface implementations with pointer receivers (e.g. func (c *Car)) are + // constructed as pointers behind the interface. The types must be registered as pointers with + // RegisterType(). + if rv.Type().Kind() == reflect.Interface && returnPtr { + if !cptr.Type().AssignableTo(rv.Type()) { + return fmt.Errorf("invalid type %q for this value", wrapper.Type) + } + rv.Set(cptr) + } else { + if !crv.Type().AssignableTo(rv.Type()) { + return fmt.Errorf("invalid type %q for this value", wrapper.Type) + } + rv.Set(crv) + } + return nil +} + +func decodeStdlib(bz []byte, rv reflect.Value) error { + if !rv.CanAddr() && rv.Kind() != reflect.Ptr { + return errors.New("value must be addressable or pointer") + } + + // Make sure we are unmarshaling into a pointer. 
+ target := rv + if rv.Kind() != reflect.Ptr { + target = reflect.New(rv.Type()) + } + if err := json.Unmarshal(bz, target.Interface()); err != nil { + return err + } + rv.Set(target.Elem()) + return nil +} + +type interfaceWrapper struct { + Type string `json:"type"` + Value json.RawMessage `json:"value"` +} diff --git a/libs/json/decoder_test.go b/libs/json/decoder_test.go new file mode 100644 index 000000000..41faa1062 --- /dev/null +++ b/libs/json/decoder_test.go @@ -0,0 +1,151 @@ +package json_test + +import ( + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/libs/json" +) + +func TestUnmarshal(t *testing.T) { + i64Nil := (*int64)(nil) + str := "string" + strPtr := &str + structNil := (*Struct)(nil) + i32 := int32(32) + i64 := int64(64) + + testcases := map[string]struct { + json string + value interface{} + err bool + }{ + "bool true": {"true", true, false}, + "bool false": {"false", false, false}, + "float32": {"3.14", float32(3.14), false}, + "float64": {"3.14", float64(3.14), false}, + "int32": {`32`, int32(32), false}, + "int32 string": {`"32"`, int32(32), true}, + "int32 ptr": {`32`, &i32, false}, + "int64": {`"64"`, int64(64), false}, + "int64 noend": {`"64`, int64(64), true}, + "int64 number": {`64`, int64(64), true}, + "int64 ptr": {`"64"`, &i64, false}, + "int64 ptr nil": {`null`, i64Nil, false}, + "string": {`"foo"`, "foo", false}, + "string noend": {`"foo`, "foo", true}, + "string ptr": {`"string"`, &str, false}, + "slice byte": {`"AQID"`, []byte{1, 2, 3}, false}, + "slice bytes": {`["AQID"]`, [][]byte{{1, 2, 3}}, false}, + "slice int32": {`[1,2,3]`, []int32{1, 2, 3}, false}, + "slice int64": {`["1","2","3"]`, []int64{1, 2, 3}, false}, + "slice int64 number": {`[1,2,3]`, []int64{1, 2, 3}, true}, + "slice int64 ptr": {`["64"]`, []*int64{&i64}, false}, + "slice int64 empty": {`[]`, []int64(nil), false}, + "slice int64 null": {`null`, []int64(nil), false}, + "array byte": {`"AQID"`, [3]byte{1, 2, 3}, false}, + "array byte large": {`"AQID"`, [4]byte{1, 2, 3, 4}, true}, + "array byte small": {`"AQID"`, [2]byte{1, 2}, true}, + "array int32": {`[1,2,3]`, [3]int32{1, 2, 3}, false}, + "array int64": {`["1","2","3"]`, [3]int64{1, 2, 3}, false}, + "array int64 number": {`[1,2,3]`, [3]int64{1, 2, 3}, true}, + "array int64 large": {`["1","2","3"]`, [4]int64{1, 2, 3, 4}, true}, + "array int64 small": {`["1","2","3"]`, [2]int64{1, 2}, true}, + "map bytes": {`{"b":"AQID"}`, map[string][]byte{"b": {1, 2, 3}}, false}, + "map int32": {`{"a":1,"b":2}`, map[string]int32{"a": 1, "b": 2}, false}, + "map int64": {`{"a":"1","b":"2"}`, map[string]int64{"a": 1, "b": 2}, false}, + "map int64 empty": {`{}`, map[string]int64{}, false}, + "map int64 null": {`null`, map[string]int64(nil), false}, + "map int key": {`{}`, map[int]int{}, true}, + "time": {`"2020-06-03T17:35:30Z"`, time.Date(2020, 6, 3, 17, 35, 30, 0, time.UTC), false}, + "time non-utc": {`"2020-06-03T17:35:30+02:00"`, time.Time{}, true}, + "time nozone": {`"2020-06-03T17:35:30"`, time.Time{}, true}, + "car": {`{"type":"vehicle/car","value":{"Wheels":4}}`, Car{Wheels: 4}, false}, + "car ptr": {`{"type":"vehicle/car","value":{"Wheels":4}}`, &Car{Wheels: 4}, false}, + "car iface": {`{"type":"vehicle/car","value":{"Wheels":4}}`, Vehicle(&Car{Wheels: 4}), false}, + "boat": {`{"type":"vehicle/boat","value":{"Sail":true}}`, Boat{Sail: true}, false}, + "boat ptr": {`{"type":"vehicle/boat","value":{"Sail":true}}`, &Boat{Sail: true}, false}, + "boat 
iface": {`{"type":"vehicle/boat","value":{"Sail":true}}`, Vehicle(Boat{Sail: true}), false}, + "boat into car": {`{"type":"vehicle/boat","value":{"Sail":true}}`, Car{}, true}, + "boat into car iface": {`{"type":"vehicle/boat","value":{"Sail":true}}`, Vehicle(&Car{}), true}, + "shoes": {`{"type":"vehicle/shoes","value":{"Soles":"rubber"}}`, Car{}, true}, + "shoes ptr": {`{"type":"vehicle/shoes","value":{"Soles":"rubber"}}`, &Car{}, true}, + "shoes iface": {`{"type":"vehicle/shoes","value":{"Soles":"rubbes"}}`, Vehicle(&Car{}), true}, + "key public": {`{"type":"key/public","value":"AQIDBAUGBwg="}`, PublicKey{1, 2, 3, 4, 5, 6, 7, 8}, false}, + "key wrong": {`{"type":"key/public","value":"AQIDBAUGBwg="}`, PrivateKey{1, 2, 3, 4, 5, 6, 7, 8}, true}, + "key into car": {`{"type":"key/public","value":"AQIDBAUGBwg="}`, Vehicle(&Car{}), true}, + "tags": { + `{"name":"name","OmitEmpty":"foo","Hidden":"bar","tags":{"name":"child"}}`, + Tags{JSONName: "name", OmitEmpty: "foo", Tags: &Tags{JSONName: "child"}}, + false, + }, + "tags ptr": { + `{"name":"name","OmitEmpty":"foo","tags":null}`, + &Tags{JSONName: "name", OmitEmpty: "foo"}, + false, + }, + "tags real name": {`{"JSONName":"name"}`, Tags{}, false}, + "struct": { + `{ + "Bool":true, "Float64":3.14, "Int32":32, "Int64":"64", "Int64Ptr":"64", + "String":"foo", "StringPtrPtr": "string", "Bytes":"AQID", + "Time":"2020-06-02T16:05:13.004346374Z", + "Car":{"Wheels":4}, + "Boat":{"Sail":true}, + "Vehicles":[ + {"type":"vehicle/car","value":{"Wheels":4}}, + {"type":"vehicle/boat","value":{"Sail":true}} + ], + "Child":{ + "Bool":false, "Float64":0, "Int32":0, "Int64":"0", "Int64Ptr":null, + "String":"child", "StringPtrPtr":null, "Bytes":null, + "Time":"0001-01-01T00:00:00Z", + "Car":null, "Boat":{"Sail":false}, "Vehicles":null, "Child":null + }, + "private": "foo", "unknown": "bar" + }`, + Struct{ + Bool: true, Float64: 3.14, Int32: 32, Int64: 64, Int64Ptr: &i64, + String: "foo", StringPtrPtr: &strPtr, Bytes: []byte{1, 2, 3}, + Time: time.Date(2020, 6, 2, 16, 5, 13, 4346374, time.UTC), + Car: &Car{Wheels: 4}, Boat: Boat{Sail: true}, Vehicles: []Vehicle{ + Vehicle(&Car{Wheels: 4}), + Vehicle(Boat{Sail: true}), + }, + Child: &Struct{Bool: false, String: "child"}, + }, + false, + }, + "struct key into vehicle": {`{"Vehicles":[ + {"type":"vehicle/car","value":{"Wheels":4}}, + {"type":"key/public","value":"MTIzNDU2Nzg="} + ]}`, Struct{}, true}, + "struct ptr null": {`null`, structNil, false}, + "custom value": {`{"Value":"foo"}`, CustomValue{}, false}, + "custom ptr": {`"foo"`, &CustomPtr{Value: "custom"}, false}, + "custom ptr value": {`"foo"`, CustomPtr{Value: "custom"}, false}, + "invalid type": {`"foo"`, Struct{}, true}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + // Create a target variable as a pointer to the zero value of the tc.value type, + // and wrap it in an empty interface. Decode into that interface. + target := reflect.New(reflect.TypeOf(tc.value)).Interface() + err := json.Unmarshal([]byte(tc.json), target) + if tc.err { + require.Error(t, err) + return + } + require.NoError(t, err) + + // Unwrap the target pointer and get the value behind the interface. 
+ actual := reflect.ValueOf(target).Elem().Interface() + assert.Equal(t, tc.value, actual) + }) + } +} diff --git a/libs/json/doc.go b/libs/json/doc.go new file mode 100644 index 000000000..d5ef4047f --- /dev/null +++ b/libs/json/doc.go @@ -0,0 +1,99 @@ +// Package json provides functions for marshaling and unmarshaling JSON in a format that is +// backwards-compatible with Amino JSON encoding. This mostly differs from encoding/json in +// encoding of integers (64-bit integers are encoded as strings, not numbers), and handling +// of interfaces (wrapped in an interface object with type/value keys). +// +// JSON tags (e.g. `json:"name,omitempty"`) are supported in the same way as encoding/json, as is +// custom marshaling overrides via the json.Marshaler and json.Unmarshaler interfaces. +// +// Note that not all JSON emitted by Tendermint is generated by this library; some is generated by +// encoding/json instead, and kept like that for backwards compatibility. +// +// Encoding of numbers uses strings for 64-bit integers (including unspecified ints), to improve +// compatibility with e.g. Javascript (which uses 64-bit floats for numbers, having 53-bit +// precision): +// +// int32(32) // Output: 32 +// uint32(32) // Output: 32 +// int64(64) // Output: "64" +// uint64(64) // Output: "64" +// int(64) // Output: "64" +// uint(64) // Output: "64" +// +// Encoding of other scalars follows encoding/json: +// +// nil // Output: null +// true // Output: true +// "foo" // Output: "foo" +// "" // Output: "" +// +// Slices and arrays are encoded as encoding/json, including base64-encoding of byte slices +// with additional base64-encoding of byte arrays as well: +// +// []int64(nil) // Output: null +// []int64{} // Output: [] +// []int64{1, 2, 3} // Output: ["1", "2", "3"] +// []int32{1, 2, 3} // Output: [1, 2, 3] +// []byte{1, 2, 3} // Output: "AQID" +// [3]int64{1, 2, 3} // Output: ["1", "2", "3"] +// [3]byte{1, 2, 3} // Output: "AQID" +// +// Maps are encoded as encoding/json, but only strings are allowed as map keys (nil maps are not +// emitted as null, to retain Amino backwards-compatibility): +// +// map[string]int64(nil) // Output: {} +// map[string]int64{} // Output: {} +// map[string]int64{"a":1,"b":2} // Output: {"a":"1","b":"2"} +// map[string]int32{"a":1,"b":2} // Output: {"a":1,"b":2} +// map[bool]int{true:1} // Errors +// +// Times are encoded as encoding/json, in RFC3339Nano format, but requiring UTC time zone (with zero +// times emitted as "0001-01-01T00:00:00Z" as with encoding/json): +// +// time.Date(2020, 6, 8, 16, 21, 28, 123, time.FixedZone("UTC+2", 2*60*60)) +// // Output: "2020-06-08T14:21:28.000000123Z" +// time.Time{} // Output: "0001-01-01T00:00:00Z" +// (*time.Time)(nil) // Output: null +// +// Structs are encoded as encoding/json, supporting JSON tags and ignoring private fields: +// +// type Struct struct{ +// Name string +// Value int32 `json:"value,omitempty"` +// private bool +// } +// +// Struct{Name: "foo", Value: 7, private: true} // Output: {"Name":"foo","value":7} +// Struct{} // Output: {"Name":""} +// +// Registered types are encoded with type wrapper, regardless of whether they are given as interface +// or bare struct, but inside structs they are only emitted with type wrapper for interface fields +// (this follows Amino behavior): +// +// type Vehicle interface { +// Drive() error +// } +// +// type Car struct { +// Wheels int8 +// } +// +// func (c *Car) Drive() error { return nil } +// +// RegisterType(&Car{}, "vehicle/car") +// +// Car{Wheels: 4} // 
Output: {"type":"vehicle/car","value":{"Wheels":4}} +// &Car{Wheels: 4} // Output: {"type":"vehicle/car","value":{"Wheels":4}} +// (*Car)(nil) // Output: null +// Vehicle(Car{Wheels: 4}) // Output: {"type":"vehicle/car","value":{"Wheels":4}} +// Vehicle(nil) // Output: null +// +// type Struct struct { +// Car *Car +// Vehicle Vehicle +// } +// +// Struct{Car: &Car{Wheels: 4}, Vehicle: &Car{Wheels: 4}} +// // Output: {"Car": {"Wheels": 4}, "Vehicle": {"type":"vehicle/car","value":{"Wheels":4}}} +// +package json diff --git a/libs/json/encoder.go b/libs/json/encoder.go new file mode 100644 index 000000000..11990e2af --- /dev/null +++ b/libs/json/encoder.go @@ -0,0 +1,254 @@ +package json + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "time" +) + +var ( + timeType = reflect.TypeOf(time.Time{}) + jsonMarshalerType = reflect.TypeOf(new(json.Marshaler)).Elem() + jsonUnmarshalerType = reflect.TypeOf(new(json.Unmarshaler)).Elem() +) + +// Marshal marshals the value as JSON, using Amino-compatible JSON encoding (strings for +// 64-bit numbers, and type wrappers for registered types). +func Marshal(v interface{}) ([]byte, error) { + buf := new(bytes.Buffer) + err := encode(buf, v) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalIndent marshals the value as JSON, using the given prefix and indentation. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + bz, err := Marshal(v) + if err != nil { + return nil, err + } + buf := new(bytes.Buffer) + err = json.Indent(buf, bz, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func encode(w io.Writer, v interface{}) error { + // Bare nil values can't be reflected, so we must handle them here. + if v == nil { + return writeStr(w, "null") + } + rv := reflect.ValueOf(v) + + // If this is a registered type, defer to interface encoder regardless of whether the input is + // an interface or a bare value. This retains Amino's behavior, but is inconsistent with + // behavior in structs where an interface field will get the type wrapper while a bare value + // field will not. + if typeRegistry.name(rv.Type()) != "" { + return encodeReflectInterface(w, rv) + } + + return encodeReflect(w, rv) +} + +func encodeReflect(w io.Writer, rv reflect.Value) error { + if !rv.IsValid() { + return errors.New("invalid reflect value") + } + + // Recursively dereference if pointer. + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return writeStr(w, "null") + } + rv = rv.Elem() + } + + // Convert times to UTC. + if rv.Type() == timeType { + rv = reflect.ValueOf(rv.Interface().(time.Time).Round(0).UTC()) + } + + // If the value implements json.Marshaler, defer to stdlib directly. Since we've already + // dereferenced, we try implementations with both value receiver and pointer receiver. We must + // do this after the time normalization above, and thus after dereferencing. + if rv.Type().Implements(jsonMarshalerType) { + return encodeStdlib(w, rv.Interface()) + } else if rv.CanAddr() && rv.Addr().Type().Implements(jsonMarshalerType) { + return encodeStdlib(w, rv.Addr().Interface()) + } + + switch rv.Type().Kind() { + // Complex types must be recursively encoded.
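// The switch that follows is where the integer examples in doc.go above come
// from: 64-bit (and unsized) integers take the quoted-string path, while
// 32-bit integers fall through to encoding/json. A minimal sketch:

package main

import (
	"fmt"

	tmjson "github.com/tendermint/tendermint/libs/json"
)

func main() {
	for _, v := range []interface{}{int32(32), int64(64), uint64(64), int(64)} {
		bz, err := tmjson.Marshal(v)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%T -> %s\n", v, bz)
	}
	// Output:
	// int32 -> 32
	// int64 -> "64"
	// uint64 -> "64"
	// int -> "64"
}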
+ case reflect.Interface: + return encodeReflectInterface(w, rv) + + case reflect.Array, reflect.Slice: + return encodeReflectList(w, rv) + + case reflect.Map: + return encodeReflectMap(w, rv) + + case reflect.Struct: + return encodeReflectStruct(w, rv) + + // 64-bit integers are emitted as strings, to avoid precision problems with e.g. + // Javascript, which uses 64-bit floats (having 53-bit precision). + case reflect.Int64, reflect.Int: + return writeStr(w, `"`+strconv.FormatInt(rv.Int(), 10)+`"`) + + case reflect.Uint64, reflect.Uint: + return writeStr(w, `"`+strconv.FormatUint(rv.Uint(), 10)+`"`) + + // For everything else, defer to the stdlib encoding/json encoder. + default: + return encodeStdlib(w, rv.Interface()) + } +} + +func encodeReflectList(w io.Writer, rv reflect.Value) error { + // Emit nil slices as null. + if rv.Kind() == reflect.Slice && rv.IsNil() { + return writeStr(w, "null") + } + + // Encode byte slices as base64 with the stdlib encoder. + if rv.Type().Elem().Kind() == reflect.Uint8 { + // Stdlib does not base64-encode byte arrays, only slices, so we copy to a slice. + if rv.Type().Kind() == reflect.Array { + slice := reflect.MakeSlice(reflect.SliceOf(rv.Type().Elem()), rv.Len(), rv.Len()) + reflect.Copy(slice, rv) + rv = slice + } + return encodeStdlib(w, rv.Interface()) + } + + // Anything else we recursively encode ourselves. + length := rv.Len() + if err := writeStr(w, "["); err != nil { + return err + } + for i := 0; i < length; i++ { + if err := encodeReflect(w, rv.Index(i)); err != nil { + return err + } + if i < length-1 { + if err := writeStr(w, ","); err != nil { + return err + } + } + } + return writeStr(w, "]") +} + +func encodeReflectMap(w io.Writer, rv reflect.Value) error { + if rv.Type().Key().Kind() != reflect.String { + return errors.New("map key must be string") + } + + // nil maps are not emitted as null, to retain Amino compatibility. + + if err := writeStr(w, "{"); err != nil { + return err + } + writeComma := false + for _, keyrv := range rv.MapKeys() { + if writeComma { + if err := writeStr(w, ","); err != nil { + return err + } + } + if err := encodeStdlib(w, keyrv.Interface()); err != nil { + return err + } + if err := writeStr(w, ":"); err != nil { + return err + } + if err := encodeReflect(w, rv.MapIndex(keyrv)); err != nil { + return err + } + writeComma = true + } + return writeStr(w, "}") +} + +func encodeReflectStruct(w io.Writer, rv reflect.Value) error { + sInfo := makeStructInfo(rv.Type()) + if err := writeStr(w, "{"); err != nil { + return err + } + writeComma := false + for i, fInfo := range sInfo.fields { + frv := rv.Field(i) + if fInfo.hidden || (fInfo.omitEmpty && frv.IsZero()) { + continue + } + + if writeComma { + if err := writeStr(w, ","); err != nil { + return err + } + } + if err := encodeStdlib(w, fInfo.jsonName); err != nil { + return err + } + if err := writeStr(w, ":"); err != nil { + return err + } + if err := encodeReflect(w, frv); err != nil { + return err + } + writeComma = true + } + return writeStr(w, "}") +} + +func encodeReflectInterface(w io.Writer, rv reflect.Value) error { + // Get concrete value and dereference pointers. 
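+ // Both pointer and interface wrappers are unwrapped here, since the type registry stores + // bare types; a nil at any level of wrapping is encoded as null.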
+ for rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Interface { + if rv.IsNil() { + return writeStr(w, "null") + } + rv = rv.Elem() + } + + // Look up the name of the concrete type + name := typeRegistry.name(rv.Type()) + if name == "" { + return fmt.Errorf("cannot encode unregistered type %v", rv.Type()) + } + + // Write value wrapped in interface envelope + if err := writeStr(w, fmt.Sprintf(`{"type":%q,"value":`, name)); err != nil { + return err + } + if err := encodeReflect(w, rv); err != nil { + return err + } + return writeStr(w, "}") +} + +func encodeStdlib(w io.Writer, v interface{}) error { + // Doesn't stream the output because that adds a newline, as per: + // https://golang.org/pkg/encoding/json/#Encoder.Encode + blob, err := json.Marshal(v) + if err != nil { + return err + } + _, err = w.Write(blob) + return err +} + +func writeStr(w io.Writer, s string) error { + _, err := w.Write([]byte(s)) + return err +} diff --git a/libs/json/encoder_test.go b/libs/json/encoder_test.go new file mode 100644 index 000000000..88eb56f85 --- /dev/null +++ b/libs/json/encoder_test.go @@ -0,0 +1,104 @@ +package json_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/libs/json" +) + +func TestMarshal(t *testing.T) { + s := "string" + sPtr := &s + i64 := int64(64) + ti := time.Date(2020, 6, 2, 18, 5, 13, 4346374, time.FixedZone("UTC+2", 2*60*60)) + car := &Car{Wheels: 4} + boat := Boat{Sail: true} + + testcases := map[string]struct { + value interface{} + output string + }{ + "nil": {nil, `null`}, + "string": {"foo", `"foo"`}, + "float32": {float32(3.14), `3.14`}, + "float32 neg": {float32(-3.14), `-3.14`}, + "float64": {float64(3.14), `3.14`}, + "float64 neg": {float64(-3.14), `-3.14`}, + "int32": {int32(32), `32`}, + "int64": {int64(64), `"64"`}, + "int64 neg": {int64(-64), `"-64"`}, + "int64 ptr": {&i64, `"64"`}, + "uint64": {uint64(64), `"64"`}, + "time": {ti, `"2020-06-02T16:05:13.004346374Z"`}, + "time empty": {time.Time{}, `"0001-01-01T00:00:00Z"`}, + "time ptr": {&ti, `"2020-06-02T16:05:13.004346374Z"`}, + "customptr": {CustomPtr{Value: "x"}, `{"Value":"x"}`}, // same as encoding/json + "customptr ptr": {&CustomPtr{Value: "x"}, `"custom"`}, + "customvalue": {CustomValue{Value: "x"}, `"custom"`}, + "customvalue ptr": {&CustomValue{Value: "x"}, `"custom"`}, + "slice nil": {[]int(nil), `null`}, + "slice empty": {[]int{}, `[]`}, + "slice bytes": {[]byte{1, 2, 3}, `"AQID"`}, + "slice int64": {[]int64{1, 2, 3}, `["1","2","3"]`}, + "slice int64 ptr": {[]*int64{&i64, nil}, `["64",null]`}, + "array bytes": {[3]byte{1, 2, 3}, `"AQID"`}, + "array int64": {[3]int64{1, 2, 3}, `["1","2","3"]`}, + "map nil": {map[string]int64(nil), `{}`}, // retain Amino compatibility + "map empty": {map[string]int64{}, `{}`}, + "map int64": {map[string]int64{"a": 1, "b": 2, "c": 3}, `{"a":"1","b":"2","c":"3"}`}, + "car": {car, `{"type":"vehicle/car","value":{"Wheels":4}}`}, + "car value": {*car, `{"type":"vehicle/car","value":{"Wheels":4}}`}, + "car iface": {Vehicle(car), `{"type":"vehicle/car","value":{"Wheels":4}}`}, + "car nil": {(*Car)(nil), `null`}, + "boat": {boat, `{"type":"vehicle/boat","value":{"Sail":true}}`}, + "boat ptr": {&boat, `{"type":"vehicle/boat","value":{"Sail":true}}`}, + "boat iface": {Vehicle(boat), `{"type":"vehicle/boat","value":{"Sail":true}}`}, + "key public": {PublicKey{1, 2, 3, 4, 5, 6, 7, 8}, `{"type":"key/public","value":"AQIDBAUGBwg="}`}, + "tags": { + Tags{JSONName: "name", 
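+ // Hidden is dropped by its json:"-" tag, and OmitEmpty keeps its Go field name, since + // its tag sets only the omitempty option (see the expected output below).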
OmitEmpty: "foo", Hidden: "bar", Tags: &Tags{JSONName: "child"}}, + `{"name":"name","OmitEmpty":"foo","tags":{"name":"child"}}`, + }, + "tags empty": {Tags{}, `{"name":""}`}, + // The encoding of the Car and Boat fields do not have type wrappers, even though they get + // type wrappers when encoded directly (see "car" and "boat" tests). This is to retain the + // same behavior as Amino. If the field was a Vehicle interface instead, it would get + // type wrappers, as seen in the Vehicles field. + "struct": { + Struct{ + Bool: true, Float64: 3.14, Int32: 32, Int64: 64, Int64Ptr: &i64, + String: "foo", StringPtrPtr: &sPtr, Bytes: []byte{1, 2, 3}, + Time: ti, Car: car, Boat: boat, Vehicles: []Vehicle{car, boat}, + Child: &Struct{Bool: false, String: "child"}, private: "private", + }, + `{ + "Bool":true, "Float64":3.14, "Int32":32, "Int64":"64", "Int64Ptr":"64", + "String":"foo", "StringPtrPtr": "string", "Bytes":"AQID", + "Time":"2020-06-02T16:05:13.004346374Z", + "Car":{"Wheels":4}, + "Boat":{"Sail":true}, + "Vehicles":[ + {"type":"vehicle/car","value":{"Wheels":4}}, + {"type":"vehicle/boat","value":{"Sail":true}} + ], + "Child":{ + "Bool":false, "Float64":0, "Int32":0, "Int64":"0", "Int64Ptr":null, + "String":"child", "StringPtrPtr":null, "Bytes":null, + "Time":"0001-01-01T00:00:00Z", + "Car":null, "Boat":{"Sail":false}, "Vehicles":null, "Child":null + } + }`, + }, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + bz, err := json.Marshal(tc.value) + require.NoError(t, err) + assert.JSONEq(t, tc.output, string(bz)) + }) + } +} diff --git a/libs/json/helpers_test.go b/libs/json/helpers_test.go new file mode 100644 index 000000000..a87bc51f1 --- /dev/null +++ b/libs/json/helpers_test.go @@ -0,0 +1,92 @@ +package json_test + +import ( + "time" + + "github.com/tendermint/tendermint/libs/json" +) + +// Register Car, an instance of the Vehicle interface. +func init() { + json.RegisterType(&Car{}, "vehicle/car") + json.RegisterType(Boat{}, "vehicle/boat") + json.RegisterType(PublicKey{}, "key/public") + json.RegisterType(PrivateKey{}, "key/private") +} + +type Vehicle interface { + Drive() error +} + +// Car is a pointer implementation of Vehicle. +type Car struct { + Wheels int32 +} + +func (c *Car) Drive() error { return nil } + +// Boat is a value implementation of Vehicle. +type Boat struct { + Sail bool +} + +func (b Boat) Drive() error { return nil } + +// These are public and private encryption keys. +type PublicKey [8]byte +type PrivateKey [8]byte + +// Custom has custom marshalers and unmarshalers, taking pointer receivers. +type CustomPtr struct { + Value string +} + +func (c *CustomPtr) MarshalJSON() ([]byte, error) { + return []byte("\"custom\""), nil +} + +func (c *CustomPtr) UnmarshalJSON(bz []byte) error { + c.Value = "custom" + return nil +} + +// CustomValue has custom marshalers and unmarshalers, taking value receivers (which usually doesn't +// make much sense since the unmarshaler can't change anything). +type CustomValue struct { + Value string +} + +func (c CustomValue) MarshalJSON() ([]byte, error) { + return []byte("\"custom\""), nil +} + +func (c CustomValue) UnmarshalJSON(bz []byte) error { + c.Value = "custom" + return nil +} + +// Tags tests JSON tags. +type Tags struct { + JSONName string `json:"name"` + OmitEmpty string `json:",omitempty"` + Hidden string `json:"-"` + Tags *Tags `json:"tags,omitempty"` +} + +// Struct tests structs with lots of contents. 
+type Struct struct { + Bool bool + Float64 float64 + Int32 int32 + Int64 int64 + Int64Ptr *int64 + String string + StringPtrPtr **string + Bytes []byte + Time time.Time + Car *Car + Boat Boat + Vehicles []Vehicle + Child *Struct + private string +} diff --git a/libs/json/structs.go b/libs/json/structs.go new file mode 100644 index 000000000..2037cdad9 --- /dev/null +++ b/libs/json/structs.go @@ -0,0 +1,88 @@ +package json + +import ( + "fmt" + "reflect" + "strings" + "unicode" + + tmsync "github.com/tendermint/tendermint/libs/sync" +) + +var ( + // cache caches struct info. + cache = newStructInfoCache() +) + +// structInfoCache is a cache of struct info. +type structInfoCache struct { + tmsync.RWMutex + structInfos map[reflect.Type]*structInfo +} + +func newStructInfoCache() *structInfoCache { + return &structInfoCache{ + structInfos: make(map[reflect.Type]*structInfo), + } +} + +func (c *structInfoCache) get(rt reflect.Type) *structInfo { + c.RLock() + defer c.RUnlock() + return c.structInfos[rt] +} + +func (c *structInfoCache) set(rt reflect.Type, sInfo *structInfo) { + c.Lock() + defer c.Unlock() + c.structInfos[rt] = sInfo +} + +// structInfo contains JSON info for a struct. +type structInfo struct { + fields []*fieldInfo +} + +// fieldInfo contains JSON info for a struct field. +type fieldInfo struct { + jsonName string + omitEmpty bool + hidden bool +} + +// makeStructInfo generates structInfo for a struct type. +func makeStructInfo(rt reflect.Type) *structInfo { + if rt.Kind() != reflect.Struct { + panic(fmt.Sprintf("can't make struct info for non-struct value %v", rt)) + } + if sInfo := cache.get(rt); sInfo != nil { + return sInfo + } + fields := make([]*fieldInfo, 0, rt.NumField()) + for i := 0; i < cap(fields); i++ { + frt := rt.Field(i) + fInfo := &fieldInfo{ + jsonName: frt.Name, + omitEmpty: false, + hidden: frt.Name == "" || !unicode.IsUpper(rune(frt.Name[0])), + } + o := frt.Tag.Get("json") + if o == "-" { + fInfo.hidden = true + } else if o != "" { + opts := strings.Split(o, ",") + if opts[0] != "" { + fInfo.jsonName = opts[0] + } + for _, o := range opts[1:] { + if o == "omitempty" { + fInfo.omitEmpty = true + } + } + } + fields = append(fields, fInfo) + } + sInfo := &structInfo{fields: fields} + cache.set(rt, sInfo) + return sInfo +} diff --git a/libs/json/types.go b/libs/json/types.go new file mode 100644 index 000000000..13f20d2bc --- /dev/null +++ b/libs/json/types.go @@ -0,0 +1,109 @@ +package json + +import ( + "errors" + "fmt" + "reflect" + + tmsync "github.com/tendermint/tendermint/libs/sync" +) + +var ( + // typeRegistry contains globally registered types for JSON encoding/decoding. + typeRegistry = newTypes() +) + +// RegisterType registers a type for Amino-compatible interface encoding in the global type +// registry. These types will be encoded with a type wrapper `{"type":"<name>","value":<value>}` +// regardless of which interface they are wrapped in (if any). If the type is a pointer, it will +// still be valid both for value and pointer types, but decoding into an interface will generate +// a value or pointer based on the registered type. +// +// Should only be called in init() functions, as it panics on error. +func RegisterType(_type interface{}, name string) { + if _type == nil { + panic("cannot register nil type") + } + err := typeRegistry.register(name, reflect.ValueOf(_type).Type()) + if err != nil { + panic(err) + } +} + +// typeInfo contains type information. 
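+// name is the registered name, rt is the bare (dereferenced) type, and returnPtr +// records whether decoded values should be returned as pointers.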
+type typeInfo struct { + name string + rt reflect.Type + returnPtr bool +} + +// types is a type registry. It is safe for concurrent use. +type types struct { + tmsync.RWMutex + byType map[reflect.Type]*typeInfo + byName map[string]*typeInfo +} + +// newTypes creates a new type registry. +func newTypes() types { + return types{ + byType: map[reflect.Type]*typeInfo{}, + byName: map[string]*typeInfo{}, + } +} + +// registers the given type with the given name. The name and type must not be registered already. +func (t *types) register(name string, rt reflect.Type) error { + if name == "" { + return errors.New("name cannot be empty") + } + // If this is a pointer type, we recursively resolve until we get a bare type, but register that + // we should return pointers. + returnPtr := false + for rt.Kind() == reflect.Ptr { + returnPtr = true + rt = rt.Elem() + } + tInfo := &typeInfo{ + name: name, + rt: rt, + returnPtr: returnPtr, + } + + t.Lock() + defer t.Unlock() + if _, ok := t.byName[tInfo.name]; ok { + return fmt.Errorf("a type with name %q is already registered", name) + } + if _, ok := t.byType[tInfo.rt]; ok { + return fmt.Errorf("the type %v is already registered", rt) + } + t.byName[name] = tInfo + t.byType[rt] = tInfo + return nil +} + +// lookup looks up a type from a name, or nil if not registered. +func (t *types) lookup(name string) (reflect.Type, bool) { + t.RLock() + defer t.RUnlock() + tInfo := t.byName[name] + if tInfo == nil { + return nil, false + } + return tInfo.rt, tInfo.returnPtr +} + +// name looks up the name of a type, or empty if not registered. Unwraps pointers as necessary. +func (t *types) name(rt reflect.Type) string { + for rt.Kind() == reflect.Ptr { + rt = rt.Elem() + } + t.RLock() + defer t.RUnlock() + tInfo := t.byType[rt] + if tInfo == nil { + return "" + } + return tInfo.name +} diff --git a/libs/kv/kvpair.go b/libs/kv/kvpair.go deleted file mode 100644 index 2474b2e47..000000000 --- a/libs/kv/kvpair.go +++ /dev/null @@ -1,37 +0,0 @@ -package kv - -import ( - "bytes" - "sort" -) - -//---------------------------------------- -// KVPair - -/* -Defined in types.proto - -type Pair struct { - Key []byte - Value []byte -} -*/ - -type Pairs []Pair - -// Sorting -func (kvs Pairs) Len() int { return len(kvs) } -func (kvs Pairs) Less(i, j int) bool { - switch bytes.Compare(kvs[i].Key, kvs[j].Key) { - case -1: - return true - case 0: - return bytes.Compare(kvs[i].Value, kvs[j].Value) < 0 - case 1: - return false - default: - panic("invalid comparison result") - } -} -func (kvs Pairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } -func (kvs Pairs) Sort() { sort.Sort(kvs) } diff --git a/libs/kv/result.go b/libs/kv/result.go deleted file mode 100644 index fd40450b1..000000000 --- a/libs/kv/result.go +++ /dev/null @@ -1,43 +0,0 @@ -// nolint: dupl -// dupl is reading this as the same file as crypto/merkle/result.go -package kv - -import ( - "bytes" - "encoding/json" - - "github.com/gogo/protobuf/jsonpb" -) - -//--------------------------------------------------------------------------- -// override JSON marshalling so we emit defaults (ie. 
disable omitempty) - -var ( - jsonpbMarshaller = jsonpb.Marshaler{ - EnumsAsInts: true, - EmitDefaults: true, - } - jsonpbUnmarshaller = jsonpb.Unmarshaler{} -) - -func (r *Pair) MarshalJSON() ([]byte, error) { - s, err := jsonpbMarshaller.MarshalToString(r) - return []byte(s), err -} - -func (r *Pair) UnmarshalJSON(b []byte) error { - reader := bytes.NewBuffer(b) - return jsonpbUnmarshaller.Unmarshal(reader, r) -} - -// Some compile time assertions to ensure we don't -// have accidental runtime surprises later on. -// jsonEncodingRoundTripper ensures that asserted -// interfaces implement both MarshalJSON and UnmarshalJSON - -type jsonRoundTripper interface { - json.Marshaler - json.Unmarshaler -} - -var _ jsonRoundTripper = (*Pair)(nil) diff --git a/libs/kv/types.pb.go b/libs/kv/types.pb.go deleted file mode 100644 index 7a6e6e12e..000000000 --- a/libs/kv/types.pb.go +++ /dev/null @@ -1,511 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: libs/kv/types.proto - -package kv - -import ( - bytes "bytes" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - golang_proto "github.com/golang/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = golang_proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Pair struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Pair) Reset() { *m = Pair{} } -func (m *Pair) String() string { return proto.CompactTextString(m) } -func (*Pair) ProtoMessage() {} -func (*Pair) Descriptor() ([]byte, []int) { - return fileDescriptor_31432671d164f444, []int{0} -} -func (m *Pair) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Pair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Pair.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Pair) XXX_Merge(src proto.Message) { - xxx_messageInfo_Pair.Merge(m, src) -} -func (m *Pair) XXX_Size() int { - return m.Size() -} -func (m *Pair) XXX_DiscardUnknown() { - xxx_messageInfo_Pair.DiscardUnknown(m) -} - -var xxx_messageInfo_Pair proto.InternalMessageInfo - -func (m *Pair) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *Pair) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*Pair)(nil), "tendermint.libs.kv.Pair") - golang_proto.RegisterType((*Pair)(nil), "tendermint.libs.kv.Pair") -} - -func init() { proto.RegisterFile("libs/kv/types.proto", fileDescriptor_31432671d164f444) } -func init() { golang_proto.RegisterFile("libs/kv/types.proto", fileDescriptor_31432671d164f444) } - -var fileDescriptor_31432671d164f444 = []byte{ - // 182 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0xc9, 0x4c, 0x2a, - 0xd6, 0xcf, 0x2e, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, - 0x12, 0x2a, 0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x03, 0xc9, 0xeb, 0x65, - 0x97, 0x49, 0xa9, 0x95, 0x64, 0x64, 0x16, 0xa5, 0xc4, 0x17, 0x24, 0x16, 0x95, 0x54, 0xea, 0x83, - 0x95, 0xe9, 0xa7, 0xe7, 0xa7, 0xe7, 0x23, 0x58, 0x10, 0xbd, 0x4a, 0x7a, 0x5c, 0x2c, 0x01, 0x89, - 0x99, 0x45, 0x42, 0x02, 0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, - 0x20, 0xa6, 0x90, 0x08, 0x17, 0x6b, 0x59, 0x62, 0x4e, 0x69, 0xaa, 0x04, 0x13, 0x58, 0x0c, 0xc2, - 0x71, 0x72, 0xfb, 0xf1, 0x50, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x1d, 0x8f, 0xe4, 0x18, 0x4f, - 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x03, 0x8f, 0xe5, 0x18, - 0xa3, 0x34, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x11, 0x8e, 0x42, - 0x66, 0x42, 0xdd, 0x9f, 0xc4, 0x06, 0xb6, 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x3a, - 0xdc, 0xba, 0xd1, 0x00, 0x00, 0x00, -} - -func (this *Pair) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Pair) - if !ok { - that2, ok := that.(Pair) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Key, that1.Key) { - return false - } - if !bytes.Equal(this.Value, that1.Value) { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (m *Pair) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Pair) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Pair) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func NewPopulatedPair(r randyTypes, easy bool) *Pair { - this := &Pair{} - v1 := r.Intn(100) - this.Key = make([]byte, v1) - for i := 0; i < v1; i++ { - this.Key[i] = byte(r.Intn(256)) - } - v2 := r.Intn(100) - this.Value = make([]byte, v2) - for i := 0; i < v2; i++ { - this.Value[i] = byte(r.Intn(256)) - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) - } - return this -} - -type randyTypes interface { - Float32() float32 - Float64() float64 - Int63() int64 - Int31() int32 - Uint32() uint32 - Intn(n int) int -} - -func randUTF8RuneTypes(r randyTypes) rune { - ru := r.Intn(62) - if ru < 10 { - return rune(ru + 48) - } else if ru < 36 { - return rune(ru + 55) - } - return rune(ru + 61) -} -func randStringTypes(r randyTypes) string { - v3 := r.Intn(100) - 
tmps := make([]rune, v3) - for i := 0; i < v3; i++ { - tmps[i] = randUTF8RuneTypes(r) - } - return string(tmps) -} -func randUnrecognizedTypes(r randyTypes, maxFieldNumber int) (dAtA []byte) { - l := r.Intn(5) - for i := 0; i < l; i++ { - wire := r.Intn(4) - if wire == 3 { - wire = 5 - } - fieldNumber := maxFieldNumber + r.Intn(100) - dAtA = randFieldTypes(dAtA, r, fieldNumber, wire) - } - return dAtA -} -func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte { - key := uint32(fieldNumber)<<3 | uint32(wire) - switch wire { - case 0: - dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - v4 := r.Int63() - if r.Intn(2) == 0 { - v4 *= -1 - } - dAtA = encodeVarintPopulateTypes(dAtA, uint64(v4)) - case 1: - dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) - case 2: - dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - ll := r.Intn(100) - dAtA = encodeVarintPopulateTypes(dAtA, uint64(ll)) - for j := 0; j < ll; j++ { - dAtA = append(dAtA, byte(r.Intn(256))) - } - default: - dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) - } - return dAtA -} -func encodeVarintPopulateTypes(dAtA []byte, v uint64) []byte { - for v >= 1<<7 { - dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) - v >>= 7 - } - dAtA = append(dAtA, uint8(v)) - return dAtA -} -func (m *Pair) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTypes(x uint64) (n int) { - return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Pair) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Pair: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Pair: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTypes(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTypes - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTypes - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTypes - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") -) diff --git a/libs/kv/types.proto b/libs/kv/types.proto deleted file mode 100644 index 1b6a7a58d..000000000 --- a/libs/kv/types.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; -package tendermint.libs.kv; -option go_package = "github.com/tendermint/tendermint/libs/kv"; - -import "third_party/proto/gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.goproto_registration) = true; -// Generate tests -option (gogoproto.populate_all) = true; -option (gogoproto.equal_all) = true; -option (gogoproto.testgen_all) = true; - -//---------------------------------------- -// Abstract types - -message Pair { - bytes key = 1; - bytes value = 
2; -} diff --git a/libs/kv/typespb_test.go b/libs/kv/typespb_test.go deleted file mode 100644 index dc45bf7f2..000000000 --- a/libs/kv/typespb_test.go +++ /dev/null @@ -1,149 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: libs/kv/types.proto - -package kv - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" - github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" - proto "github.com/gogo/protobuf/proto" - golang_proto "github.com/golang/protobuf/proto" - math "math" - math_rand "math/rand" - testing "testing" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = golang_proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func TestPairProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedPair(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Pair{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestPairMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedPair(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Pair{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestPairJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedPair(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &Pair{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestPairProtoText(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedPair(popr, true) - dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) - msg := &Pair{} - if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestPairProtoCompactText(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - 
p := NewPopulatedPair(popr, true) - dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) - msg := &Pair{} - if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestPairSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedPair(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -//These tests are generated by github.com/gogo/protobuf/plugin/testgen diff --git a/libs/log/tm_logger.go b/libs/log/tm_logger.go index d49e8d22b..75273f88c 100644 --- a/libs/log/tm_logger.go +++ b/libs/log/tm_logger.go @@ -54,7 +54,7 @@ func (l *tmLogger) Info(msg string, keyvals ...interface{}) { lWithLevel := kitlevel.Info(l.srcLogger) if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { errLogger := kitlevel.Error(l.srcLogger) - kitlog.With(errLogger, msgKey, msg).Log("err", err) + kitlog.With(errLogger, msgKey, msg).Log("err", err) //nolint:errcheck // no need to check error again } } @@ -63,7 +63,7 @@ func (l *tmLogger) Debug(msg string, keyvals ...interface{}) { lWithLevel := kitlevel.Debug(l.srcLogger) if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { errLogger := kitlevel.Error(l.srcLogger) - kitlog.With(errLogger, msgKey, msg).Log("err", err) + kitlog.With(errLogger, msgKey, msg).Log("err", err) //nolint:errcheck // no need to check error again } } @@ -72,7 +72,7 @@ func (l *tmLogger) Error(msg string, keyvals ...interface{}) { lWithLevel := kitlevel.Error(l.srcLogger) lWithMsg := kitlog.With(lWithLevel, msgKey, msg) if err := lWithMsg.Log(keyvals...); err != nil { - lWithMsg.Log("err", err) + lWithMsg.Log("err", err) //nolint:errcheck // no need to check error again } } diff --git a/libs/log/tmfmt_logger.go b/libs/log/tmfmt_logger.go index d57f9558e..9519fd310 100644 --- a/libs/log/tmfmt_logger.go +++ b/libs/log/tmfmt_logger.go @@ -107,7 +107,7 @@ KeyvalueLoop: err := enc.EncodeKeyval(keyvals[i], keyvals[i+1]) if err == logfmt.ErrUnsupportedValueType { - enc.EncodeKeyval(keyvals[i], fmt.Sprintf("%+v", keyvals[i+1])) + enc.EncodeKeyval(keyvals[i], fmt.Sprintf("%+v", keyvals[i+1])) //nolint:errcheck // no need to check error again } else if err != nil { return err } diff --git a/libs/log/tmfmt_logger_test.go b/libs/log/tmfmt_logger_test.go index 86d0e5693..be87432a1 100644 --- a/libs/log/tmfmt_logger_test.go +++ b/libs/log/tmfmt_logger_test.go @@ -77,6 +77,7 @@ func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Log } } +//nolint: errcheck // ignore errors var ( baseMessage = func(logger kitlog.Logger) { logger.Log("foo_key", "foo_value") } withMessage = func(logger kitlog.Logger) { kitlog.With(logger, "a", "b").Log("d", "f") } diff --git a/libs/log/tracing_logger_test.go b/libs/log/tracing_logger_test.go index 354476755..6d6edc5ca 100644 --- 
a/libs/log/tracing_logger_test.go +++ b/libs/log/tracing_logger_test.go @@ -22,16 +22,16 @@ func TestTracingLogger(t *testing.T) { err2 := errors.New("it does not matter how slowly you go, so long as you do not stop") logger1.With("err1", err1).Info("foo", "err2", err2) - want := strings.Replace( - strings.Replace( + want := strings.ReplaceAll( + strings.ReplaceAll( `{"_msg":"foo","err1":"`+ fmt.Sprintf("%+v", err1)+ `","err2":"`+ fmt.Sprintf("%+v", err2)+ `","level":"info"}`, - "\t", "", -1, - ), "\n", "", -1) - have := strings.Replace(strings.Replace(strings.TrimSpace(buf.String()), "\\n", "", -1), "\\t", "", -1) + "\t", "", + ), "\n", "") + have := strings.ReplaceAll(strings.ReplaceAll(strings.TrimSpace(buf.String()), "\\n", ""), "\\t", "") if want != have { t.Errorf("\nwant '%s'\nhave '%s'", want, have) } diff --git a/libs/math/fraction.go b/libs/math/fraction.go index 6cc2ad23e..e38636485 100644 --- a/libs/math/fraction.go +++ b/libs/math/fraction.go @@ -1,17 +1,48 @@ package math -import "fmt" +import ( + "errors" + "fmt" + "math" + "strconv" + "strings" +) -// Fraction defined in terms of a numerator divided by a denominator in int64 -// format. +// Fraction defined in terms of a numerator divided by a denominator in uint64 +// format. The fraction must be positive. type Fraction struct { // The portion of the denominator in the faction, e.g. 2 in 2/3. - Numerator int64 `json:"numerator"` - // The value by which the numerator is divided, e.g. 3 in 2/3. Must be - positive. - Denominator int64 `json:"denominator"` + Numerator uint64 `json:"numerator"` + // The value by which the numerator is divided, e.g. 3 in 2/3. + Denominator uint64 `json:"denominator"` } func (fr Fraction) String() string { return fmt.Sprintf("%d/%d", fr.Numerator, fr.Denominator) } + +// ParseFraction takes the string representation of a fraction, e.g. "2/3", and converts it +// to the equivalent Fraction, or returns an error if it cannot. The format of the string must be +// one number followed by a slash (/) and then the other number. +func ParseFraction(f string) (Fraction, error) { + o := strings.Split(f, "/") + if len(o) != 2 { + return Fraction{}, errors.New("incorrect formatting: should have a single slash, e.g. \"1/3\"") + } + numerator, err := strconv.ParseUint(o[0], 10, 64) + if err != nil { + return Fraction{}, fmt.Errorf("incorrect formatting, err: %w", err) + } + + denominator, err := strconv.ParseUint(o[1], 10, 64) + if err != nil { + return Fraction{}, fmt.Errorf("incorrect formatting, err: %w", err) + } + if denominator == 0 { + return Fraction{}, errors.New("denominator can't be 0") + } + if numerator > math.MaxInt64 || denominator > math.MaxInt64 { + return Fraction{}, fmt.Errorf("value overflow, numerator and denominator must not exceed %d", math.MaxInt64) + } + return Fraction{Numerator: numerator, Denominator: denominator}, nil +} diff --git a/libs/math/fraction_test.go b/libs/math/fraction_test.go new file mode 100644 index 000000000..73ca0f6c8 --- /dev/null +++ b/libs/math/fraction_test.go @@ -0,0 +1,86 @@ +package math + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseFraction(t *testing.T) { + + testCases := []struct { + f string + exp Fraction + err bool + }{ + { + f: "2/3", + exp: Fraction{2, 3}, + err: false, + }, + { + f: "15/5", + exp: Fraction{15, 5}, + err: false, + }, + // test zero denominator error + { + f: "2/0", + exp: Fraction{}, + err: true, + }, + // test negative + { + f: "-1/2", + exp: Fraction{}, + err: true, + }, + { + f: "1/-2", + exp: Fraction{}, + err: true, + }, + // test overflow + { + f: "9223372036854775808/2", + exp: Fraction{}, + err: true, + }, + { + f: "2/9223372036854775808", + exp: Fraction{}, + err: true, + }, + { + f: "2/3/4", + exp: Fraction{}, + err: true, + }, + { + f: "123", + exp: Fraction{}, + err: true, + }, + { + f: "1a2/4", + exp: Fraction{}, + err: true, + }, + { + f: "1/3bc4", + exp: Fraction{}, + err: true, + }, + } + + for idx, tc := range testCases { + output, err := ParseFraction(tc.f) + if tc.err { + assert.Error(t, err, idx) + } else { + assert.NoError(t, err, idx) + } + assert.Equal(t, tc.exp, output, idx) + } + +} diff --git a/libs/math/safemath.go b/libs/math/safemath.go new file mode 100644 index 000000000..ff7f0908f --- /dev/null +++ b/libs/math/safemath.go @@ -0,0 +1,65 @@ +package math + +import ( + "errors" + "math" +) + +var ErrOverflowInt32 = errors.New("int32 overflow") +var ErrOverflowUint8 = errors.New("uint8 overflow") +var ErrOverflowInt8 = errors.New("int8 overflow") + +// SafeAddInt32 adds two int32 integers +// If there is an overflow this will panic +func SafeAddInt32(a, b int32) int32 { + if b > 0 && (a > math.MaxInt32-b) { + panic(ErrOverflowInt32) + } else if b < 0 && (a < math.MinInt32-b) { + panic(ErrOverflowInt32) + } + return a + b +} + +// SafeSubInt32 subtracts two int32 integers +// If there is an overflow this will panic +func SafeSubInt32(a, b int32) int32 { + if b > 0 && (a < math.MinInt32+b) { + panic(ErrOverflowInt32) + } else if b < 0 && (a > math.MaxInt32+b) { + panic(ErrOverflowInt32) + } + return a - b +} + +// SafeConvertInt32 takes an int64 and checks if it overflows +// If there is an overflow this will panic +func SafeConvertInt32(a int64) int32 { + if a > math.MaxInt32 { + panic(ErrOverflowInt32) + } else if a < math.MinInt32 { + panic(ErrOverflowInt32) + } + return int32(a) +} + +// SafeConvertUint8 takes an int64 and checks if it overflows +// If there is an overflow it returns an error +func SafeConvertUint8(a int64) (uint8, error) { + if a > math.MaxUint8 { + return 0, ErrOverflowUint8 + } else if a < 0 { + return 0, ErrOverflowUint8 + } + return uint8(a), nil +} + +// SafeConvertInt8 takes an int64 and checks if it overflows +// If there is an 
overflow it returns an error +func SafeConvertInt8(a int64) (int8, error) { + if a > math.MaxInt8 { + return 0, ErrOverflowInt8 + } else if a < math.MinInt8 { + return 0, ErrOverflowInt8 + } + return int8(a), nil +} diff --git a/libs/os/os.go b/libs/os/os.go index b56726c94..733f7e942 100644 --- a/libs/os/os.go +++ b/libs/os/os.go @@ -2,7 +2,7 @@ package os import ( "fmt" - "io/ioutil" + "io" "os" "os/signal" "syscall" @@ -12,31 +12,22 @@ type logger interface { Info(msg string, keyvals ...interface{}) } -// TrapSignal catches the SIGTERM/SIGINT and executes cb function. After that it exits -// with code 0. +// TrapSignal catches SIGTERM and SIGINT, executes the cleanup function, +// and exits with code 0. func TrapSignal(logger logger, cb func()) { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { - for sig := range c { - logger.Info(fmt.Sprintf("captured %v, exiting...", sig)) - if cb != nil { - cb() - } - os.Exit(0) + sig := <-c + logger.Info(fmt.Sprintf("captured %v, exiting...", sig)) + if cb != nil { + cb() } + os.Exit(0) }() } -// Kill the running process by sending itself SIGTERM. -func Kill() error { - p, err := os.FindProcess(os.Getpid()) - if err != nil { - return err - } - return p.Signal(syscall.SIGTERM) -} - func Exit(s string) { fmt.Printf(s + "\n") os.Exit(1) @@ -46,7 +37,7 @@ func EnsureDir(dir string, mode os.FileMode) error { if _, err := os.Stat(dir); os.IsNotExist(err) { err := os.MkdirAll(dir, mode) if err != nil { - return fmt.Errorf("could not create directory %v. %v", dir, err) + return fmt.Errorf("could not create directory %v: %w", dir, err) } } return nil @@ -57,26 +48,26 @@ func FileExists(filePath string) bool { return !os.IsNotExist(err) } -func ReadFile(filePath string) ([]byte, error) { - return ioutil.ReadFile(filePath) -} - -func MustReadFile(filePath string) []byte { - fileBytes, err := ioutil.ReadFile(filePath) +// CopyFile copies a file. It truncates the destination file if it exists. 
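+// The copy is created with the same permission bits as the source file.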
+func CopyFile(src, dst string) error { + info, err := os.Stat(src) if err != nil { - Exit(fmt.Sprintf("MustReadFile failed: %v", err)) - return nil + return err } - return fileBytes -} -func WriteFile(filePath string, contents []byte, mode os.FileMode) error { - return ioutil.WriteFile(filePath, contents, mode) -} + srcfile, err := os.Open(src) + if err != nil { + return err + } + defer srcfile.Close() -func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { - err := WriteFile(filePath, contents, mode) + // create new file, truncate if exists and apply same permissions as the original one + dstfile, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, info.Mode().Perm()) if err != nil { - Exit(fmt.Sprintf("MustWriteFile failed: %v", err)) + return err } + defer dstfile.Close() + + _, err = io.Copy(dstfile, srcfile) + return err } diff --git a/libs/os/os_test.go b/libs/os/os_test.go new file mode 100644 index 000000000..2b44dc8e7 --- /dev/null +++ b/libs/os/os_test.go @@ -0,0 +1,102 @@ +package os_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "syscall" + "testing" + "time" + + tmos "github.com/tendermint/tendermint/libs/os" +) + +func TestCopyFile(t *testing.T) { + tmpfile, err := ioutil.TempFile("", "example") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpfile.Name()) + content := []byte("hello world") + if _, err := tmpfile.Write(content); err != nil { + t.Fatal(err) + } + + copyfile := fmt.Sprintf("%s.copy", tmpfile.Name()) + if err := tmos.CopyFile(tmpfile.Name(), copyfile); err != nil { + t.Fatal(err) + } + if _, err := os.Stat(copyfile); os.IsNotExist(err) { + t.Fatal("copy should exist") + } + data, err := ioutil.ReadFile(copyfile) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(data, content) { + t.Fatalf("copy file content differs: expected %v, got %v", content, data) + } + os.Remove(copyfile) +} + +func TestTrapSignal(t *testing.T) { + if os.Getenv("TM_TRAP_SIGNAL_TEST") == "1" { + t.Log("inside test process") + killer() + return + } + + cmd, _, mockStderr := newTestProgram(t, "TM_TRAP_SIGNAL_TEST") + + err := cmd.Run() + if err == nil { + wantStderr := "exiting" + if mockStderr.String() != wantStderr { + t.Fatalf("stderr: want %q, got %q", wantStderr, mockStderr.String()) + } + + return + } + + if e, ok := err.(*exec.ExitError); ok && !e.Success() { + t.Fatalf("wrong exit code, want 0, got %d", e.ExitCode()) + } + + t.Fatal("this error should not be triggered") +} + +type mockLogger struct{} + +func (ml mockLogger) Info(msg string, keyvals ...interface{}) {} + +func killer() { + logger := mockLogger{} + + tmos.TrapSignal(logger, func() { _, _ = fmt.Fprintf(os.Stderr, "exiting") }) + time.Sleep(1 * time.Second) + + p, err := os.FindProcess(os.Getpid()) + if err != nil { + panic(err) + } + + if err := p.Signal(syscall.SIGTERM); err != nil { + panic(err) + } + + time.Sleep(1 * time.Second) +} + +func newTestProgram(t *testing.T, environVar string) (cmd *exec.Cmd, stdout *bytes.Buffer, stderr *bytes.Buffer) { + t.Helper() + + cmd = exec.Command(os.Args[0], "-test.run="+t.Name()) + stdout, stderr = bytes.NewBufferString(""), bytes.NewBufferString("") + cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", environVar)) + cmd.Stdout = stdout + cmd.Stderr = stderr + + return +} diff --git a/libs/protoio/io.go b/libs/protoio/io.go new file mode 100644 index 000000000..91acbb71b --- /dev/null +++ b/libs/protoio/io.go @@ -0,0 +1,96 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. 
All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Modified to return number of bytes written by Writer.WriteMsg(), and added byteReader. + +package protoio + +import ( + "io" + + "github.com/gogo/protobuf/proto" +) + +type Writer interface { + WriteMsg(proto.Message) (int, error) +} + +type WriteCloser interface { + Writer + io.Closer +} + +type Reader interface { + ReadMsg(msg proto.Message) error +} + +type ReadCloser interface { + Reader + io.Closer +} + +type marshaler interface { + MarshalTo(data []byte) (n int, err error) +} + +func getSize(v interface{}) (int, bool) { + if sz, ok := v.(interface { + Size() (n int) + }); ok { + return sz.Size(), true + } else if sz, ok := v.(interface { + ProtoSize() (n int) + }); ok { + return sz.ProtoSize(), true + } else { + return 0, false + } +} + +// byteReader wraps an io.Reader and implements io.ByteReader. Reading one byte at a +// time is extremely slow, but this is what Amino did already, and the caller can +// wrap the reader in bufio.Reader if appropriate. +type byteReader struct { + io.Reader + bytes []byte +} + +func newByteReader(r io.Reader) *byteReader { + return &byteReader{ + Reader: r, + bytes: make([]byte, 1), + } +} + +func (r *byteReader) ReadByte() (byte, error) { + _, err := r.Read(r.bytes) + if err != nil { + return 0, err + } + return r.bytes[0], nil +} diff --git a/libs/protoio/io_test.go b/libs/protoio/io_test.go new file mode 100644 index 000000000..f4556b31f --- /dev/null +++ b/libs/protoio/io_test.go @@ -0,0 +1,157 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package protoio_test + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "math/rand" + "testing" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/test" + + "github.com/tendermint/tendermint/libs/protoio" +) + +func iotest(writer protoio.WriteCloser, reader protoio.ReadCloser) error { + varint := make([]byte, binary.MaxVarintLen64) + size := 1000 + msgs := make([]*test.NinOptNative, size) + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := range msgs { + msgs[i] = test.NewPopulatedNinOptNative(r, true) + // issue 31 + if i == 5 { + msgs[i] = &test.NinOptNative{} + } + // issue 31 + if i == 999 { + msgs[i] = &test.NinOptNative{} + } + // FIXME Check size + bz, err := proto.Marshal(msgs[i]) + if err != nil { + return err + } + visize := binary.PutUvarint(varint, uint64(len(bz))) + n, err := writer.WriteMsg(msgs[i]) + if err != nil { + return err + } + if n != len(bz)+visize { + return fmt.Errorf("WriteMsg() wrote %v bytes, expected %v", n, len(bz)+visize) // nolint + } + } + if err := writer.Close(); err != nil { + return err + } + i := 0 + for { + msg := &test.NinOptNative{} + if err := reader.ReadMsg(msg); err != nil { + if err == io.EOF { + break + } + return err + } + if err := msg.VerboseEqual(msgs[i]); err != nil { + return err + } + i++ + } + if i != size { + panic("not enough messages read") + } + if err := reader.Close(); err != nil { + return err + } + return nil +} + +type buffer struct { + *bytes.Buffer + closed bool +} + +func (b *buffer) Close() error { + b.closed = true + return nil +} + +func newBuffer() *buffer { + return &buffer{bytes.NewBuffer(nil), false} +} + +func TestVarintNormal(t *testing.T) { + buf := newBuffer() + writer := protoio.NewDelimitedWriter(buf) + reader := protoio.NewDelimitedReader(buf, 1024*1024) + if err := iotest(writer, reader); err != nil { + t.Error(err) + } + if !buf.closed { + t.Fatalf("did not close buffer") + } +} + +func TestVarintNoClose(t *testing.T) { + buf := bytes.NewBuffer(nil) + writer := protoio.NewDelimitedWriter(buf) + reader := protoio.NewDelimitedReader(buf, 1024*1024) + if err := iotest(writer, reader); err != nil { + t.Error(err) + } +} + +// issue 32 +func TestVarintMaxSize(t *testing.T) { + buf := newBuffer() + writer := protoio.NewDelimitedWriter(buf) + reader := protoio.NewDelimitedReader(buf, 20) + if err := iotest(writer, reader); err == nil { + t.Error(err) + } else { + t.Logf("%s", err) + } +} + +func TestVarintError(t *testing.T) { + buf := newBuffer() + buf.Write([]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f}) + reader := protoio.NewDelimitedReader(buf, 1024*1024) + msg := &test.NinOptNative{} + err := reader.ReadMsg(msg) + if err == nil { + t.Fatalf("Expected 
error") + } +} diff --git a/libs/protoio/reader.go b/libs/protoio/reader.go new file mode 100644 index 000000000..15a84899f --- /dev/null +++ b/libs/protoio/reader.go @@ -0,0 +1,88 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Modified from original GoGo Protobuf to not buffer the reader. + +package protoio + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + + "github.com/gogo/protobuf/proto" +) + +// NewDelimitedReader reads varint-delimited Protobuf messages from a reader. Unlike the gogoproto +// NewDelimitedReader, this does not buffer the reader, which may cause poor performance but is +// necessary when only reading single messages (e.g. in the p2p package). +func NewDelimitedReader(r io.Reader, maxSize int) ReadCloser { + var closer io.Closer + if c, ok := r.(io.Closer); ok { + closer = c + } + return &varintReader{newByteReader(r), nil, maxSize, closer} +} + +type varintReader struct { + r *byteReader + buf []byte + maxSize int + closer io.Closer +} + +func (r *varintReader) ReadMsg(msg proto.Message) error { + length64, err := binary.ReadUvarint(newByteReader(r.r)) + if err != nil { + return err + } + length := int(length64) + if length < 0 || length > r.maxSize { + return fmt.Errorf("message exceeds max size (%v > %v)", length, r.maxSize) + } + if len(r.buf) < length { + r.buf = make([]byte, length) + } + buf := r.buf[:length] + if _, err := io.ReadFull(r.r, buf); err != nil { + return err + } + return proto.Unmarshal(buf, msg) +} + +func (r *varintReader) Close() error { + if r.closer != nil { + return r.closer.Close() + } + return nil +} + +func UnmarshalDelimited(data []byte, msg proto.Message) error { + return NewDelimitedReader(bytes.NewReader(data), len(data)).ReadMsg(msg) +} diff --git a/libs/protoio/writer.go b/libs/protoio/writer.go new file mode 100644 index 000000000..d4c66798f --- /dev/null +++ b/libs/protoio/writer.go @@ -0,0 +1,100 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Modified from original GoGo Protobuf to return number of bytes written. + +package protoio + +import ( + "bytes" + "encoding/binary" + "io" + + "github.com/gogo/protobuf/proto" +) + +// NewDelimitedWriter writes a varint-delimited Protobuf message to a writer. It is +// equivalent to the gogoproto NewDelimitedWriter, except WriteMsg() also returns the +// number of bytes written, which is necessary in the p2p package. 
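As a hedged, standalone illustration (the names below are made up, not part of this package), the framing that the reader and writer above share is simply a uvarint length prefix followed by the marshaled message bytes, so a successful WriteMsg reports the prefix length plus the body length:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	body := []byte("hello") // stand-in for a marshaled protobuf message

	lenBuf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(lenBuf, uint64(len(body)))

	var frame bytes.Buffer
	frame.Write(lenBuf[:n]) // uvarint length prefix
	frame.Write(body)       // message body

	// n+len(body) is exactly what a delimited WriteMsg would report.
	fmt.Println(frame.Len(), n+len(body))
}
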
+func NewDelimitedWriter(w io.Writer) WriteCloser { + return &varintWriter{w, make([]byte, binary.MaxVarintLen64), nil} +} + +type varintWriter struct { + w io.Writer + lenBuf []byte + buffer []byte +} + +func (w *varintWriter) WriteMsg(msg proto.Message) (int, error) { + if m, ok := msg.(marshaler); ok { + n, ok := getSize(m) + if ok { + if n+binary.MaxVarintLen64 >= len(w.buffer) { + w.buffer = make([]byte, n+binary.MaxVarintLen64) + } + lenOff := binary.PutUvarint(w.buffer, uint64(n)) + _, err := m.MarshalTo(w.buffer[lenOff:]) + if err != nil { + return 0, err + } + _, err = w.w.Write(w.buffer[:lenOff+n]) + return lenOff + n, err + } + } + + // fallback + data, err := proto.Marshal(msg) + if err != nil { + return 0, err + } + length := uint64(len(data)) + n := binary.PutUvarint(w.lenBuf, length) + _, err = w.w.Write(w.lenBuf[:n]) + if err != nil { + return 0, err + } + _, err = w.w.Write(data) + return len(data) + n, err +} + +func (w *varintWriter) Close() error { + if closer, ok := w.w.(io.Closer); ok { + return closer.Close() + } + return nil +} + +func MarshalDelimited(msg proto.Message) ([]byte, error) { + var buf bytes.Buffer + _, err := NewDelimitedWriter(&buf).WriteMsg(msg) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/libs/pubsub/example_test.go b/libs/pubsub/example_test.go index 34bb2a88f..6abd5de5c 100644 --- a/libs/pubsub/example_test.go +++ b/libs/pubsub/example_test.go @@ -15,8 +15,13 @@ import ( func TestExample(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) ctx := context.Background() subscription, err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'")) diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 2a89e7591..f48ee5b86 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -36,11 +36,11 @@ package pubsub import ( "context" - "sync" - - "github.com/pkg/errors" + "errors" + "fmt" "github.com/tendermint/tendermint/libs/service" + tmsync "github.com/tendermint/tendermint/libs/sync" ) type operation int @@ -96,7 +96,7 @@ type Server struct { // check if we have subscription before // subscribing or unsubscribing - mtx sync.RWMutex + mtx tmsync.RWMutex subscriptions map[string]map[string]struct{} // subscriber -> query (string) -> empty struct } @@ -410,7 +410,7 @@ func (state *state) send(msg interface{}, events map[string][]string) error { match, err := q.Matches(events) if err != nil { - return errors.Wrapf(err, "failed to match against query %s", q.String()) + return fmt.Errorf("failed to match against query %s: %w", q.String(), err) } if match { diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index f9dd592d1..8482a13fa 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -23,8 +23,13 @@ const ( func TestSubscribe(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) ctx := context.Background() subscription, err := s.Subscribe(ctx, clientID, query.Empty{}) @@ -63,15 +68,22 @@ func TestSubscribe(t *testing.T) { func TestSubscribeWithCapacity(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() + err := s.Start() + require.NoError(t, err) + 
t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) ctx := context.Background() assert.Panics(t, func() { - s.Subscribe(ctx, clientID, query.Empty{}, -1) + _, err = s.Subscribe(ctx, clientID, query.Empty{}, -1) + require.NoError(t, err) }) assert.Panics(t, func() { - s.Subscribe(ctx, clientID, query.Empty{}, 0) + _, err = s.Subscribe(ctx, clientID, query.Empty{}, 0) + require.NoError(t, err) }) subscription, err := s.Subscribe(ctx, clientID, query.Empty{}, 1) require.NoError(t, err) @@ -83,8 +95,13 @@ func TestSubscribeWithCapacity(t *testing.T) { func TestSubscribeUnbuffered(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) ctx := context.Background() subscription, err := s.SubscribeUnbuffered(ctx, clientID, query.Empty{}) @@ -113,8 +130,13 @@ func TestSubscribeUnbuffered(t *testing.T) { func TestSlowClientIsRemovedWithErrOutOfCapacity(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) ctx := context.Background() subscription, err := s.Subscribe(ctx, clientID, query.Empty{}) @@ -130,8 +152,13 @@ func TestSlowClientIsRemovedWithErrOutOfCapacity(t *testing.T) { func TestDifferentClients(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) ctx := context.Background() subscription1, err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'")) @@ -171,7 +198,11 @@ func TestSubscribeDuplicateKeys(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) require.NoError(t, s.Start()) - defer s.Stop() + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) testCases := []struct { query string @@ -220,8 +251,13 @@ func TestSubscribeDuplicateKeys(t *testing.T) { func TestClientSubscribesTwice(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) ctx := context.Background() q := query.MustParse("tm.events.type='NewBlock'") @@ -244,8 +280,13 @@ func TestClientSubscribesTwice(t *testing.T) { func TestUnsubscribe(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) ctx := context.Background() subscription, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) @@ -263,11 +304,16 @@ func TestUnsubscribe(t *testing.T) { func TestClientUnsubscribesTwice(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) ctx := context.Background() - _, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) + _, err = s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) require.NoError(t, err) err = s.Unsubscribe(ctx, clientID, 
query.MustParse("tm.events.type='NewBlock'")) require.NoError(t, err) @@ -281,11 +327,16 @@ func TestClientUnsubscribesTwice(t *testing.T) { func TestResubscribe(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) ctx := context.Background() - _, err := s.Subscribe(ctx, clientID, query.Empty{}) + _, err = s.Subscribe(ctx, clientID, query.Empty{}) require.NoError(t, err) err = s.Unsubscribe(ctx, clientID, query.Empty{}) require.NoError(t, err) @@ -300,8 +351,13 @@ func TestResubscribe(t *testing.T) { func TestUnsubscribeAll(t *testing.T) { s := pubsub.NewServer() s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) ctx := context.Background() subscription1, err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'")) @@ -351,8 +407,14 @@ func Benchmark1000ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(1000 func benchmarkNClients(n int, b *testing.B) { s := pubsub.NewServer() - s.Start() - defer s.Stop() + err := s.Start() + require.NoError(b, err) + + b.Cleanup(func() { + if err := s.Stop(); err != nil { + b.Error(err) + } + }) ctx := context.Background() for i := 0; i < n; i++ { @@ -379,18 +441,24 @@ func benchmarkNClients(n int, b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - s.PublishWithEvents( + err = s.PublishWithEvents( ctx, "Gamora", - map[string][]string{"abci.Account.Owner": {"Ivan"}, "abci.Invoices.Number": {string(i)}}, + map[string][]string{"abci.Account.Owner": {"Ivan"}, "abci.Invoices.Number": {string(rune(i))}}, ) + require.NoError(b, err) } } func benchmarkNClientsOneQuery(n int, b *testing.B) { s := pubsub.NewServer() - s.Start() - defer s.Stop() + err := s.Start() + require.NoError(b, err) + b.Cleanup(func() { + if err := s.Stop(); err != nil { + b.Error(err) + } + }) ctx := context.Background() q := query.MustParse("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1") @@ -414,13 +482,13 @@ func benchmarkNClientsOneQuery(n int, b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - s.PublishWithEvents(ctx, "Gamora", map[string][]string{"abci.Account.Owner": {"Ivan"}, "abci.Invoices.Number": {"1"}}) + err = s.PublishWithEvents(ctx, "Gamora", map[string][]string{"abci.Account.Owner": {"Ivan"}, + "abci.Invoices.Number": {"1"}}) + require.NoError(b, err) } } -/////////////////////////////////////////////////////////////////////////////// -/// HELPERS -/////////////////////////////////////////////////////////////////////////////// +// HELPERS func assertReceive(t *testing.T, expected interface{}, ch <-chan pubsub.Message, msgAndArgs ...interface{}) { select { diff --git a/libs/pubsub/query/peg.go b/libs/pubsub/query/peg.go index 3444dab34..816589f02 100644 --- a/libs/pubsub/query/peg.go +++ b/libs/pubsub/query/peg.go @@ -1,4 +1,3 @@ -// nolint package query //go:generate peg -inline -switch query.peg diff --git a/libs/pubsub/query/query.go b/libs/pubsub/query/query.go index 247653394..cf6903ccf 100644 --- a/libs/pubsub/query/query.go +++ b/libs/pubsub/query/query.go @@ -15,8 +15,6 @@ import ( "strconv" "strings" "time" - - "github.com/pkg/errors" ) var ( @@ -411,7 +409,7 @@ func matchValue(value string, op Operator, operand reflect.Value) (bool, error) v, err = time.Parse(DateLayout, 
value) } if err != nil { - return false, errors.Wrapf(err, "failed to convert value %v from event attribute to time.Time", value) + return false, fmt.Errorf("failed to convert value %v from event attribute to time.Time: %w", value, err) } switch op { @@ -436,7 +434,7 @@ func matchValue(value string, op Operator, operand reflect.Value) (bool, error) // try our best to convert value from tags to float64 v, err := strconv.ParseFloat(filteredValue, 64) if err != nil { - return false, errors.Wrapf(err, "failed to convert value %v from event attribute to float64", filteredValue) + return false, fmt.Errorf("failed to convert value %v from event attribute to float64: %w", filteredValue, err) } switch op { @@ -462,7 +460,7 @@ func matchValue(value string, op Operator, operand reflect.Value) (bool, error) if strings.ContainsAny(filteredValue, ".") { v1, err := strconv.ParseFloat(filteredValue, 64) if err != nil { - return false, errors.Wrapf(err, "failed to convert value %v from event attribute to float64", filteredValue) + return false, fmt.Errorf("failed to convert value %v from event attribute to float64: %w", filteredValue, err) } v = int64(v1) @@ -471,7 +469,7 @@ func matchValue(value string, op Operator, operand reflect.Value) (bool, error) // try our best to convert value from tags to int64 v, err = strconv.ParseInt(filteredValue, 10, 64) if err != nil { - return false, errors.Wrapf(err, "failed to convert value %v from event attribute to int64", filteredValue) + return false, fmt.Errorf("failed to convert value %v from event attribute to int64: %w", filteredValue, err) } } diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go index cf3923584..4d6f605c7 100644 --- a/libs/pubsub/subscription.go +++ b/libs/pubsub/subscription.go @@ -2,7 +2,8 @@ package pubsub import ( "errors" - "sync" + + tmsync "github.com/tendermint/tendermint/libs/sync" ) var ( @@ -23,7 +24,7 @@ type Subscription struct { out chan Message cancelled chan struct{} - mtx sync.RWMutex + mtx tmsync.RWMutex err error } diff --git a/libs/rand/random.go b/libs/rand/random.go index ddd4432cb..41d04a440 100644 --- a/libs/rand/random.go +++ b/libs/rand/random.go @@ -3,8 +3,9 @@ package rand import ( crand "crypto/rand" mrand "math/rand" - "sync" "time" + + tmsync "github.com/tendermint/tendermint/libs/sync" ) const ( @@ -19,7 +20,7 @@ const ( // All of the methods here are suitable for concurrent use. // This is achieved by using a mutex lock on all of the provided methods. type Rand struct { - sync.Mutex + tmsync.Mutex rand *mrand.Rand } @@ -47,7 +48,7 @@ func (r *Rand) init() { } func (r *Rand) reset(seed int64) { - r.rand = mrand.New(mrand.NewSource(seed)) + r.rand = mrand.New(mrand.NewSource(seed)) // nolint:gosec // G404: Use of weak random number generator } //---------------------------------------- @@ -148,6 +149,10 @@ func (r *Rand) Seed(seed int64) { // Str constructs a random alphanumeric string of given length. 
func (r *Rand) Str(length int) string { + if length <= 0 { + return "" + } + chars := []byte{} MAIN_LOOP: for { diff --git a/libs/service/service_test.go b/libs/service/service_test.go index 0f9962530..7abc6f4fb 100644 --- a/libs/service/service_test.go +++ b/libs/service/service_test.go @@ -18,7 +18,8 @@ func (testService) OnReset() error { func TestBaseServiceWait(t *testing.T) { ts := &testService{} ts.BaseService = *NewBaseService(nil, "TestService", ts) - ts.Start() + err := ts.Start() + require.NoError(t, err) waitFinished := make(chan struct{}) go func() { @@ -26,7 +27,7 @@ func TestBaseServiceWait(t *testing.T) { waitFinished <- struct{}{} }() - go ts.Stop() + go ts.Stop() //nolint:errcheck // ignore for tests select { case <-waitFinished: @@ -39,12 +40,14 @@ func TestBaseServiceWait(t *testing.T) { func TestBaseServiceReset(t *testing.T) { ts := &testService{} ts.BaseService = *NewBaseService(nil, "TestService", ts) - ts.Start() + err := ts.Start() + require.NoError(t, err) - err := ts.Reset() + err = ts.Reset() require.Error(t, err, "expected cant reset service error") - ts.Stop() + err = ts.Stop() + require.NoError(t, err) err = ts.Reset() require.NoError(t, err) diff --git a/libs/sync/deadlock.go b/libs/sync/deadlock.go new file mode 100644 index 000000000..637d6fbb1 --- /dev/null +++ b/libs/sync/deadlock.go @@ -0,0 +1,17 @@ +// +build deadlock + +package sync + +import ( + deadlock "github.com/sasha-s/go-deadlock" +) + +// A Mutex is a mutual exclusion lock. +type Mutex struct { + deadlock.Mutex +} + +// An RWMutex is a reader/writer mutual exclusion lock. +type RWMutex struct { + deadlock.RWMutex +} diff --git a/libs/sync/sync.go b/libs/sync/sync.go new file mode 100644 index 000000000..a0880e7de --- /dev/null +++ b/libs/sync/sync.go @@ -0,0 +1,15 @@ +// +build !deadlock + +package sync + +import "sync" + +// A Mutex is a mutual exclusion lock. +type Mutex struct { + sync.Mutex +} + +// An RWMutex is a reader/writer mutual exclusion lock. +type RWMutex struct { + sync.RWMutex +} diff --git a/libs/tempfile/tempfile.go b/libs/tempfile/tempfile.go index 38d987698..922ed5722 100644 --- a/libs/tempfile/tempfile.go +++ b/libs/tempfile/tempfile.go @@ -1,14 +1,15 @@ package tempfile import ( - fmt "fmt" + "fmt" "io" "os" "path/filepath" "strconv" "strings" - "sync" "time" + + tmsync "github.com/tendermint/tendermint/libs/sync" ) const ( @@ -31,7 +32,7 @@ const ( var ( atomicWriteFileRand uint64 - atomicWriteFileRandMu sync.Mutex + atomicWriteFileRandMu tmsync.Mutex ) func writeFileRandReseed() uint64 { diff --git a/libs/tempfile/tempfile_test.go b/libs/tempfile/tempfile_test.go index c11009f72..9d07f806b 100644 --- a/libs/tempfile/tempfile_test.go +++ b/libs/tempfile/tempfile_test.go @@ -4,7 +4,7 @@ package tempfile import ( "bytes" - fmt "fmt" + "fmt" "io/ioutil" "os" testing "testing" @@ -27,7 +27,7 @@ func TestWriteFileAtomic(t *testing.T) { } defer os.Remove(f.Name()) - if err = ioutil.WriteFile(f.Name(), old, 0664); err != nil { + if err = ioutil.WriteFile(f.Name(), old, 0600); err != nil { t.Fatal(err) } @@ -74,16 +74,18 @@ func TestWriteFileAtomicDuplicateFile(t *testing.T) { // Defer here, in case there is a panic in WriteFileAtomic. 
defer os.Remove(fileToWrite) - require.Nil(t, err) - f.WriteString(testString) - WriteFileAtomic(fileToWrite, []byte(expectedString), 0777) + require.NoError(t, err) + _, err = f.WriteString(testString) + require.NoError(t, err) + err = WriteFileAtomic(fileToWrite, []byte(expectedString), 0777) + require.NoError(t, err) // Check that the first atomic file was untouched firstAtomicFileBytes, err := ioutil.ReadFile(fname) - require.Nil(t, err, "Error reading first atomic file") + require.NoError(t, err, "Error reading first atomic file") require.Equal(t, []byte(testString), firstAtomicFileBytes, "First atomic file was overwritten") // Check that the resultant file is correct resultantFileBytes, err := ioutil.ReadFile(fileToWrite) - require.Nil(t, err, "Error reading resultant file") + require.NoError(t, err, "Error reading resultant file") require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes") // Check that the intermediate write file was deleted @@ -113,7 +115,8 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) { fname := "/tmp/" + atomicWriteFilePrefix + fileRand f, err := os.OpenFile(fname, atomicWriteFileFlag, 0777) require.Nil(t, err) - f.WriteString(fmt.Sprintf(testString, i)) + _, err = f.WriteString(fmt.Sprintf(testString, i)) + require.NoError(t, err) defer os.Remove(fname) } @@ -121,7 +124,8 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) { // Defer here, in case there is a panic in WriteFileAtomic. defer os.Remove(fileToWrite) - WriteFileAtomic(fileToWrite, []byte(expectedString), 0777) + err := WriteFileAtomic(fileToWrite, []byte(expectedString), 0777) + require.NoError(t, err) // Check that all intermittent atomic file were untouched atomicWriteFileRand = defaultSeed for i := 0; i < atomicWriteFileMaxNumConflicts+2; i++ { diff --git a/libs/timer/throttle_timer.go b/libs/timer/throttle_timer.go index 76db87ee8..7f5660c05 100644 --- a/libs/timer/throttle_timer.go +++ b/libs/timer/throttle_timer.go @@ -1,8 +1,9 @@ package timer import ( - "sync" "time" + + tmsync "github.com/tendermint/tendermint/libs/sync" ) /* @@ -17,7 +18,7 @@ type ThrottleTimer struct { quit chan struct{} dur time.Duration - mtx sync.Mutex + mtx tmsync.Mutex timer *time.Timer isSet bool } diff --git a/libs/timer/throttle_timer_test.go b/libs/timer/throttle_timer_test.go index 894447974..af990fc6d 100644 --- a/libs/timer/throttle_timer_test.go +++ b/libs/timer/throttle_timer_test.go @@ -1,18 +1,19 @@ package timer import ( - "sync" "testing" "time" // make govet noshadow happy... asrt "github.com/stretchr/testify/assert" + + tmsync "github.com/tendermint/tendermint/libs/sync" ) type thCounter struct { input chan struct{} - mtx sync.Mutex + mtx tmsync.Mutex count int } @@ -66,14 +67,16 @@ func TestThrottle(test *testing.T) { time.Sleep(longwait) assert.Equal(2, c.Count()) - // send 12, over 2 delay sections, adds 3 + // send 12, over 2 delay sections, adds 3 or more. 
It + // is possible for more to be added if the overhead + // in executing the loop is large short := time.Duration(ms/5) * time.Millisecond for i := 0; i < 13; i++ { t.Set() time.Sleep(short) } time.Sleep(longwait) - assert.Equal(5, c.Count()) + assert.LessOrEqual(5, c.Count()) close(t.Ch) } diff --git a/light/client.go b/light/client.go new file mode 100644 index 000000000..17c665b2b --- /dev/null +++ b/light/client.go @@ -0,0 +1,1047 @@ +package light + +import ( + "bytes" + "context" + "errors" + "fmt" + "time" + + "github.com/tendermint/tendermint/libs/log" + tmmath "github.com/tendermint/tendermint/libs/math" + tmsync "github.com/tendermint/tendermint/libs/sync" + "github.com/tendermint/tendermint/light/provider" + "github.com/tendermint/tendermint/light/store" + "github.com/tendermint/tendermint/types" +) + +type mode byte + +const ( + sequential mode = iota + 1 + skipping + + defaultPruningSize = 1000 + defaultMaxRetryAttempts = 10 + // For verifySkipping, when using the cache of headers from the previous batch, + // they will always be at a height greater than 1/2 (normal verifySkipping) so to + // find something in between the range, 9/16 is used. + verifySkippingNumerator = 9 + verifySkippingDenominator = 16 + + // 10s should cover most of the clients. + // References: + // - http://vancouver-webpages.com/time/web.html + // - https://blog.codinghorror.com/keeping-time-on-the-pc/ + defaultMaxClockDrift = 10 * time.Second +) + +// Option sets a parameter for the light client. +type Option func(*Client) + +// SequentialVerification option configures the light client to sequentially +// check the blocks (every block, in ascending height order). Note this is +// much slower than SkippingVerification, albeit more secure. +func SequentialVerification() Option { + return func(c *Client) { + c.verificationMode = sequential + } +} + +// SkippingVerification option configures the light client to skip blocks as +// long as {trustLevel} of the old validator set signed the new header. The +// verifySkipping algorithm from the specification is used for finding the minimal +// "trust path". +// +// trustLevel - fraction of the old validator set (in terms of voting power), +// which must sign the new header in order for us to trust it. NOTE this only +// applies to non-adjacent headers. For adjacent headers, sequential +// verification is used. +func SkippingVerification(trustLevel tmmath.Fraction) Option { + return func(c *Client) { + c.verificationMode = skipping + c.trustLevel = trustLevel + } +} + +// PruningSize option sets the maximum amount of light blocks that the light +// client stores. When Prune() is run, all light blocks that are earlier than +// the h amount of light blocks will be removed from the store. +// Default: 1000. A pruning size of 0 will not prune the light client at all. +func PruningSize(h uint16) Option { + return func(c *Client) { + c.pruningSize = h + } +} + +// ConfirmationFunction option can be used to prompt to confirm an action. For +// example, remove newer headers if the light client is being reset with an +// older header. No confirmation is required by default! +func ConfirmationFunction(fn func(action string) bool) Option { + return func(c *Client) { + c.confirmationFn = fn + } +} + +// Logger option can be used to set a logger for the client. +func Logger(l log.Logger) Option { + return func(c *Client) { + c.logger = l + } +} + +// MaxRetryAttempts option can be used to set max attempts before replacing +// primary with a witness. 
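The constructors above follow Go's functional-options pattern; a hedged sketch of how they compose with NewClient, assuming chainID, trustOptions, primary, witness, and lightStore are constructed as elsewhere in this diff (imports mirror those at the top of client.go):

// newConfiguredClient is a hypothetical helper; it only shows how the
// functional options compose with NewClient.
func newConfiguredClient(
	ctx context.Context,
	chainID string,
	trustOptions light.TrustOptions,
	primary provider.Provider,
	witness provider.Provider,
	lightStore store.Store,
) (*light.Client, error) {
	return light.NewClient(
		ctx,
		chainID,
		trustOptions,
		primary,
		[]provider.Provider{witness}, // at least one witness is required
		lightStore,
		light.SkippingVerification(light.DefaultTrustLevel),
		light.PruningSize(500),             // keep at most 500 light blocks
		light.MaxClockDrift(5*time.Second), // tighten the default 10s drift
	)
}
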
+func MaxRetryAttempts(max uint16) Option {
+	return func(c *Client) {
+		c.maxRetryAttempts = max
+	}
+}
+
+// MaxClockDrift defines how far into the future a new header's time may
+// drift. Default: 10s.
+func MaxClockDrift(d time.Duration) Option {
+	return func(c *Client) {
+		c.maxClockDrift = d
+	}
+}
+
+// Client represents a light client, connected to a single chain, which gets
+// light blocks from a primary provider, verifies them either sequentially or by
+// skipping some and stores them in a trusted store (usually, a local FS).
+//
+// Default verification: SkippingVerification(DefaultTrustLevel)
+type Client struct {
+	chainID          string
+	trustingPeriod   time.Duration // see TrustOptions.Period
+	verificationMode mode
+	trustLevel       tmmath.Fraction
+	maxRetryAttempts uint16 // see MaxRetryAttempts option
+	maxClockDrift    time.Duration
+
+	// Mutex for locking during changes of the light client's providers
+	providerMutex tmsync.Mutex
+	// Primary provider of new headers.
+	primary provider.Provider
+	// Providers used to "witness" new headers.
+	witnesses []provider.Provider
+
+	// Where trusted light blocks are stored.
+	trustedStore store.Store
+	// Highest trusted light block from the store (height=H).
+	latestTrustedBlock *types.LightBlock
+
+	// See PruningSize option
+	pruningSize uint16
+	// See ConfirmationFunction option
+	confirmationFn func(action string) bool
+
+	quit chan struct{}
+
+	logger log.Logger
+}
+
+// NewClient returns a new light client. It returns an error if it fails to
+// obtain the light block from the primary, or if that block is invalid (e.g.
+// the trust hash does not match the header's hash).
+//
+// Witnesses are providers used for cross-checking the primary provider. At
+// least one witness must be given when skipping verification is used
+// (default). A witness can be promoted to primary if the current primary
+// becomes unavailable.
+//
+// See all Option(s) for the additional configuration.
+func NewClient(
+	ctx context.Context,
+	chainID string,
+	trustOptions TrustOptions,
+	primary provider.Provider,
+	witnesses []provider.Provider,
+	trustedStore store.Store,
+	options ...Option) (*Client, error) {
+
+	if err := trustOptions.ValidateBasic(); err != nil {
+		return nil, fmt.Errorf("invalid TrustOptions: %w", err)
+	}
+
+	c, err := NewClientFromTrustedStore(chainID, trustOptions.Period, primary, witnesses, trustedStore, options...)
+	if err != nil {
+		return nil, err
+	}
+
+	if c.latestTrustedBlock != nil {
+		c.logger.Info("Checking trusted light block using options")
+		if err := c.checkTrustedHeaderUsingOptions(ctx, trustOptions); err != nil {
+			return nil, err
+		}
+	}
+
+	if c.latestTrustedBlock == nil || c.latestTrustedBlock.Height < trustOptions.Height {
+		c.logger.Info("Downloading trusted light block using options")
+		if err := c.initializeWithTrustOptions(ctx, trustOptions); err != nil {
+			return nil, err
+		}
+	}
+
+	return c, nil
+}
+
+// NewClientFromTrustedStore initializes an existing client from the trusted store. 
+//
+// See NewClient
+func NewClientFromTrustedStore(
+	chainID string,
+	trustingPeriod time.Duration,
+	primary provider.Provider,
+	witnesses []provider.Provider,
+	trustedStore store.Store,
+	options ...Option) (*Client, error) {
+
+	c := &Client{
+		chainID:          chainID,
+		trustingPeriod:   trustingPeriod,
+		verificationMode: skipping,
+		trustLevel:       DefaultTrustLevel,
+		maxRetryAttempts: defaultMaxRetryAttempts,
+		maxClockDrift:    defaultMaxClockDrift,
+		primary:          primary,
+		witnesses:        witnesses,
+		trustedStore:     trustedStore,
+		pruningSize:      defaultPruningSize,
+		confirmationFn:   func(action string) bool { return true },
+		quit:             make(chan struct{}),
+		logger:           log.NewNopLogger(),
+	}
+
+	for _, o := range options {
+		o(c)
+	}
+
+	// Validate the number of witnesses.
+	if len(c.witnesses) < 1 {
+		return nil, errNoWitnesses{}
+	}
+
+	// Verify witnesses are all on the same chain.
+	for i, w := range witnesses {
+		if w.ChainID() != chainID {
+			return nil, fmt.Errorf("witness #%d: %v is on another chain %s, expected %s",
+				i, w, w.ChainID(), chainID)
+		}
+	}
+
+	// Validate trust level.
+	if err := ValidateTrustLevel(c.trustLevel); err != nil {
+		return nil, err
+	}
+
+	if err := c.restoreTrustedLightBlock(); err != nil {
+		return nil, err
+	}
+
+	return c, nil
+}
+
+// restoreTrustedLightBlock loads the latest trusted light block from the store.
+func (c *Client) restoreTrustedLightBlock() error {
+	lastHeight, err := c.trustedStore.LastLightBlockHeight()
+	if err != nil {
+		return fmt.Errorf("can't get last trusted light block height: %w", err)
+	}
+
+	if lastHeight > 0 {
+		trustedBlock, err := c.trustedStore.LightBlock(lastHeight)
+		if err != nil {
+			return fmt.Errorf("can't get last trusted light block: %w", err)
+		}
+		c.latestTrustedBlock = trustedBlock
+		c.logger.Info("Restored trusted light block", "height", lastHeight)
+	}
+
+	return nil
+}
+
+// if options.Height:
+//
+// 1) ahead of trustedLightBlock.Height => fetch the light block at the same
+// height as trustedLightBlock from the primary provider and check that its
+// hash matches the trustedLightBlock's hash (if not, remove trustedLightBlock
+// and all the light blocks before it)
+//
+// 2) equals trustedLightBlock.Height => check that options.Hash matches the
+// trustedLightBlock's hash (if not, remove trustedLightBlock and all the light
+// blocks before it)
+//
+// 3) behind trustedLightBlock.Height => remove all the light blocks between
+// options.Height and trustedLightBlock.Height, update trustedLightBlock, then
+// check that options.Hash matches the trustedLightBlock's hash (if not, remove
+// trustedLightBlock and all the light blocks before it)
+//
+// The intuition here is that the user is always right: if she decides to reset
+// the light client with an older header, there must be a reason for it.
+func (c *Client) checkTrustedHeaderUsingOptions(ctx context.Context, options TrustOptions) error {
+	var primaryHash []byte
+	switch {
+	case options.Height > c.latestTrustedBlock.Height:
+		h, err := c.lightBlockFromPrimary(ctx, c.latestTrustedBlock.Height)
+		if err != nil {
+			return err
+		}
+		primaryHash = h.Hash()
+	case options.Height == c.latestTrustedBlock.Height:
+		primaryHash = options.Hash
+	case options.Height < c.latestTrustedBlock.Height:
+		c.logger.Info("Client initialized with old header (trusted is more recent)",
+			"old", options.Height,
+			"trustedHeight", c.latestTrustedBlock.Height,
+			"trustedHash", hash2str(c.latestTrustedBlock.Hash()))
+
+		action := fmt.Sprintf(
+			"Rollback to %d (%X)? 
Note this will remove newer light blocks up to %d (%X)", + options.Height, options.Hash, + c.latestTrustedBlock.Height, c.latestTrustedBlock.Hash()) + if c.confirmationFn(action) { + // remove all the headers (options.Height, trustedHeader.Height] + err := c.cleanupAfter(options.Height) + if err != nil { + return fmt.Errorf("cleanupAfter(%d): %w", options.Height, err) + } + + c.logger.Info("Rolled back to older header (newer headers were removed)", + "old", options.Height) + } else { + return nil + } + + primaryHash = options.Hash + } + + if !bytes.Equal(primaryHash, c.latestTrustedBlock.Hash()) { + c.logger.Info("Prev. trusted header's hash (h1) doesn't match hash from primary provider (h2)", + "h1", hash2str(c.latestTrustedBlock.Hash()), "h2", hash2str(primaryHash)) + + action := fmt.Sprintf( + "Prev. trusted header's hash %X doesn't match hash %X from primary provider. Remove all the stored light blocks?", + c.latestTrustedBlock.Hash(), primaryHash) + if c.confirmationFn(action) { + err := c.Cleanup() + if err != nil { + return fmt.Errorf("failed to cleanup: %w", err) + } + } else { + return errors.New("refused to remove the stored light blocks despite hashes mismatch") + } + } + + return nil +} + +// initializeWithTrustOptions fetches the weakly-trusted light block from +// primary provider. +func (c *Client) initializeWithTrustOptions(ctx context.Context, options TrustOptions) error { + // 1) Fetch and verify the light block. + l, err := c.lightBlockFromPrimary(ctx, options.Height) + if err != nil { + return err + } + + // NOTE: - Verify func will check if it's expired or not. + // - h.Time is not being checked against time.Now() because we don't + // want to add yet another argument to NewClient* functions. + if err := l.ValidateBasic(c.chainID); err != nil { + return err + } + + if !bytes.Equal(l.Hash(), options.Hash) { + return fmt.Errorf("expected header's hash %X, but got %X", options.Hash, l.Hash()) + } + + // 2) Ensure that +2/3 of validators signed correctly. + err = l.ValidatorSet.VerifyCommitLight(c.chainID, l.Commit.BlockID, l.Height, l.Commit) + if err != nil { + return fmt.Errorf("invalid commit: %w", err) + } + + // 3) Cross-verify with witnesses to ensure everybody has the same state. + if err := c.compareFirstHeaderWithWitnesses(ctx, l.SignedHeader); err != nil { + return err + } + + // 4) Persist both of them and continue. + return c.updateTrustedLightBlock(l) +} + +// TrustedLightBlock returns a trusted light block at the given height (0 - the latest). +// +// It returns an error if: +// - there are some issues with the trusted store, although that should not +// happen normally; +// - negative height is passed; +// - header has not been verified yet and is therefore not in the store +// +// Safe for concurrent use by multiple goroutines. 
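A brief hedged usage sketch of the height semantics documented above; the helper name is hypothetical, and imports follow the light package:

// latestAppHash is a hypothetical helper: passing 0 to TrustedLightBlock
// requests the highest verified height, per the semantics above.
func latestAppHash(c *light.Client) ([]byte, error) {
	lb, err := c.TrustedLightBlock(0)
	if err != nil {
		return nil, err
	}
	return lb.AppHash, nil // AppHash is promoted from the embedded SignedHeader
}
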
+func (c *Client) TrustedLightBlock(height int64) (*types.LightBlock, error) {
+	height, err := c.compareWithLatestHeight(height)
+	if err != nil {
+		return nil, err
+	}
+	return c.trustedStore.LightBlock(height)
+}
+
+func (c *Client) compareWithLatestHeight(height int64) (int64, error) {
+	latestHeight, err := c.LastTrustedHeight()
+	if err != nil {
+		return 0, fmt.Errorf("can't get last trusted height: %w", err)
+	}
+	if latestHeight == -1 {
+		return 0, errors.New("no headers exist")
+	}
+
+	switch {
+	case height > latestHeight:
+		return 0, fmt.Errorf("unverified header/valset requested (latest: %d)", latestHeight)
+	case height == 0:
+		return latestHeight, nil
+	case height < 0:
+		return 0, errors.New("negative height")
+	}
+
+	return height, nil
+}
+
+// Update attempts to advance the state by downloading the latest light
+// block and verifying it. It returns a new light block on a successful
+// update. Otherwise, it returns nil (plus an error, if any).
+func (c *Client) Update(ctx context.Context, now time.Time) (*types.LightBlock, error) {
+	lastTrustedHeight, err := c.LastTrustedHeight()
+	if err != nil {
+		return nil, fmt.Errorf("can't get last trusted height: %w", err)
+	}
+
+	if lastTrustedHeight == -1 {
+		// no light blocks yet => wait
+		return nil, nil
+	}
+
+	latestBlock, err := c.lightBlockFromPrimary(ctx, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	if latestBlock.Height > lastTrustedHeight {
+		err = c.verifyLightBlock(ctx, latestBlock, now)
+		if err != nil {
+			return nil, err
+		}
+		c.logger.Info("Advanced to new state", "height", latestBlock.Height, "hash", hash2str(latestBlock.Hash()))
+		return latestBlock, nil
+	}
+
+	return nil, nil
+}
+
+// VerifyLightBlockAtHeight fetches the light block at the given height
+// and verifies it. It returns the block immediately if it exists in
+// the trustedStore (no verification is needed).
+//
+// height must be > 0.
+//
+// It returns provider.ErrLightBlockNotFound if the light block is not found
+// by the primary.
+//
+// It replaces the primary provider if a request to the provider fails.
+func (c *Client) VerifyLightBlockAtHeight(ctx context.Context, height int64, now time.Time) (*types.LightBlock, error) {
+	if height <= 0 {
+		return nil, errors.New("negative or zero height")
+	}
+
+	// Check if the light block has already been verified.
+	h, err := c.TrustedLightBlock(height)
+	if err == nil {
+		c.logger.Info("Header has already been verified", "height", height, "hash", hash2str(h.Hash()))
+		// Return the already trusted light block
+		return h, nil
+	}
+
+	// Request the light block from the primary
+	l, err := c.lightBlockFromPrimary(ctx, height)
+	if err != nil {
+		return nil, err
+	}
+
+	return l, c.verifyLightBlock(ctx, l, now)
+}
+
+// VerifyHeader verifies a new header against the trusted state. It returns
+// immediately if newHeader exists in trustedStore (no verification is
+// needed). Else it performs one of the two types of verification:
+//
+// SequentialVerification: verifies that 2/3 of the trusted validator set has
+// signed the new header. If the headers are not adjacent, **all** intermediate
+// headers will be requested. Intermediate headers are not saved to database.
+//
+// SkippingVerification(trustLevel): verifies that {trustLevel} of the trusted
+// validator set has signed the new header. If it's not the case and the
+// headers are not adjacent, verifySkipping is performed and only the necessary
+// (not all) intermediate headers will be requested. See the specification for details. 
+// Intermediate headers are not saved to database. +// https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md +// +// If the header, which is older than the currently trusted header, is +// requested and the light client does not have it, VerifyHeader will perform: +// a) verifySkipping verification if nearest trusted header is found & not expired +// b) backwards verification in all other cases +// +// It returns ErrOldHeaderExpired if the latest trusted header expired. +// +// If the primary provides an invalid header (ErrInvalidHeader), it is rejected +// and replaced by another provider until all are exhausted. +// +// If, at any moment, a LightBlock is not found by the primary provider as part of +// verification then the provider will be replaced by another and the process will +// restart. +func (c *Client) VerifyHeader(ctx context.Context, newHeader *types.Header, now time.Time) error { + if newHeader == nil { + return errors.New("nil header") + } + if newHeader.Height <= 0 { + return errors.New("negative or zero height") + } + + // Check if newHeader already verified. + l, err := c.TrustedLightBlock(newHeader.Height) + if err == nil { + // Make sure it's the same header. + if !bytes.Equal(l.Hash(), newHeader.Hash()) { + return fmt.Errorf("existing trusted header %X does not match newHeader %X", l.Hash(), newHeader.Hash()) + } + c.logger.Info("Header has already been verified", + "height", newHeader.Height, "hash", hash2str(newHeader.Hash())) + return nil + } + + // Request the header and the vals. + l, err = c.lightBlockFromPrimary(ctx, newHeader.Height) + if err != nil { + return fmt.Errorf("failed to retrieve light block from primary to verify against: %w", err) + } + + if !bytes.Equal(l.Hash(), newHeader.Hash()) { + return fmt.Errorf("light block header %X does not match newHeader %X", l.Hash(), newHeader.Hash()) + } + + return c.verifyLightBlock(ctx, l, now) +} + +func (c *Client) verifyLightBlock(ctx context.Context, newLightBlock *types.LightBlock, now time.Time) error { + c.logger.Info("VerifyHeader", "height", newLightBlock.Height, "hash", hash2str(newLightBlock.Hash())) + + var ( + verifyFunc func(ctx context.Context, trusted *types.LightBlock, new *types.LightBlock, now time.Time) error + err error + ) + + switch c.verificationMode { + case sequential: + verifyFunc = c.verifySequential + case skipping: + verifyFunc = c.verifySkippingAgainstPrimary + default: + panic(fmt.Sprintf("Unknown verification mode: %b", c.verificationMode)) + } + + firstBlockHeight, err := c.FirstTrustedHeight() + if err != nil { + return fmt.Errorf("can't get first light block height: %w", err) + } + + switch { + // Verifying forwards + case newLightBlock.Height >= c.latestTrustedBlock.Height: + err = verifyFunc(ctx, c.latestTrustedBlock, newLightBlock, now) + + // Verifying backwards + case newLightBlock.Height < firstBlockHeight: + var firstBlock *types.LightBlock + firstBlock, err = c.trustedStore.LightBlock(firstBlockHeight) + if err != nil { + return fmt.Errorf("can't get first light block: %w", err) + } + err = c.backwards(ctx, firstBlock.Header, newLightBlock.Header) + + // Verifying between first and last trusted light block + default: + var closestBlock *types.LightBlock + closestBlock, err = c.trustedStore.LightBlockBefore(newLightBlock.Height) + if err != nil { + return fmt.Errorf("can't get signed header before height %d: %w", newLightBlock.Height, err) + } + err = verifyFunc(ctx, closestBlock, newLightBlock, now) + } + if err != nil { + c.logger.Error("Can't 
verify", "err", err) + return err + } + + // Once verified, save and return + return c.updateTrustedLightBlock(newLightBlock) +} + +// see VerifyHeader +func (c *Client) verifySequential( + ctx context.Context, + trustedBlock *types.LightBlock, + newLightBlock *types.LightBlock, + now time.Time) error { + + var ( + verifiedBlock = trustedBlock + interimBlock *types.LightBlock + err error + trace = []*types.LightBlock{trustedBlock} + ) + + for height := trustedBlock.Height + 1; height <= newLightBlock.Height; height++ { + // 1) Fetch interim light block if needed. + if height == newLightBlock.Height { // last light block + interimBlock = newLightBlock + } else { // intermediate light blocks + interimBlock, err = c.lightBlockFromPrimary(ctx, height) + if err != nil { + return ErrVerificationFailed{From: verifiedBlock.Height, To: height, Reason: err} + } + } + + // 2) Verify them + c.logger.Debug("Verify adjacent newLightBlock against verifiedBlock", + "trustedHeight", verifiedBlock.Height, + "trustedHash", hash2str(verifiedBlock.Hash()), + "newHeight", interimBlock.Height, + "newHash", hash2str(interimBlock.Hash())) + + err = VerifyAdjacent(verifiedBlock.SignedHeader, interimBlock.SignedHeader, interimBlock.ValidatorSet, + c.trustingPeriod, now, c.maxClockDrift) + if err != nil { + err := ErrVerificationFailed{From: verifiedBlock.Height, To: interimBlock.Height, Reason: err} + + switch errors.Unwrap(err).(type) { + case ErrInvalidHeader: + // If the target header is invalid, return immediately. + if err.To == newLightBlock.Height { + c.logger.Debug("Target header is invalid", "err", err) + return err + } + + // If some intermediate header is invalid, replace the primary and try + // again. + c.logger.Error("primary sent invalid header -> replacing", "err", err) + replaceErr := c.replacePrimaryProvider() + if replaceErr != nil { + c.logger.Error("Can't replace primary", "err", replaceErr) + // return original error + return err + } + + replacementBlock, fErr := c.lightBlockFromPrimary(ctx, newLightBlock.Height) + if fErr != nil { + c.logger.Error("Can't fetch light block from primary", "err", fErr) + // return original error + return err + } + + if !bytes.Equal(replacementBlock.Hash(), newLightBlock.Hash()) { + c.logger.Error("Replacement provider has a different light block", + "newHash", newLightBlock.Hash(), + "replHash", replacementBlock.Hash()) + // return original error + return err + } + + // attempt to verify header again + height-- + + continue + default: + return err + } + } + + // 3) Update verifiedBlock + verifiedBlock = interimBlock + + // 4) Add verifiedBlock to trace + trace = append(trace, verifiedBlock) + } + + // Compare header with the witnesses to ensure it's not a fork. + // More witnesses we have, more chance to notice one. + // + // CORRECTNESS ASSUMPTION: there's at least 1 correct full node + // (primary or one of the witnesses). + return c.detectDivergence(ctx, trace, now) +} + +// see VerifyHeader +// +// verifySkipping finds the middle light block between a trusted and new light block, +// reiterating the action until it verifies a light block. A cache of light blocks +// requested from source is kept such that when a verification is made, and the +// light client tries again to verify the new light block in the middle, the light +// client does not need to ask for all the same light blocks again. 
+func (c *Client) verifySkipping( + ctx context.Context, + source provider.Provider, + trustedBlock *types.LightBlock, + newLightBlock *types.LightBlock, + now time.Time) ([]*types.LightBlock, error) { + + var ( + blockCache = []*types.LightBlock{newLightBlock} + depth = 0 + + verifiedBlock = trustedBlock + trace = []*types.LightBlock{trustedBlock} + ) + + for { + c.logger.Debug("Verify non-adjacent newHeader against verifiedBlock", + "trustedHeight", verifiedBlock.Height, + "trustedHash", hash2str(verifiedBlock.Hash()), + "newHeight", blockCache[depth].Height, + "newHash", hash2str(blockCache[depth].Hash())) + + err := Verify(verifiedBlock.SignedHeader, verifiedBlock.ValidatorSet, blockCache[depth].SignedHeader, + blockCache[depth].ValidatorSet, c.trustingPeriod, now, c.maxClockDrift, c.trustLevel) + switch err.(type) { + case nil: + // Have we verified the last header + if depth == 0 { + trace = append(trace, newLightBlock) + return trace, nil + } + // If not, update the lower bound to the previous upper bound + verifiedBlock = blockCache[depth] + // Remove the light block at the lower bound in the header cache - it will no longer be needed + blockCache = blockCache[:depth] + // Reset the cache depth so that we start from the upper bound again + depth = 0 + // add verifiedBlock to the trace + trace = append(trace, verifiedBlock) + + case ErrNewValSetCantBeTrusted: + // do add another header to the end of the cache + if depth == len(blockCache)-1 { + pivotHeight := verifiedBlock.Height + (blockCache[depth].Height-verifiedBlock. + Height)*verifySkippingNumerator/verifySkippingDenominator + interimBlock, providerErr := source.LightBlock(ctx, pivotHeight) + if providerErr != nil { + return nil, ErrVerificationFailed{From: verifiedBlock.Height, To: pivotHeight, Reason: providerErr} + } + blockCache = append(blockCache, interimBlock) + } + depth++ + + default: + return nil, ErrVerificationFailed{From: verifiedBlock.Height, To: blockCache[depth].Height, Reason: err} + } + } +} + +// verifySkippingAgainstPrimary does verifySkipping plus it compares new header with +// witnesses and replaces primary if it sends the light client an invalid header +func (c *Client) verifySkippingAgainstPrimary( + ctx context.Context, + trustedBlock *types.LightBlock, + newLightBlock *types.LightBlock, + now time.Time) error { + + trace, err := c.verifySkipping(ctx, c.primary, trustedBlock, newLightBlock, now) + + switch errors.Unwrap(err).(type) { + case ErrInvalidHeader: + // If the target header is invalid, return immediately. + invalidHeaderHeight := err.(ErrVerificationFailed).To + if invalidHeaderHeight == newLightBlock.Height { + c.logger.Debug("Target header is invalid", "err", err) + return err + } + + // If some intermediate header is invalid, replace the primary and try + // again. 
+ c.logger.Error("primary sent invalid header -> replacing", "err", err) + replaceErr := c.replacePrimaryProvider() + if replaceErr != nil { + c.logger.Error("Can't replace primary", "err", replaceErr) + // return original error + return err + } + + replacementBlock, fErr := c.lightBlockFromPrimary(ctx, newLightBlock.Height) + if fErr != nil { + c.logger.Error("Can't fetch light block from primary", "err", fErr) + // return original error + return err + } + + if !bytes.Equal(replacementBlock.Hash(), newLightBlock.Hash()) { + c.logger.Error("Replacement provider has a different light block", + "newHash", newLightBlock.Hash(), + "replHash", replacementBlock.Hash()) + // return original error + return err + } + + // attempt to verify the header again + return c.verifySkippingAgainstPrimary(ctx, trustedBlock, replacementBlock, now) + case nil: + // Compare header with the witnesses to ensure it's not a fork. + // More witnesses we have, more chance to notice one. + // + // CORRECTNESS ASSUMPTION: there's at least 1 correct full node + // (primary or one of the witnesses). + if cmpErr := c.detectDivergence(ctx, trace, now); cmpErr != nil { + return cmpErr + } + default: + return err + } + + return nil +} + +// LastTrustedHeight returns a last trusted height. -1 and nil are returned if +// there are no trusted headers. +// +// Safe for concurrent use by multiple goroutines. +func (c *Client) LastTrustedHeight() (int64, error) { + return c.trustedStore.LastLightBlockHeight() +} + +// FirstTrustedHeight returns a first trusted height. -1 and nil are returned if +// there are no trusted headers. +// +// Safe for concurrent use by multiple goroutines. +func (c *Client) FirstTrustedHeight() (int64, error) { + return c.trustedStore.FirstLightBlockHeight() +} + +// ChainID returns the chain ID the light client was configured with. +// +// Safe for concurrent use by multiple goroutines. +func (c *Client) ChainID() string { + return c.chainID +} + +// Primary returns the primary provider. +// +// NOTE: provider may be not safe for concurrent access. +func (c *Client) Primary() provider.Provider { + c.providerMutex.Lock() + defer c.providerMutex.Unlock() + return c.primary +} + +// Witnesses returns the witness providers. +// +// NOTE: providers may be not safe for concurrent access. +func (c *Client) Witnesses() []provider.Provider { + c.providerMutex.Lock() + defer c.providerMutex.Unlock() + return c.witnesses +} + +// Cleanup removes all the data (headers and validator sets) stored. Note: the +// client must be stopped at this point. +func (c *Client) Cleanup() error { + c.logger.Info("Removing all the data") + c.latestTrustedBlock = nil + return c.trustedStore.Prune(0) +} + +// cleanupAfter deletes all headers & validator sets after +height+. It also +// resets latestTrustedBlock to the latest header. 
+func (c *Client) cleanupAfter(height int64) error {
+	prevHeight := c.latestTrustedBlock.Height
+
+	for {
+		h, err := c.trustedStore.LightBlockBefore(prevHeight)
+		if err == store.ErrLightBlockNotFound || (h != nil && h.Height <= height) {
+			break
+		} else if err != nil {
+			return fmt.Errorf("failed to get header before %d: %w", prevHeight, err)
+		}
+
+		err = c.trustedStore.DeleteLightBlock(h.Height)
+		if err != nil {
+			c.logger.Error("can't remove a trusted header & validator set", "err", err,
+				"height", h.Height)
+		}
+
+		prevHeight = h.Height
+	}
+
+	c.latestTrustedBlock = nil
+	err := c.restoreTrustedLightBlock()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *Client) updateTrustedLightBlock(l *types.LightBlock) error {
+	if err := c.trustedStore.SaveLightBlock(l); err != nil {
+		return fmt.Errorf("failed to save trusted header: %w", err)
+	}
+
+	if c.pruningSize > 0 {
+		if err := c.trustedStore.Prune(c.pruningSize); err != nil {
+			return fmt.Errorf("prune: %w", err)
+		}
+	}
+
+	if c.latestTrustedBlock == nil || l.Height > c.latestTrustedBlock.Height {
+		c.latestTrustedBlock = l
+	}
+
+	return nil
+}
+
+// backwards verification (see VerifyHeaderBackwards func in the spec) verifies
+// headers before a trusted header. If a sent header is invalid, the primary is
+// replaced with another provider and the operation is repeated.
+func (c *Client) backwards(
+	ctx context.Context,
+	trustedHeader *types.Header,
+	newHeader *types.Header) error {
+
+	var (
+		verifiedHeader = trustedHeader
+		interimHeader  *types.Header
+	)
+
+	for verifiedHeader.Height > newHeader.Height {
+		interimBlock, err := c.lightBlockFromPrimary(ctx, verifiedHeader.Height-1)
+		if err != nil {
+			return fmt.Errorf("failed to obtain the header at height #%d: %w", verifiedHeader.Height-1, err)
+		}
+		interimHeader = interimBlock.Header
+		c.logger.Debug("Verify newHeader against verifiedHeader",
+			"trustedHeight", verifiedHeader.Height,
+			"trustedHash", hash2str(verifiedHeader.Hash()),
+			"newHeight", interimHeader.Height,
+			"newHash", hash2str(interimHeader.Hash()))
+		if err := VerifyBackwards(interimHeader, verifiedHeader); err != nil {
+			c.logger.Error("primary sent invalid header -> replacing", "err", err)
+			if replaceErr := c.replacePrimaryProvider(); replaceErr != nil {
+				c.logger.Error("Can't replace primary", "err", replaceErr)
+				// return original error
+				return fmt.Errorf("verify backwards from %d to %d failed: %w",
+					verifiedHeader.Height, interimHeader.Height, err)
+			}
+			// we need to verify the header at the same height again
+			continue
+		}
+		verifiedHeader = interimHeader
+	}
+
+	return nil
+}
+
+// NOTE: requires providerMutex to be locked.
+func (c *Client) removeWitness(idx int) {
+	switch len(c.witnesses) {
+	case 0:
+		panic(fmt.Sprintf("wanted to remove %d element from empty witnesses slice", idx))
+	case 1:
+		c.witnesses = make([]provider.Provider, 0)
+	default:
+		c.witnesses[idx] = c.witnesses[len(c.witnesses)-1]
+		c.witnesses = c.witnesses[:len(c.witnesses)-1]
+	}
+}
+
+// replacePrimaryProvider promotes the first witness to primary provider,
+// dropping it from the witness list. 
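A hedged, lock-free sketch of the promotion performed below, with strings standing in for providers (the errors import is assumed):

// promoteFirstWitness shows the slice surgery in replacePrimaryProvider:
// the first witness becomes the new primary and leaves the witness list;
// with fewer than two witnesses there is nothing safe to promote.
func promoteFirstWitness(witnesses []string) (string, []string, error) {
	if len(witnesses) <= 1 {
		return "", witnesses, errors.New("not enough witnesses to replace the primary")
	}
	return witnesses[0], witnesses[1:], nil
}
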
+func (c *Client) replacePrimaryProvider() error { + c.providerMutex.Lock() + defer c.providerMutex.Unlock() + + if len(c.witnesses) <= 1 { + return errNoWitnesses{} + } + c.primary = c.witnesses[0] + c.witnesses = c.witnesses[1:] + c.logger.Info("Replacing primary with the first witness", "new_primary", c.primary) + + return nil +} + +// lightBlockFromPrimary retrieves the lightBlock from the primary provider +// at the specified height. Handles dropout by the primary provider by swapping +// with an alternative provider. +func (c *Client) lightBlockFromPrimary(ctx context.Context, height int64) (*types.LightBlock, error) { + c.providerMutex.Lock() + l, err := c.primary.LightBlock(ctx, height) + c.providerMutex.Unlock() + if err != nil { + c.logger.Debug("Error on light block request from primary", "error", err) + replaceErr := c.replacePrimaryProvider() + if replaceErr != nil { + return nil, fmt.Errorf("%v. Tried to replace primary but: %w", err.Error(), replaceErr) + } + // replace primary and request a light block again + return c.lightBlockFromPrimary(ctx, height) + } + return l, err +} + +// compareFirstHeaderWithWitnesses compares h with all witnesses. If any +// witness reports a different header than h, the function returns an error. +func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.SignedHeader) error { + compareCtx, cancel := context.WithCancel(ctx) + defer cancel() + + if len(c.witnesses) < 1 { + return errNoWitnesses{} + } + + errc := make(chan error, len(c.witnesses)) + for i, witness := range c.witnesses { + go c.compareNewHeaderWithWitness(compareCtx, errc, h, witness, i) + } + + witnessesToRemove := make([]int, 0, len(c.witnesses)) + + // handle errors from the header comparisons as they come in + for i := 0; i < cap(errc); i++ { + err := <-errc + + switch e := err.(type) { + case nil: + continue + case errConflictingHeaders: + c.logger.Error(fmt.Sprintf(`Witness #%d has a different header. Please check primary is correct +and remove witness. Otherwise, use the different primary`, e.WitnessIndex), "witness", c.witnesses[e.WitnessIndex]) + return err + case errBadWitness: + // If witness sent us an invalid header, then remove it. If it didn't + // respond or couldn't find the block, then we ignore it and move on to + // the next witness. 
+ if _, ok := e.Reason.(provider.ErrBadLightBlock); ok { + c.logger.Info("Witness sent us invalid header / vals -> removing it", "witness", c.witnesses[e.WitnessIndex]) + witnessesToRemove = append(witnessesToRemove, e.WitnessIndex) + } + } + } + + for _, idx := range witnessesToRemove { + c.removeWitness(idx) + } + + return nil +} + +func hash2str(hash []byte) string { + return fmt.Sprintf("%X", hash) +} diff --git a/lite2/client_benchmark_test.go b/light/client_benchmark_test.go similarity index 55% rename from lite2/client_benchmark_test.go rename to light/client_benchmark_test.go index 5877dbc3c..eb02686b8 100644 --- a/lite2/client_benchmark_test.go +++ b/light/client_benchmark_test.go @@ -1,16 +1,17 @@ -package lite_test +package light_test import ( + "context" "testing" "time" dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/libs/log" - lite "github.com/tendermint/tendermint/lite2" - "github.com/tendermint/tendermint/lite2/provider" - mockp "github.com/tendermint/tendermint/lite2/provider/mock" - dbs "github.com/tendermint/tendermint/lite2/store/db" + "github.com/tendermint/tendermint/light" + "github.com/tendermint/tendermint/light/provider" + mockp "github.com/tendermint/tendermint/light/provider/mock" + dbs "github.com/tendermint/tendermint/light/store/db" ) // NOTE: block is produced every minute. Make sure the verification time @@ -21,23 +22,24 @@ import ( // // Remember that none of these benchmarks account for network latency. var ( - benchmarkFullNode = mockp.New(GenMockNode(chainID, 1000, 100, 1, bTime)) - genesisHeader, _ = benchmarkFullNode.SignedHeader(1) + benchmarkFullNode = mockp.New(genMockNode(chainID, 1000, 100, 1, bTime)) + genesisBlock, _ = benchmarkFullNode.LightBlock(context.Background(), 1) ) func BenchmarkSequence(b *testing.B) { - c, err := lite.NewClient( + c, err := light.NewClient( + context.Background(), chainID, - lite.TrustOptions{ + light.TrustOptions{ Period: 24 * time.Hour, Height: 1, - Hash: genesisHeader.Hash(), + Hash: genesisBlock.Hash(), }, benchmarkFullNode, []provider.Provider{benchmarkFullNode}, dbs.New(dbm.NewMemDB(), chainID), - lite.Logger(log.TestingLogger()), - lite.SequentialVerification(), + light.Logger(log.TestingLogger()), + light.SequentialVerification(), ) if err != nil { b.Fatal(err) @@ -45,7 +47,7 @@ func BenchmarkSequence(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - _, err = c.VerifyHeaderAtHeight(1000, bTime.Add(1000*time.Minute)) + _, err = c.VerifyLightBlockAtHeight(context.Background(), 1000, bTime.Add(1000*time.Minute)) if err != nil { b.Fatal(err) } @@ -53,17 +55,18 @@ func BenchmarkSequence(b *testing.B) { } func BenchmarkBisection(b *testing.B) { - c, err := lite.NewClient( + c, err := light.NewClient( + context.Background(), chainID, - lite.TrustOptions{ + light.TrustOptions{ Period: 24 * time.Hour, Height: 1, - Hash: genesisHeader.Hash(), + Hash: genesisBlock.Hash(), }, benchmarkFullNode, []provider.Provider{benchmarkFullNode}, dbs.New(dbm.NewMemDB(), chainID), - lite.Logger(log.TestingLogger()), + light.Logger(log.TestingLogger()), ) if err != nil { b.Fatal(err) @@ -71,7 +74,7 @@ func BenchmarkBisection(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - _, err = c.VerifyHeaderAtHeight(1000, bTime.Add(1000*time.Minute)) + _, err = c.VerifyLightBlockAtHeight(context.Background(), 1000, bTime.Add(1000*time.Minute)) if err != nil { b.Fatal(err) } @@ -79,18 +82,19 @@ func BenchmarkBisection(b *testing.B) { } func BenchmarkBackwards(b *testing.B) { - trustedHeader, _ := 
benchmarkFullNode.SignedHeader(0) - c, err := lite.NewClient( + trustedBlock, _ := benchmarkFullNode.LightBlock(context.Background(), 0) + c, err := light.NewClient( + context.Background(), chainID, - lite.TrustOptions{ + light.TrustOptions{ Period: 24 * time.Hour, - Height: trustedHeader.Height, - Hash: trustedHeader.Hash(), + Height: trustedBlock.Height, + Hash: trustedBlock.Hash(), }, benchmarkFullNode, []provider.Provider{benchmarkFullNode}, dbs.New(dbm.NewMemDB(), chainID), - lite.Logger(log.TestingLogger()), + light.Logger(log.TestingLogger()), ) if err != nil { b.Fatal(err) @@ -98,7 +102,7 @@ func BenchmarkBackwards(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - _, err = c.VerifyHeaderAtHeight(1, bTime) + _, err = c.VerifyLightBlockAtHeight(context.Background(), 1, bTime) if err != nil { b.Fatal(err) } diff --git a/lite2/client_test.go b/light/client_test.go similarity index 52% rename from lite2/client_test.go rename to light/client_test.go index a54786ab7..13d550de7 100644 --- a/lite2/client_test.go +++ b/light/client_test.go @@ -1,6 +1,7 @@ -package lite_test +package light_test import ( + "context" "sync" "testing" "time" @@ -11,10 +12,10 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/libs/log" - lite "github.com/tendermint/tendermint/lite2" - "github.com/tendermint/tendermint/lite2/provider" - mockp "github.com/tendermint/tendermint/lite2/provider/mock" - dbs "github.com/tendermint/tendermint/lite2/store/db" + "github.com/tendermint/tendermint/light" + "github.com/tendermint/tendermint/light/provider" + mockp "github.com/tendermint/tendermint/light/provider/mock" + dbs "github.com/tendermint/tendermint/light/store/db" "github.com/tendermint/tendermint/types" ) @@ -23,19 +24,20 @@ const ( ) var ( + ctx = context.Background() keys = genPrivKeys(4) vals = keys.ToValidators(20, 10) bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") h1 = keys.GenSignedHeader(chainID, 1, bTime, nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) // 3/3 signed h2 = keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys), types.BlockID{Hash: h1.Hash()}) + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h1.Hash()}) // 3/3 signed h3 = keys.GenSignedHeaderLastBlockID(chainID, 3, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys), types.BlockID{Hash: h2.Hash()}) + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h2.Hash()}) trustPeriod = 4 * time.Hour - trustOptions = lite.TrustOptions{ + trustOptions = light.TrustOptions{ Period: 4 * time.Hour, Height: 1, Hash: h1.Hash(), @@ -53,18 +55,72 @@ var ( // last header (3/3 signed) 3: h3, } + l1 = &types.LightBlock{SignedHeader: h1, ValidatorSet: vals} + l2 = &types.LightBlock{SignedHeader: h2, ValidatorSet: vals} fullNode = mockp.New( chainID, headerSet, valSet, ) deadNode = mockp.NewDeadMock(chainID) - largeFullNode = mockp.New(GenMockNode(chainID, 10, 3, 0, bTime)) + largeFullNode = mockp.New(genMockNode(chainID, 10, 3, 0, bTime)) ) +func TestValidateTrustOptions(t *testing.T) { + testCases := []struct { + err bool + to light.TrustOptions + }{ + { + false, + trustOptions, + }, + { + true, + light.TrustOptions{ + Period: 
-1 * time.Hour, + Height: 1, + Hash: h1.Hash(), + }, + }, + { + true, + light.TrustOptions{ + Period: 1 * time.Hour, + Height: 0, + Hash: h1.Hash(), + }, + }, + { + true, + light.TrustOptions{ + Period: 1 * time.Hour, + Height: 1, + Hash: []byte("incorrect hash"), + }, + }, + } + + for _, tc := range testCases { + err := tc.to.ValidateBasic() + if tc.err { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + } + +} + +func TestMock(t *testing.T) { + l, _ := fullNode.LightBlock(ctx, 3) + assert.Equal(t, int64(3), l.Height) +} + func TestClient_SequentialVerification(t *testing.T) { newKeys := genPrivKeys(4) newVals := newKeys.ToValidators(10, 1) + differentVals, _ := types.RandValidatorSet(10, 100) testCases := []struct { name string @@ -85,7 +141,7 @@ func TestClient_SequentialVerification(t *testing.T) { map[int64]*types.SignedHeader{ // different header 1: keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), }, map[int64]*types.ValidatorSet{ 1: vals, @@ -93,6 +149,26 @@ func TestClient_SequentialVerification(t *testing.T) { true, false, }, + { + "bad: no first signed header", + map[int64]*types.SignedHeader{}, + map[int64]*types.ValidatorSet{ + 1: differentVals, + }, + true, + true, + }, + { + "bad: different first validator set", + map[int64]*types.SignedHeader{ + 1: h1, + }, + map[int64]*types.ValidatorSet{ + 1: differentVals, + }, + true, + true, + }, { "bad: 1/3 signed interim header", map[int64]*types.SignedHeader{ @@ -100,10 +176,10 @@ func TestClient_SequentialVerification(t *testing.T) { 1: h1, // interim header (1/3 signed) 2: keys.GenSignedHeader(chainID, 2, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), len(keys)-1, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), len(keys)-1, len(keys)), // last header (3/3 signed) 3: keys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), }, valSet, false, @@ -116,10 +192,10 @@ func TestClient_SequentialVerification(t *testing.T) { 1: h1, // interim header (3/3 signed) 2: keys.GenSignedHeader(chainID, 2, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), // last header (1/3 signed) 3: keys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), len(keys)-1, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), len(keys)-1, len(keys)), }, valSet, false, @@ -141,7 +217,8 @@ func TestClient_SequentialVerification(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - c, err := lite.NewClient( + c, err := light.NewClient( + ctx, chainID, trustOptions, mockp.New( @@ -155,7 +232,8 @@ func TestClient_SequentialVerification(t *testing.T) { tc.vals, )}, dbs.New(dbm.NewMemDB(), chainID), - lite.SequentialVerification(), + light.SequentialVerification(), + light.Logger(log.TestingLogger()), ) if tc.initErr { @@ -165,7 +243,7 @@ func TestClient_SequentialVerification(t *testing.T) { require.NoError(t, err) - _, err = 
c.VerifyHeaderAtHeight(3, bTime.Add(3*time.Hour)) + _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(3*time.Hour)) if tc.verifyErr { assert.Error(t, err) } else { @@ -209,7 +287,7 @@ func TestClient_SkippingVerification(t *testing.T) { // trusted header 1: h1, 3: transitKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, transitVals, transitVals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(transitKeys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(transitKeys)), }, map[int64]*types.ValidatorSet{ 1: vals, @@ -226,10 +304,10 @@ func TestClient_SkippingVerification(t *testing.T) { 1: h1, // interim header (3/3 signed) 2: keys.GenSignedHeader(chainID, 2, bTime.Add(1*time.Hour), nil, vals, newVals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), // last header (0/4 of the original val set signed) 3: newKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, newVals, newVals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(newKeys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(newKeys)), }, map[int64]*types.ValidatorSet{ 1: vals, @@ -246,10 +324,10 @@ func TestClient_SkippingVerification(t *testing.T) { 1: h1, // last header (0/4 of the original val set signed) 2: keys.GenSignedHeader(chainID, 2, bTime.Add(1*time.Hour), nil, vals, newVals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, 0), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, 0), // last header (0/4 of the original val set signed) 3: newKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, newVals, newVals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(newKeys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(newKeys)), }, map[int64]*types.ValidatorSet{ 1: vals, @@ -264,7 +342,8 @@ func TestClient_SkippingVerification(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - c, err := lite.NewClient( + c, err := light.NewClient( + ctx, chainID, trustOptions, mockp.New( @@ -278,7 +357,8 @@ func TestClient_SkippingVerification(t *testing.T) { tc.vals, )}, dbs.New(dbm.NewMemDB(), chainID), - lite.SkippingVerification(lite.DefaultTrustLevel), + light.SkippingVerification(light.DefaultTrustLevel), + light.Logger(log.TestingLogger()), ) if tc.initErr { require.Error(t, err) @@ -287,7 +367,7 @@ func TestClient_SkippingVerification(t *testing.T) { require.NoError(t, err) - _, err = c.VerifyHeaderAtHeight(3, bTime.Add(3*time.Hour)) + _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(3*time.Hour)) if tc.verifyErr { assert.Error(t, err) } else { @@ -295,32 +375,85 @@ func TestClient_SkippingVerification(t *testing.T) { } }) } + +} + +// start from a large light block to make sure that the pivot height doesn't select a height outside +// the appropriate range +func TestClientLargeBisectionVerification(t *testing.T) { + veryLargeFullNode := mockp.New(genMockNode(chainID, 100, 3, 0, bTime)) + trustedLightBlock, err := veryLargeFullNode.LightBlock(ctx, 5) + require.NoError(t, err) + c, err := light.NewClient( + ctx, + chainID, + light.TrustOptions{ + Period: 4 * time.Hour, + Height: trustedLightBlock.Height, + Hash: trustedLightBlock.Hash(), + }, + veryLargeFullNode, + []provider.Provider{veryLargeFullNode}, + dbs.New(dbm.NewMemDB(), chainID), + 
light.SkippingVerification(light.DefaultTrustLevel), + ) + require.NoError(t, err) + h, err := c.Update(ctx, bTime.Add(100*time.Minute)) + assert.NoError(t, err) + h2, err := veryLargeFullNode.LightBlock(ctx, 100) + require.NoError(t, err) + assert.Equal(t, h, h2) +} + +func TestClientBisectionBetweenTrustedHeaders(t *testing.T) { + c, err := light.NewClient( + ctx, + chainID, + light.TrustOptions{ + Period: 4 * time.Hour, + Height: 1, + Hash: h1.Hash(), + }, + fullNode, + []provider.Provider{fullNode}, + dbs.New(dbm.NewMemDB(), chainID), + light.SkippingVerification(light.DefaultTrustLevel), + ) + require.NoError(t, err) + + _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) + require.NoError(t, err) + + // confirm that the client already doesn't have the light block + _, err = c.TrustedLightBlock(2) + require.Error(t, err) + + // verify using bisection the light block between the two trusted light blocks + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour)) + assert.NoError(t, err) } func TestClient_Cleanup(t *testing.T) { - c, err := lite.NewClient( + c, err := light.NewClient( + ctx, chainID, trustOptions, fullNode, []provider.Provider{fullNode}, dbs.New(dbm.NewMemDB(), chainID), - lite.Logger(log.TestingLogger()), + light.Logger(log.TestingLogger()), ) require.NoError(t, err) - _, err = c.TrustedHeader(1) + _, err = c.TrustedLightBlock(1) require.NoError(t, err) err = c.Cleanup() require.NoError(t, err) - // Check no headers/valsets exist after Cleanup. - h, err := c.TrustedHeader(1) - assert.Error(t, err) - assert.Nil(t, h) - - valSet, _, err := c.TrustedValidatorSet(1) + // Check no light blocks exist after Cleanup. + l, err := c.TrustedLightBlock(1) assert.Error(t, err) - assert.Nil(t, valSet) + assert.Nil(t, l) } // trustedHeader.Height == options.Height @@ -328,41 +461,36 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { // 1. options.Hash == trustedHeader.Hash { trustedStore := dbs.New(dbm.NewMemDB(), chainID) - err := trustedStore.SaveSignedHeaderAndValidatorSet(h1, vals) + err := trustedStore.SaveLightBlock(l1) require.NoError(t, err) - c, err := lite.NewClient( + c, err := light.NewClient( + ctx, chainID, trustOptions, fullNode, []provider.Provider{fullNode}, trustedStore, - lite.Logger(log.TestingLogger()), + light.Logger(log.TestingLogger()), ) require.NoError(t, err) - h, err := c.TrustedHeader(1) - assert.NoError(t, err) - assert.NotNil(t, h) - assert.Equal(t, h.Hash(), h1.Hash()) - - valSet, _, err := c.TrustedValidatorSet(1) + l, err := c.TrustedLightBlock(1) assert.NoError(t, err) - assert.NotNil(t, valSet) - if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) - } + assert.NotNil(t, l) + assert.Equal(t, l.Hash(), h1.Hash()) + assert.Equal(t, l.ValidatorSet.Hash(), h1.ValidatorsHash.Bytes()) } // 2. 
options.Hash != trustedHeader.Hash { trustedStore := dbs.New(dbm.NewMemDB(), chainID) - err := trustedStore.SaveSignedHeaderAndValidatorSet(h1, vals) + err := trustedStore.SaveLightBlock(l1) require.NoError(t, err) - // header1 != header + // header1 != h1 header1 := keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) primary := mockp.New( chainID, @@ -373,9 +501,10 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { valSet, ) - c, err := lite.NewClient( + c, err := light.NewClient( + ctx, chainID, - lite.TrustOptions{ + light.TrustOptions{ Period: 4 * time.Hour, Height: 1, Hash: header1.Hash(), @@ -383,21 +512,15 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { primary, []provider.Provider{primary}, trustedStore, - lite.Logger(log.TestingLogger()), + light.Logger(log.TestingLogger()), ) require.NoError(t, err) - h, err := c.TrustedHeader(1) - assert.NoError(t, err) - if assert.NotNil(t, h) { - assert.Equal(t, h.Hash(), header1.Hash()) - } - - valSet, _, err := c.TrustedValidatorSet(1) + l, err := c.TrustedLightBlock(1) assert.NoError(t, err) - assert.NotNil(t, valSet) - if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + if assert.NotNil(t, l) { + assert.Equal(t, l.Hash(), header1.Hash()) + assert.NoError(t, l.ValidateBasic(chainID)) } } } @@ -407,12 +530,13 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { // 1. options.Hash == trustedHeader.Hash { trustedStore := dbs.New(dbm.NewMemDB(), chainID) - err := trustedStore.SaveSignedHeaderAndValidatorSet(h1, vals) + err := trustedStore.SaveLightBlock(l1) require.NoError(t, err) - c, err := lite.NewClient( + c, err := light.NewClient( + ctx, chainID, - lite.TrustOptions{ + light.TrustOptions{ Period: 4 * time.Hour, Height: 2, Hash: h2.Hash(), @@ -420,37 +544,31 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { fullNode, []provider.Provider{fullNode}, trustedStore, - lite.Logger(log.TestingLogger()), + light.Logger(log.TestingLogger()), ) require.NoError(t, err) // Check we still have the 1st header (+header+). - h, err := c.TrustedHeader(1) + l, err := c.TrustedLightBlock(1) assert.NoError(t, err) - assert.NotNil(t, h) - assert.Equal(t, h.Hash(), h1.Hash()) - - valSet, _, err := c.TrustedValidatorSet(1) - assert.NoError(t, err) - assert.NotNil(t, valSet) - if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) - } + assert.NotNil(t, l) + assert.Equal(t, l.Hash(), h1.Hash()) + assert.NoError(t, l.ValidateBasic(chainID)) } // 2. options.Hash != trustedHeader.Hash // This could happen if previous provider was lying to us. 
{ trustedStore := dbs.New(dbm.NewMemDB(), chainID) - err := trustedStore.SaveSignedHeaderAndValidatorSet(h1, vals) + err := trustedStore.SaveLightBlock(l1) require.NoError(t, err) // header1 != header diffHeader1 := keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) diffHeader2 := keys.GenSignedHeader(chainID, 2, bTime.Add(2*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) primary := mockp.New( chainID, @@ -461,9 +579,10 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { valSet, ) - c, err := lite.NewClient( + c, err := light.NewClient( + ctx, chainID, - lite.TrustOptions{ + light.TrustOptions{ Period: 4 * time.Hour, Height: 2, Hash: diffHeader2.Hash(), @@ -471,18 +590,14 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { primary, []provider.Provider{primary}, trustedStore, - lite.Logger(log.TestingLogger()), + light.Logger(log.TestingLogger()), ) require.NoError(t, err) // Check we no longer have the invalid 1st header (+header+). - h, err := c.TrustedHeader(1) - assert.Error(t, err) - assert.Nil(t, h) - - valSet, _, err := c.TrustedValidatorSet(1) + l, err := c.TrustedLightBlock(1) assert.Error(t, err) - assert.Nil(t, valSet) + assert.Nil(t, l) } } @@ -490,62 +605,59 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { // 1. options.Hash == trustedHeader.Hash { + // load the first three headers into the trusted store trustedStore := dbs.New(dbm.NewMemDB(), chainID) - err := trustedStore.SaveSignedHeaderAndValidatorSet(h1, vals) + err := trustedStore.SaveLightBlock(l1) require.NoError(t, err) - //header2 := keys.GenSignedHeader(chainID, 2, bTime.Add(2*time.Hour), nil, vals, vals, - // []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) - err = trustedStore.SaveSignedHeaderAndValidatorSet(h2, vals) + err = trustedStore.SaveLightBlock(l2) require.NoError(t, err) - c, err := lite.NewClient( + c, err := light.NewClient( + ctx, chainID, trustOptions, fullNode, []provider.Provider{fullNode}, trustedStore, - lite.Logger(log.TestingLogger()), + light.Logger(log.TestingLogger()), ) require.NoError(t, err) - // Check we still have the 1st header (+header+). - h, err := c.TrustedHeader(1) - assert.NoError(t, err) - assert.NotNil(t, h) - assert.Equal(t, h.Hash(), h1.Hash()) - - valSet, _, err := c.TrustedValidatorSet(1) + // Check we still have the 1st light block. + l, err := c.TrustedLightBlock(1) assert.NoError(t, err) - assert.NotNil(t, valSet) - if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) - } + assert.NotNil(t, l) + assert.Equal(t, l.Hash(), h1.Hash()) + assert.NoError(t, l.ValidateBasic(chainID)) - // Check we no longer have 2nd header (+header2+). - h, err = c.TrustedHeader(2) + // Check we no longer have 2nd light block. + l, err = c.TrustedLightBlock(2) assert.Error(t, err) - assert.Nil(t, h) + assert.Nil(t, l) - valSet, _, err = c.TrustedValidatorSet(2) + l, err = c.TrustedLightBlock(3) assert.Error(t, err) - assert.Nil(t, valSet) + assert.Nil(t, l) } // 2. options.Hash != trustedHeader.Hash // This could happen if previous provider was lying to us. 
{
		trustedStore := dbs.New(dbm.NewMemDB(), chainID)
-		err := trustedStore.SaveSignedHeaderAndValidatorSet(h1, vals)
+		err := trustedStore.SaveLightBlock(l1)
		require.NoError(t, err)

		// header1 != header
		header1 := keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals,
-			[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys))
+			hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys))

		header2 := keys.GenSignedHeader(chainID, 2, bTime.Add(2*time.Hour), nil, vals, vals,
-			[]byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys))
-		err = trustedStore.SaveSignedHeaderAndValidatorSet(header2, vals)
+			hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys))
+		err = trustedStore.SaveLightBlock(&types.LightBlock{
+			SignedHeader: header2,
+			ValidatorSet: vals,
+		})
		require.NoError(t, err)

		primary := mockp.New(
@@ -556,9 +668,10 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) {
			valSet,
		)

-		c, err := lite.NewClient(
+		c, err := light.NewClient(
+			ctx,
			chainID,
-			lite.TrustOptions{
+			light.TrustOptions{
				Period: 4 * time.Hour,
				Height: 1,
				Hash:   header1.Hash(),
@@ -566,71 +679,58 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) {
			primary,
			[]provider.Provider{primary},
			trustedStore,
-			lite.Logger(log.TestingLogger()),
+			light.Logger(log.TestingLogger()),
		)
		require.NoError(t, err)

-		// Check we have swapped invalid 1st header (+header+) with correct one (+header1+).
-		h, err := c.TrustedHeader(1)
+		// Check we have swapped the invalid 1st light block with the correct one (+header1+).
+		l, err := c.TrustedLightBlock(1)
		assert.NoError(t, err)
-		assert.NotNil(t, h)
-		assert.Equal(t, h.Hash(), header1.Hash())
-
-		valSet, _, err := c.TrustedValidatorSet(1)
-		assert.NoError(t, err)
-		assert.NotNil(t, valSet)
-		if assert.NotNil(t, valSet) {
-			assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash())
-		}
-
-		// Check we no longer have invalid 2nd header (+header2+).
-		h, err = c.TrustedHeader(2)
-		assert.Error(t, err)
-		assert.Nil(t, h)
+		assert.NotNil(t, l)
+		assert.Equal(t, l.Hash(), header1.Hash())
+		assert.NoError(t, l.ValidateBasic(chainID))

-		valSet, _, err = c.TrustedValidatorSet(2)
+		// Check we no longer have the invalid 2nd light block (+header2+).
+ l, err = c.TrustedLightBlock(2) assert.Error(t, err) - assert.Nil(t, valSet) + assert.Nil(t, l) } } func TestClient_Update(t *testing.T) { - c, err := lite.NewClient( + c, err := light.NewClient( + ctx, chainID, trustOptions, fullNode, []provider.Provider{fullNode}, dbs.New(dbm.NewMemDB(), chainID), - lite.Logger(log.TestingLogger()), + light.Logger(log.TestingLogger()), ) require.NoError(t, err) // should result in downloading & verifying header #3 - h, err := c.Update(bTime.Add(2 * time.Hour)) + l, err := c.Update(ctx, bTime.Add(2*time.Hour)) assert.NoError(t, err) - if assert.NotNil(t, h) { - assert.EqualValues(t, 3, h.Height) - } - - valSet, _, err := c.TrustedValidatorSet(3) - assert.NoError(t, err) - if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + if assert.NotNil(t, l) { + assert.EqualValues(t, 3, l.Height) + assert.NoError(t, l.ValidateBasic(chainID)) } } func TestClient_Concurrency(t *testing.T) { - c, err := lite.NewClient( + c, err := light.NewClient( + ctx, chainID, trustOptions, fullNode, []provider.Provider{fullNode}, dbs.New(dbm.NewMemDB(), chainID), - lite.Logger(log.TestingLogger()), + light.Logger(log.TestingLogger()), ) require.NoError(t, err) - _, err = c.VerifyHeaderAtHeight(2, bTime.Add(2*time.Hour)) + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour)) require.NoError(t, err) var wg sync.WaitGroup @@ -639,7 +739,7 @@ func TestClient_Concurrency(t *testing.T) { go func() { defer wg.Done() - // NOTE: Cleanup, Stop, VerifyHeaderAtHeight and Verify are not supposed + // NOTE: Cleanup, Stop, VerifyLightBlockAtHeight and Verify are not supposed // to be concurrenly safe. assert.Equal(t, chainID, c.ChainID()) @@ -650,13 +750,9 @@ func TestClient_Concurrency(t *testing.T) { _, err = c.FirstTrustedHeight() assert.NoError(t, err) - h, err := c.TrustedHeader(1) + l, err := c.TrustedLightBlock(1) assert.NoError(t, err) - assert.NotNil(t, h) - - vals, _, err := c.TrustedValidatorSet(2) - assert.NoError(t, err) - assert.NotNil(t, vals) + assert.NotNil(t, l) }() } @@ -664,18 +760,19 @@ func TestClient_Concurrency(t *testing.T) { } func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) { - c, err := lite.NewClient( + c, err := light.NewClient( + ctx, chainID, trustOptions, deadNode, []provider.Provider{fullNode, fullNode}, dbs.New(dbm.NewMemDB(), chainID), - lite.Logger(log.TestingLogger()), - lite.MaxRetryAttempts(1), + light.Logger(log.TestingLogger()), + light.MaxRetryAttempts(1), ) require.NoError(t, err) - _, err = c.Update(bTime.Add(2 * time.Hour)) + _, err = c.Update(ctx, bTime.Add(2*time.Hour)) require.NoError(t, err) assert.NotEqual(t, c.Primary(), deadNode) @@ -684,10 +781,11 @@ func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) { func TestClient_BackwardsVerification(t *testing.T) { { - trustHeader, _ := largeFullNode.SignedHeader(6) - c, err := lite.NewClient( + trustHeader, _ := largeFullNode.LightBlock(ctx, 6) + c, err := light.NewClient( + ctx, chainID, - lite.TrustOptions{ + light.TrustOptions{ Period: 4 * time.Minute, Height: trustHeader.Height, Hash: trustHeader.Hash(), @@ -695,47 +793,43 @@ func TestClient_BackwardsVerification(t *testing.T) { largeFullNode, []provider.Provider{largeFullNode}, dbs.New(dbm.NewMemDB(), chainID), - lite.Logger(log.TestingLogger()), + light.Logger(log.TestingLogger()), ) require.NoError(t, err) // 1) verify before the trusted header using backwards => expect no error - h, err := c.VerifyHeaderAtHeight(5, 
bTime.Add(6*time.Minute)) + h, err := c.VerifyLightBlockAtHeight(ctx, 5, bTime.Add(6*time.Minute)) require.NoError(t, err) if assert.NotNil(t, h) { assert.EqualValues(t, 5, h.Height) } // 2) untrusted header is expired but trusted header is not => expect no error - h, err = c.VerifyHeaderAtHeight(3, bTime.Add(8*time.Minute)) + h, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(8*time.Minute)) assert.NoError(t, err) assert.NotNil(t, h) // 3) already stored headers should return the header without error - h, err = c.VerifyHeaderAtHeight(5, bTime.Add(6*time.Minute)) + h, err = c.VerifyLightBlockAtHeight(ctx, 5, bTime.Add(6*time.Minute)) assert.NoError(t, err) assert.NotNil(t, h) // 4a) First verify latest header - _, err = c.VerifyHeaderAtHeight(9, bTime.Add(9*time.Minute)) + _, err = c.VerifyLightBlockAtHeight(ctx, 9, bTime.Add(9*time.Minute)) require.NoError(t, err) // 4b) Verify backwards using bisection => expect no error - _, err = c.VerifyHeaderAtHeight(7, bTime.Add(10*time.Minute)) + _, err = c.VerifyLightBlockAtHeight(ctx, 7, bTime.Add(9*time.Minute)) assert.NoError(t, err) // shouldn't have verified this header in the process - _, err = c.TrustedHeader(8) + _, err = c.TrustedLightBlock(8) assert.Error(t, err) - // 5) trusted header has expired => expect error - _, err = c.VerifyHeaderAtHeight(1, bTime.Add(20*time.Minute)) + // 5) Try bisection method, but closest header (at 7) has expired + // so expect error + _, err = c.VerifyLightBlockAtHeight(ctx, 8, bTime.Add(12*time.Minute)) assert.Error(t, err) - // 6) Try bisection method, but closest header (at 7) has expired - // so change to backwards => expect no error - _, err = c.VerifyHeaderAtHeight(8, bTime.Add(12*time.Minute)) - assert.NoError(t, err) - } { testCases := []struct { @@ -747,8 +841,8 @@ func TestClient_BackwardsVerification(t *testing.T) { chainID, map[int64]*types.SignedHeader{ 1: h1, - 2: keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), + 2: keys.GenSignedHeader(chainID, 1, bTime.Add(30*time.Minute), nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), 3: h3, }, valSet, @@ -761,7 +855,7 @@ func TestClient_BackwardsVerification(t *testing.T) { map[int64]*types.SignedHeader{ 1: h1, 2: keys.GenSignedHeader(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), + hash("app_hash2"), hash("cons_hash23"), hash("results_hash30"), 0, len(keys)), 3: h3, }, valSet, @@ -769,10 +863,11 @@ func TestClient_BackwardsVerification(t *testing.T) { }, } - for _, tc := range testCases { - c, err := lite.NewClient( + for idx, tc := range testCases { + c, err := light.NewClient( + ctx, chainID, - lite.TrustOptions{ + light.TrustOptions{ Period: 1 * time.Hour, Height: 3, Hash: h3.Hash(), @@ -780,12 +875,12 @@ func TestClient_BackwardsVerification(t *testing.T) { tc.provider, []provider.Provider{tc.provider}, dbs.New(dbm.NewMemDB(), chainID), - lite.Logger(log.TestingLogger()), + light.Logger(log.TestingLogger()), ) - require.NoError(t, err) + require.NoError(t, err, idx) - _, err = c.VerifyHeaderAtHeight(2, bTime.Add(1*time.Hour).Add(1*time.Second)) - assert.Error(t, err) + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour).Add(1*time.Second)) + assert.Error(t, err, idx) } } } @@ -793,10 +888,10 @@ func TestClient_BackwardsVerification(t *testing.T) { func TestClient_NewClientFromTrustedStore(t 
*testing.T) { // 1) Initiate DB and fill with a "trusted" header db := dbs.New(dbm.NewMemDB(), chainID) - err := db.SaveSignedHeaderAndValidatorSet(h1, vals) + err := db.SaveLightBlock(l1) require.NoError(t, err) - c, err := lite.NewClientFromTrustedStore( + c, err := light.NewClientFromTrustedStore( chainID, trustPeriod, deadNode, @@ -805,33 +900,11 @@ func TestClient_NewClientFromTrustedStore(t *testing.T) { ) require.NoError(t, err) - // 2) Check header exists (deadNode is being used to ensure we're not getting + // 2) Check light block exists (deadNode is being used to ensure we're not getting // it from primary) - h, err := c.TrustedHeader(1) - assert.NoError(t, err) - assert.EqualValues(t, 1, h.Height) - - valSet, _, err := c.TrustedValidatorSet(1) + h, err := c.TrustedLightBlock(1) assert.NoError(t, err) - assert.NotNil(t, valSet) - if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) - } -} - -func TestNewClientErrorsIfAllWitnessesUnavailable(t *testing.T) { - _, err := lite.NewClient( - chainID, - trustOptions, - fullNode, - []provider.Provider{deadNode, deadNode}, - dbs.New(dbm.NewMemDB(), chainID), - lite.Logger(log.TestingLogger()), - lite.MaxRetryAttempts(1), - ) - if assert.Error(t, err) { - assert.Contains(t, err.Error(), "awaiting response from all witnesses exceeded dropout time") - } + assert.EqualValues(t, l1.Height, h.Height) } func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { @@ -841,7 +914,7 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { map[int64]*types.SignedHeader{ 1: h1, 2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, - []byte("app_hash2"), []byte("cons_hash"), []byte("results_hash"), + hash("app_hash2"), hash("cons_hash"), hash("results_hash"), len(keys), len(keys), types.BlockID{Hash: h1.Hash()}), }, map[int64]*types.ValidatorSet{ @@ -855,7 +928,6 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { map[int64]*types.SignedHeader{ 1: h1, 2: h2, - 3: {Header: nil, Commit: nil}, }, map[int64]*types.ValidatorSet{ 1: vals, @@ -863,49 +935,167 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { }, ) - c, err := lite.NewClient( + lb1, _ := badProvider1.LightBlock(ctx, 2) + require.NotEqual(t, lb1.Hash(), l1.Hash()) + + c, err := light.NewClient( + ctx, chainID, trustOptions, fullNode, []provider.Provider{badProvider1, badProvider2}, dbs.New(dbm.NewMemDB(), chainID), - lite.Logger(log.TestingLogger()), - lite.MaxRetryAttempts(1), + light.Logger(log.TestingLogger()), + light.MaxRetryAttempts(1), ) // witness should have behaved properly -> no error require.NoError(t, err) assert.EqualValues(t, 2, len(c.Witnesses())) // witness behaves incorrectly -> removed from list, no error - h, err := c.VerifyHeaderAtHeight(2, bTime.Add(2*time.Hour)) + l, err := c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour)) assert.NoError(t, err) assert.EqualValues(t, 1, len(c.Witnesses())) - // header should still be verified - assert.EqualValues(t, 2, h.Height) + // light block should still be verified + assert.EqualValues(t, 2, l.Height) - // no witnesses left to verify -> error - _, err = c.VerifyHeaderAtHeight(3, bTime.Add(2*time.Hour)) - assert.Error(t, err) - assert.EqualValues(t, 0, len(c.Witnesses())) + // remaining witnesses don't have light block -> error + _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) + if assert.Error(t, err) { + assert.Equal(t, 
light.ErrFailedHeaderCrossReferencing, err)
+	}
+	// witness does not have a light block -> left in the list
+	assert.EqualValues(t, 1, len(c.Witnesses()))
 }

-func TestClientTrustedValidatorSet(t *testing.T) {
-	c, err := lite.NewClient(
+func TestClient_TrustedValidatorSet(t *testing.T) {
+	differentVals, _ := types.RandValidatorSet(10, 100)
+	badValSetNode := mockp.New(
+		chainID,
+		map[int64]*types.SignedHeader{
+			1: h1,
+			// 3/3 signed, but validator set at height 2 below is invalid -> witness
+			// should be removed.
+			2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals,
+				hash("app_hash2"), hash("cons_hash"), hash("results_hash"),
+				0, len(keys), types.BlockID{Hash: h1.Hash()}),
+			3: h3,
+		},
+		map[int64]*types.ValidatorSet{
+			1: vals,
+			2: differentVals,
+			3: differentVals,
+		},
+	)
+
+	c, err := light.NewClient(
+		ctx,
		chainID,
		trustOptions,
		fullNode,
-		[]provider.Provider{fullNode},
+		[]provider.Provider{badValSetNode, fullNode},
		dbs.New(dbm.NewMemDB(), chainID),
-		lite.Logger(log.TestingLogger()),
+		light.Logger(log.TestingLogger()),
	)
+	require.NoError(t, err)
+	assert.Equal(t, 2, len(c.Witnesses()))
+	_, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour).Add(1*time.Second))
+	assert.NoError(t, err)
+	assert.Equal(t, 1, len(c.Witnesses()))
+}
+
+func TestClientPrunesHeadersAndValidatorSets(t *testing.T) {
+	c, err := light.NewClient(
+		ctx,
+		chainID,
+		trustOptions,
+		fullNode,
+		[]provider.Provider{fullNode},
+		dbs.New(dbm.NewMemDB(), chainID),
+		light.Logger(log.TestingLogger()),
+		light.PruningSize(1),
+	)
+	require.NoError(t, err)
+	_, err = c.TrustedLightBlock(1)
	require.NoError(t, err)

-	_, err = c.VerifyHeaderAtHeight(2, bTime.Add(2*time.Hour).Add(1*time.Second))
+	h, err := c.Update(ctx, bTime.Add(2*time.Hour))
	require.NoError(t, err)
+	require.Equal(t, int64(3), h.Height)
+
+	_, err = c.TrustedLightBlock(1)
+	assert.Error(t, err)
+}
+
+func TestClientEnsureValidHeadersAndValSets(t *testing.T) {
+	emptyValSet := &types.ValidatorSet{
+		Validators: nil,
+		Proposer:   nil,
+	}
+
+	testCases := []struct {
+		headers map[int64]*types.SignedHeader
+		vals    map[int64]*types.ValidatorSet
+		err     bool
+	}{
+		{
+			headerSet,
+			valSet,
+			false,
+		},
+		{
+			headerSet,
+			map[int64]*types.ValidatorSet{
+				1: vals,
+				2: vals,
+				3: nil,
+			},
+			true,
+		},
+		{
+			map[int64]*types.SignedHeader{
+				1: h1,
+				2: h2,
+				3: nil,
+			},
+			valSet,
+			true,
+		},
+		{
+			headerSet,
+			map[int64]*types.ValidatorSet{
+				1: vals,
+				2: vals,
+				3: emptyValSet,
+			},
+			true,
+		},
+	}
+
+	for _, tc := range testCases {
+		badNode := mockp.New(
+			chainID,
+			tc.headers,
+			tc.vals,
+		)
+		c, err := light.NewClient(
+			ctx,
+			chainID,
+			trustOptions,
+			badNode,
+			[]provider.Provider{badNode, badNode},
+			dbs.New(dbm.NewMemDB(), chainID),
+			light.MaxRetryAttempts(1),
+		)
+		require.NoError(t, err)
+
+		_, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour))
+		if tc.err {
+			assert.Error(t, err)
+		} else {
+			assert.NoError(t, err)
+		}
+	}

-	valSet, height, err := c.TrustedValidatorSet(0)
-	assert.NoError(t, err)
-	assert.NotNil(t, valSet)
-	assert.EqualValues(t, 2, height)
 }
diff --git a/light/detector.go b/light/detector.go
new file mode 100644
index 000000000..6c8e5f8d1
--- /dev/null
+++ b/light/detector.go
@@ -0,0 +1,250 @@
+package light
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/tendermint/tendermint/light/provider"
+	"github.com/tendermint/tendermint/types"
+)
+
+// The detector component of the light client detects and handles attacks
on the light client.
+// More info here:
+// tendermint/docs/architecture/adr-047-handling-evidence-from-light-client.md
+
+// detectDivergence is a second wall of defense for the light client.
+//
+// It takes the target verified header and compares it with the headers of a set of
+// witness providers that the light client is connected to. If a conflicting header
+// is returned, it verifies and examines the conflicting header against the verified
+// trace that was produced from the primary. If successful, it produces two sets of evidence
+// and sends them to the opposite provider before halting.
+//
+// If there are no conflicting headers, the light client deems the verified target header
+// trusted and saves it to the trusted store.
+func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.LightBlock, now time.Time) error {
+	if primaryTrace == nil || len(primaryTrace) < 2 {
+		return errors.New("nil or single block primary trace")
+	}
+	var (
+		headerMatched      bool
+		lastVerifiedHeader = primaryTrace[len(primaryTrace)-1].SignedHeader
+		witnessesToRemove  = make([]int, 0)
+	)
+	c.logger.Debug("Running detector against trace", "endBlockHeight", lastVerifiedHeader.Height,
+		"endBlockHash", lastVerifiedHeader.Hash, "length", len(primaryTrace))
+
+	c.providerMutex.Lock()
+	defer c.providerMutex.Unlock()
+
+	if len(c.witnesses) == 0 {
+		return errNoWitnesses{}
+	}
+
+	// launch one goroutine per witness to retrieve the light block of the target height
+	// and compare it with the header from the primary
+	errc := make(chan error, len(c.witnesses))
+	for i, witness := range c.witnesses {
+		go c.compareNewHeaderWithWitness(ctx, errc, lastVerifiedHeader, witness, i)
+	}
+
+	// handle errors from the header comparisons as they come in
+	for i := 0; i < cap(errc); i++ {
+		err := <-errc
+
+		switch e := err.(type) {
+		case nil: // at least one header matched
+			headerMatched = true
+		case errConflictingHeaders:
+			// We have conflicting headers. This could possibly imply an attack on the light client.
+			// First we need to verify the witness's header using the same skipping verification and then we
+			// need to find the point that the headers diverge and examine this for any evidence of an attack.
+			//
+			// We combine these actions together, verifying the witness's headers and outputting the trace
+			// which captures the bifurcation point and, if successful, provides the information to create evidence.
+			supportingWitness := c.witnesses[e.WitnessIndex]
+			witnessTrace, primaryBlock, err := c.examineConflictingHeaderAgainstTrace(
+				ctx,
+				primaryTrace,
+				e.Block.SignedHeader,
+				supportingWitness,
+				now,
+			)
+			if err != nil {
+				c.logger.Info("Error validating witness's divergent header", "witness", supportingWitness, "err", err)
+				witnessesToRemove = append(witnessesToRemove, e.WitnessIndex)
+				continue
+			}
+
+			// We suspect that the primary is faulty, hence we hold the witness as the source of truth
+			// and generate evidence against the primary that we can send to the witness
+			primaryEv := newLightClientAttackEvidence(primaryBlock, witnessTrace[len(witnessTrace)-1], witnessTrace[0])
+			c.logger.Error("Attempted attack detected. Sending evidence against primary by witness", "ev", primaryEv,
+				"primary", c.primary, "witness", supportingWitness)
+			c.sendEvidence(ctx, primaryEv, supportingWitness)
+
+			if primaryBlock.Commit.Round != witnessTrace[len(witnessTrace)-1].Commit.Round {
+				c.logger.Info("The light client has detected, and prevented, an attempted amnesia attack." +
+ + " We think this attack is pretty unlikely, so if you see it, that's interesting to us." + + " Can you let us know by opening an issue through https://github.com/tendermint/tendermint/issues/new?") + } + + // This may not be valid because the witness itself is at fault. So now we reverse it, examining the + // trace provided by the witness and holding the primary as the source of truth. Note: primary may not + // respond but this is okay as we will halt anyway. + primaryTrace, witnessBlock, err := c.examineConflictingHeaderAgainstTrace( + ctx, + witnessTrace, + primaryBlock.SignedHeader, + c.primary, + now, + ) + if err != nil { + c.logger.Info("Error validating primary's divergent header", "primary", c.primary, "err", err) + return ErrLightClientAttack + } + + // We now use the primary trace to create evidence against the witness and send it to the primary + witnessEv := newLightClientAttackEvidence(witnessBlock, primaryTrace[len(primaryTrace)-1], primaryTrace[0]) + c.logger.Error("Sending evidence against witness by primary", "ev", witnessEv, + "primary", c.primary, "witness", supportingWitness) + c.sendEvidence(ctx, witnessEv, c.primary) + // We return the error and don't process anymore witnesses + return ErrLightClientAttack + + case errBadWitness: + c.logger.Info("Witness returned an error during header comparison", "witness", c.witnesses[e.WitnessIndex], + "err", err) + // if witness sent us an invalid header, then remove it. If it didn't respond or couldn't find the block, then we + // ignore it and move on to the next witness + if _, ok := e.Reason.(provider.ErrBadLightBlock); ok { + c.logger.Info("Witness sent us invalid header / vals -> removing it", "witness", c.witnesses[e.WitnessIndex]) + witnessesToRemove = append(witnessesToRemove, e.WitnessIndex) + } + } + } + + for _, idx := range witnessesToRemove { + c.removeWitness(idx) + } + + // 1. If we had at least one witness that returned the same header then we + // conclude that we can trust the header + if headerMatched { + return nil + } + + // 2. ELse all witnesses have either not responded, don't have the block or sent invalid blocks. + return ErrFailedHeaderCrossReferencing +} + +// compareNewHeaderWithWitness takes the verified header from the primary and compares it with a +// header from a specified witness. The function can return one of three errors: +// +// 1: errConflictingHeaders -> there may have been an attack on this light client +// 2: errBadWitness -> the witness has either not responded, doesn't have the header or has given us an invalid one +// Note: In the case of an invalid header we remove the witness +// 3: nil -> the hashes of the two headers match +func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan error, h *types.SignedHeader, + witness provider.Provider, witnessIndex int) { + + lightBlock, err := witness.LightBlock(ctx, h.Height) + if err != nil { + errc <- errBadWitness{Reason: err, WitnessIndex: witnessIndex} + return + } + + if !bytes.Equal(h.Hash(), lightBlock.Hash()) { + errc <- errConflictingHeaders{Block: lightBlock, WitnessIndex: witnessIndex} + } + + c.logger.Debug("Matching header received by witness", "height", h.Height, "witness", witnessIndex) + errc <- nil +} + +// sendEvidence sends evidence to a provider on a best effort basis. 
+// sendEvidence sends evidence to a provider on a best effort basis.
+func (c *Client) sendEvidence(ctx context.Context, ev *types.LightClientAttackEvidence, receiver provider.Provider) {
+	err := receiver.ReportEvidence(ctx, ev)
+	if err != nil {
+		c.logger.Error("Failed to report evidence to provider", "ev", ev, "provider", receiver)
+	}
+}
+
+// examineConflictingHeaderAgainstTrace takes a trace from one provider and a divergent header that
+// it has received from another and performs verifySkipping at the heights of each of the intermediate
+// headers in the trace until it reaches the divergentHeader. One of two things can happen:
+//
+// 1. The light client verifies a header that is different from the intermediate header in the trace. This
+//    is the bifurcation point and the light client can create evidence from it
+// 2. The source stops responding, doesn't have the block or sends an invalid header, in which case we
+//    return the error and remove the witness
+func (c *Client) examineConflictingHeaderAgainstTrace(
+	ctx context.Context,
+	trace []*types.LightBlock,
+	divergentHeader *types.SignedHeader,
+	source provider.Provider, now time.Time) ([]*types.LightBlock, *types.LightBlock, error) {
+
+	var previouslyVerifiedBlock *types.LightBlock
+
+	for idx, traceBlock := range trace {
+		// The first block in the trace MUST be the same as the light block that the source produces,
+		// else we cannot continue with verification.
+		sourceBlock, err := source.LightBlock(ctx, traceBlock.Height)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		if idx == 0 {
+			if shash, thash := sourceBlock.Hash(), traceBlock.Hash(); !bytes.Equal(shash, thash) {
+				return nil, nil, fmt.Errorf("trusted block is different from the source's first block (%X != %X)",
+					thash, shash)
+			}
+			previouslyVerifiedBlock = sourceBlock
+			continue
+		}
+
+		// we check that the source provider can verify a block at the same height as the
+		// intermediate header
+		trace, err := c.verifySkipping(ctx, source, previouslyVerifiedBlock, sourceBlock, now)
+		if err != nil {
+			return nil, nil, fmt.Errorf("verifySkipping of conflicting header failed: %w", err)
+		}
+		// check if the headers verified by the source have diverged from the trace
+		if shash, thash := sourceBlock.Hash(), traceBlock.Hash(); !bytes.Equal(shash, thash) {
+			// Bifurcation point found!
+			return trace, traceBlock, nil
+		}
+
+		// headers are still the same, so update previouslyVerifiedBlock
+		previouslyVerifiedBlock = sourceBlock
+	}
+
+	// We have reached the end of the trace without observing a divergence. The last header is thus different
+	// from the divergent header that the source originally sent us, so we return an error.
+	return nil, nil, fmt.Errorf("source provided different header to the original header it provided (%X != %X)",
+		previouslyVerifiedBlock.Hash(), divergentHeader.Hash())
+
+}
+
+// newLightClientAttackEvidence determines the type of attack and then forms the evidence, filling out
+// all the fields such that it is ready to be sent to a full node.
+func newLightClientAttackEvidence(conflicted, trusted, common *types.LightBlock) *types.LightClientAttackEvidence {
+	ev := &types.LightClientAttackEvidence{ConflictingBlock: conflicted}
+	// If this is an equivocation or amnesia attack, i.e. the validator sets are the same, then we
+	// return the height of the conflicting block; else, if it is a lunatic attack and the validator sets
+	// are not the same, then we send the height of the common header.
+ if ev.ConflictingHeaderIsInvalid(trusted.Header) { + ev.CommonHeight = common.Height + ev.Timestamp = common.Time + ev.TotalVotingPower = common.ValidatorSet.TotalVotingPower() + } else { + ev.CommonHeight = trusted.Height + ev.Timestamp = trusted.Time + ev.TotalVotingPower = trusted.ValidatorSet.TotalVotingPower() + } + ev.ByzantineValidators = ev.GetByzantineValidators(common.ValidatorSet, trusted.SignedHeader) + return ev +} diff --git a/light/detector_test.go b/light/detector_test.go new file mode 100644 index 000000000..bcf494159 --- /dev/null +++ b/light/detector_test.go @@ -0,0 +1,262 @@ +package light_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/light" + "github.com/tendermint/tendermint/light/provider" + mockp "github.com/tendermint/tendermint/light/provider/mock" + dbs "github.com/tendermint/tendermint/light/store/db" + "github.com/tendermint/tendermint/types" +) + +func TestLightClientAttackEvidence_Lunatic(t *testing.T) { + // primary performs a lunatic attack + var ( + latestHeight = int64(10) + valSize = 5 + divergenceHeight = int64(6) + primaryHeaders = make(map[int64]*types.SignedHeader, latestHeight) + primaryValidators = make(map[int64]*types.ValidatorSet, latestHeight) + ) + + witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight, valSize, 2, bTime) + witness := mockp.New(chainID, witnessHeaders, witnessValidators) + forgedKeys := chainKeys[divergenceHeight-1].ChangeKeys(3) // we change 3 out of the 5 validators (still 2/5 remain) + forgedVals := forgedKeys.ToValidators(2, 0) + + for height := int64(1); height <= latestHeight; height++ { + if height < divergenceHeight { + primaryHeaders[height] = witnessHeaders[height] + primaryValidators[height] = witnessValidators[height] + continue + } + primaryHeaders[height] = forgedKeys.GenSignedHeader(chainID, height, bTime.Add(time.Duration(height)*time.Minute), + nil, forgedVals, forgedVals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(forgedKeys)) + primaryValidators[height] = forgedVals + } + primary := mockp.New(chainID, primaryHeaders, primaryValidators) + + c, err := light.NewClient( + ctx, + chainID, + light.TrustOptions{ + Period: 4 * time.Hour, + Height: 1, + Hash: primaryHeaders[1].Hash(), + }, + primary, + []provider.Provider{witness}, + dbs.New(dbm.NewMemDB(), chainID), + light.Logger(log.TestingLogger()), + light.MaxRetryAttempts(1), + ) + require.NoError(t, err) + + // Check verification returns an error. + _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) + if assert.Error(t, err) { + assert.Equal(t, light.ErrLightClientAttack, err) + } + + // Check evidence was sent to both full nodes. 
+ evAgainstPrimary := &types.LightClientAttackEvidence{ + // after the divergence height the valset doesn't change so we expect the evidence to be for height 10 + ConflictingBlock: &types.LightBlock{ + SignedHeader: primaryHeaders[10], + ValidatorSet: primaryValidators[10], + }, + CommonHeight: 4, + } + assert.True(t, witness.HasEvidence(evAgainstPrimary)) + + evAgainstWitness := &types.LightClientAttackEvidence{ + // when forming evidence against witness we learn that the canonical chain continued to change validator sets + // hence the conflicting block is at 7 + ConflictingBlock: &types.LightBlock{ + SignedHeader: witnessHeaders[7], + ValidatorSet: witnessValidators[7], + }, + CommonHeight: 4, + } + assert.True(t, primary.HasEvidence(evAgainstWitness)) +} + +func TestLightClientAttackEvidence_Equivocation(t *testing.T) { + verificationOptions := map[string]light.Option{ + "sequential": light.SequentialVerification(), + "skipping": light.SkippingVerification(light.DefaultTrustLevel), + } + + for s, verificationOption := range verificationOptions { + t.Log("==> verification", s) + + // primary performs an equivocation attack + var ( + latestHeight = int64(10) + valSize = 5 + divergenceHeight = int64(6) + primaryHeaders = make(map[int64]*types.SignedHeader, latestHeight) + primaryValidators = make(map[int64]*types.ValidatorSet, latestHeight) + ) + // validators don't change in this network (however we still use a map just for convenience) + witnessHeaders, witnessValidators, chainKeys := genMockNodeWithKeys(chainID, latestHeight+2, valSize, 2, bTime) + witness := mockp.New(chainID, witnessHeaders, witnessValidators) + + for height := int64(1); height <= latestHeight; height++ { + if height < divergenceHeight { + primaryHeaders[height] = witnessHeaders[height] + primaryValidators[height] = witnessValidators[height] + continue + } + // we don't have a network partition so we will make 4/5 (greater than 2/3) malicious and vote again for + // a different block (which we do by adding txs) + primaryHeaders[height] = chainKeys[height].GenSignedHeader(chainID, height, + bTime.Add(time.Duration(height)*time.Minute), []types.Tx{[]byte("abcd")}, + witnessValidators[height], witnessValidators[height+1], hash("app_hash"), + hash("cons_hash"), hash("results_hash"), 0, len(chainKeys[height])-1) + primaryValidators[height] = witnessValidators[height] + } + primary := mockp.New(chainID, primaryHeaders, primaryValidators) + + c, err := light.NewClient( + ctx, + chainID, + light.TrustOptions{ + Period: 4 * time.Hour, + Height: 1, + Hash: primaryHeaders[1].Hash(), + }, + primary, + []provider.Provider{witness}, + dbs.New(dbm.NewMemDB(), chainID), + light.Logger(log.TestingLogger()), + light.MaxRetryAttempts(1), + verificationOption, + ) + require.NoError(t, err) + + // Check verification returns an error. + _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) + if assert.Error(t, err) { + assert.Equal(t, light.ErrLightClientAttack, err) + } + + // Check evidence was sent to both full nodes. 
+ // Common height should be set to the height of the divergent header in the instance + // of an equivocation attack and the validator sets are the same as what the witness has + evAgainstPrimary := &types.LightClientAttackEvidence{ + ConflictingBlock: &types.LightBlock{ + SignedHeader: primaryHeaders[divergenceHeight], + ValidatorSet: primaryValidators[divergenceHeight], + }, + CommonHeight: divergenceHeight, + } + assert.True(t, witness.HasEvidence(evAgainstPrimary)) + + evAgainstWitness := &types.LightClientAttackEvidence{ + ConflictingBlock: &types.LightBlock{ + SignedHeader: witnessHeaders[divergenceHeight], + ValidatorSet: witnessValidators[divergenceHeight], + }, + CommonHeight: divergenceHeight, + } + assert.True(t, primary.HasEvidence(evAgainstWitness)) + } +} + +// 1. Different nodes therefore a divergent header is produced. +// => light client returns an error upon creation because primary and witness +// have a different view. +func TestClientDivergentTraces1(t *testing.T) { + primary := mockp.New(genMockNode(chainID, 10, 5, 2, bTime)) + firstBlock, err := primary.LightBlock(ctx, 1) + require.NoError(t, err) + witness := mockp.New(genMockNode(chainID, 10, 5, 2, bTime)) + + _, err = light.NewClient( + ctx, + chainID, + light.TrustOptions{ + Height: 1, + Hash: firstBlock.Hash(), + Period: 4 * time.Hour, + }, + primary, + []provider.Provider{witness}, + dbs.New(dbm.NewMemDB(), chainID), + light.Logger(log.TestingLogger()), + light.MaxRetryAttempts(1), + ) + require.Error(t, err) + assert.Contains(t, err.Error(), "does not match primary") +} + +// 2. Two out of three nodes don't respond but the third has a header that matches +// => verification should be successful and all the witnesses should remain +func TestClientDivergentTraces2(t *testing.T) { + primary := mockp.New(genMockNode(chainID, 10, 5, 2, bTime)) + firstBlock, err := primary.LightBlock(ctx, 1) + require.NoError(t, err) + c, err := light.NewClient( + ctx, + chainID, + light.TrustOptions{ + Height: 1, + Hash: firstBlock.Hash(), + Period: 4 * time.Hour, + }, + primary, + []provider.Provider{deadNode, deadNode, primary}, + dbs.New(dbm.NewMemDB(), chainID), + light.Logger(log.TestingLogger()), + light.MaxRetryAttempts(1), + ) + require.NoError(t, err) + + _, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour)) + assert.NoError(t, err) + assert.Equal(t, 3, len(c.Witnesses())) +} + +// 3. 
witness has the same first header, but different second header
+// => creation should succeed, but the verification should fail
+func TestClientDivergentTraces3(t *testing.T) {
+	_, primaryHeaders, primaryVals := genMockNode(chainID, 10, 5, 2, bTime)
+	primary := mockp.New(chainID, primaryHeaders, primaryVals)
+
+	firstBlock, err := primary.LightBlock(ctx, 1)
+	require.NoError(t, err)
+
+	_, mockHeaders, mockVals := genMockNode(chainID, 10, 5, 2, bTime)
+	mockHeaders[1] = primaryHeaders[1]
+	mockVals[1] = primaryVals[1]
+	witness := mockp.New(chainID, mockHeaders, mockVals)
+
+	c, err := light.NewClient(
+		ctx,
+		chainID,
+		light.TrustOptions{
+			Height: 1,
+			Hash:   firstBlock.Hash(),
+			Period: 4 * time.Hour,
+		},
+		primary,
+		[]provider.Provider{witness},
+		dbs.New(dbm.NewMemDB(), chainID),
+		light.Logger(log.TestingLogger()),
+		light.MaxRetryAttempts(1),
+	)
+	require.NoError(t, err)
+
+	_, err = c.VerifyLightBlockAtHeight(ctx, 10, bTime.Add(1*time.Hour))
+	assert.Error(t, err)
+	assert.Equal(t, 0, len(c.Witnesses()))
+}
diff --git a/lite2/doc.go b/light/doc.go
similarity index 92%
rename from lite2/doc.go
rename to light/doc.go
index f42aa64f1..700bbeb6c 100644
--- a/lite2/doc.go
+++ b/light/doc.go
@@ -1,5 +1,5 @@
 /*
-Package lite provides a light client implementation.
+Package light provides a light client implementation.
 
 The concept of light clients was introduced in the Bitcoin white paper. It
 describes a watcher of distributed consensus process that only validates the
@@ -14,7 +14,7 @@ chain's state machine).
 
 In a network that is expected to reliably punish validators for misbehavior
 by slashing bonded stake and where the validator set changes infrequently, clients
-can take advantage of this assumption to safely synchronize a lite client
+can take advantage of this assumption to safely synchronize a light client
 without downloading the intervening headers.
 
 Light clients (and full nodes) operating in the Proof Of Stake context need a
@@ -41,15 +41,15 @@ precommit-vote signatures to prove its validity (> 2/3 of the
 voting power) given the validator set responsible for signing that header.
 
 The hash of the next validator set is included and signed in the SignedHeader.
-This lets the lite client keep track of arbitrary changes to the validator set,
+This lets the light client keep track of arbitrary changes to the validator set,
 as every change to the validator set must be approved by inclusion in the
 header and signed in the commit.
 
 In the worst case, with every block changing the validators around completely,
-a lite client can sync up with every block header to verify each validator set
+a light client can sync up with every block header to verify each validator set
 change on the chain. In practice, most applications will not have frequent
 drastic updates to the validator set, so the logic defined in this package for
-lite client syncing is optimized to use intelligent bisection.
+light client syncing is optimized to use intelligent bisection.
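From the caller's side, the choice between the two strategies is a single option to the constructor introduced in this change. A hedged sketch, built only from identifiers that appear in this diff; the trust parameters and providers are placeholders assumed to be obtained elsewhere (the trusted hash out of band):

	package main

	import (
		"context"
		"time"

		dbm "github.com/tendermint/tm-db"

		"github.com/tendermint/tendermint/light"
		"github.com/tendermint/tendermint/light/provider"
		dbs "github.com/tendermint/tendermint/light/store/db"
	)

	// newBisectingClient wires up a client in skipping (bisection) mode; pass
	// light.SequentialVerification() instead to verify every header in turn.
	func newBisectingClient(
		ctx context.Context,
		chainID string,
		trustedHeight int64,
		trustedHash []byte,
		primary provider.Provider,
		witnesses []provider.Provider,
	) (*light.Client, error) {
		return light.NewClient(
			ctx,
			chainID,
			light.TrustOptions{
				Period: 4 * time.Hour,
				Height: trustedHeight,
				Hash:   trustedHash,
			},
			primary,
			witnesses,
			dbs.New(dbm.NewMemDB(), chainID), // in-memory store; use a persistent DB in practice
			light.SkippingVerification(light.DefaultTrustLevel),
		)
	}

	func main() {}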
# What this package provides @@ -63,7 +63,7 @@ This package provides three major things: Example usage: - db, err := dbm.NewGoLevelDB("lite-client-db", dbDir) + db, err := dbm.NewGoLevelDB("light-client-db", dbDir) if err != nil { // handle error } @@ -124,4 +124,4 @@ Or see https://github.com/tendermint/spec/tree/master/spec/consensus/light-client for the full spec */ -package lite +package light diff --git a/light/errors.go b/light/errors.go new file mode 100644 index 000000000..ae54436d0 --- /dev/null +++ b/light/errors.go @@ -0,0 +1,104 @@ +package light + +import ( + "errors" + "fmt" + "time" + + "github.com/tendermint/tendermint/types" +) + +// ErrOldHeaderExpired means the old (trusted) header has expired according to +// the given trustingPeriod and current time. If so, the light client must be +// reset subjectively. +type ErrOldHeaderExpired struct { + At time.Time + Now time.Time +} + +func (e ErrOldHeaderExpired) Error() string { + return fmt.Sprintf("old header has expired at %v (now: %v)", e.At, e.Now) +} + +// ErrNewValSetCantBeTrusted means the new validator set cannot be trusted +// because < 1/3rd (+trustLevel+) of the old validator set has signed. +type ErrNewValSetCantBeTrusted struct { + Reason types.ErrNotEnoughVotingPowerSigned +} + +func (e ErrNewValSetCantBeTrusted) Error() string { + return fmt.Sprintf("can't trust new val set: %v", e.Reason) +} + +// ErrInvalidHeader means the header either failed the basic validation or +// commit is not signed by 2/3+. +type ErrInvalidHeader struct { + Reason error +} + +func (e ErrInvalidHeader) Error() string { + return fmt.Sprintf("invalid header: %v", e.Reason) +} + +// ErrFailedHeaderCrossReferencing is returned when the detector was not able to cross reference the header +// with any of the connected witnesses. +var ErrFailedHeaderCrossReferencing = errors.New("all witnesses have either not responded, don't have the" + + " blocks or sent invalid blocks. You should look to change your witnesses" + + " or review the light client's logs for more information") + +// ErrVerificationFailed means either sequential or skipping verification has +// failed to verify from header #1 to header #2 for some reason. +type ErrVerificationFailed struct { + From int64 + To int64 + Reason error +} + +// Unwrap returns the underlying reason. +func (e ErrVerificationFailed) Unwrap() error { + return e.Reason +} + +func (e ErrVerificationFailed) Error() string { + return fmt.Sprintf("verify from #%d to #%d failed: %v", e.From, e.To, e.Reason) +} + +// ErrLightClientAttack is returned when the light client has detected an attempt +// to verify a false header and has sent the evidence to either a witness or primary. +var ErrLightClientAttack = errors.New("attempted attack detected." + + " Light client received valid conflicting header from witness." + + " Unable to verify header. Evidence has been sent to both providers." + + " Check logs for full evidence and trace") + +// ----------------------------- INTERNAL ERRORS --------------------------------- + +// errConflictingHeaders is returned when two conflicting headers are discovered. +type errConflictingHeaders struct { + Block *types.LightBlock + WitnessIndex int +} + +func (e errConflictingHeaders) Error() string { + return fmt.Sprintf( + "header hash (%X) from witness (%d) does not match primary", + e.Block.Hash(), e.WitnessIndex) +} + +// errNoWitnesses means that there are not enough witnesses connected to +// continue running the light client. 
+type errNoWitnesses struct{} + +func (e errNoWitnesses) Error() string { + return "no witnesses connected. please reset light client" +} + +// errBadWitness is returned when the witness either does not respond or +// responds with an invalid header. +type errBadWitness struct { + Reason error + WitnessIndex int +} + +func (e errBadWitness) Error() string { + return fmt.Sprintf("witness %d returned error: %s", e.WitnessIndex, e.Reason.Error()) +} diff --git a/lite2/example_test.go b/light/example_test.go similarity index 63% rename from lite2/example_test.go rename to light/example_test.go index 0de5f1349..b599778b8 100644 --- a/lite2/example_test.go +++ b/light/example_test.go @@ -1,6 +1,7 @@ -package lite_test +package light_test import ( + "context" "fmt" "io/ioutil" stdlog "log" @@ -11,10 +12,11 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/abci/example/kvstore" - lite "github.com/tendermint/tendermint/lite2" - "github.com/tendermint/tendermint/lite2/provider" - httpp "github.com/tendermint/tendermint/lite2/provider/http" - dbs "github.com/tendermint/tendermint/lite2/store/db" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/light" + "github.com/tendermint/tendermint/light/provider" + httpp "github.com/tendermint/tendermint/light/provider/http" + dbs "github.com/tendermint/tendermint/light/store/db" rpctest "github.com/tendermint/tendermint/rpc/test" ) @@ -23,7 +25,7 @@ func ExampleClient_Update() { // give Tendermint time to generate some blocks time.Sleep(5 * time.Second) - dbDir, err := ioutil.TempDir("", "lite-client-example") + dbDir, err := ioutil.TempDir("", "light-client-example") if err != nil { stdlog.Fatal(err) } @@ -39,42 +41,41 @@ func ExampleClient_Update() { stdlog.Fatal(err) } - header, err := primary.SignedHeader(2) + block, err := primary.LightBlock(context.Background(), 2) if err != nil { stdlog.Fatal(err) } - db, err := dbm.NewGoLevelDB("lite-client-db", dbDir) + db, err := dbm.NewGoLevelDB("light-client-db", dbDir) if err != nil { stdlog.Fatal(err) } - c, err := lite.NewClient( + c, err := light.NewClient( + context.Background(), chainID, - lite.TrustOptions{ + light.TrustOptions{ Period: 504 * time.Hour, // 21 days Height: 2, - Hash: header.Hash(), + Hash: block.Hash(), }, primary, []provider.Provider{primary}, // NOTE: primary should not be used here dbs.New(db, chainID), - // Logger(log.TestingLogger()), + light.Logger(log.TestingLogger()), ) if err != nil { stdlog.Fatal(err) } defer func() { - c.Cleanup() + if err := c.Cleanup(); err != nil { + stdlog.Fatal(err) + } }() time.Sleep(2 * time.Second) - // XXX: 30 * time.Minute clock drift is needed because a) Tendermint strips - // monotonic component (see types/time/time.go) b) single instance is being - // run. - // https://github.com/tendermint/tendermint/issues/4489 - h, err := c.Update(time.Now().Add(30 * time.Minute)) + h, err := c.Update(context.Background(), time.Now()) if err != nil { stdlog.Fatal(err) } @@ -87,12 +88,12 @@ func ExampleClient_Update() { // Output: successful update } -// Manually getting headers and verifying them. -func ExampleClient_VerifyHeaderAtHeight() { +// Manually getting light blocks and verifying them. 
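+//
+// Editorial aside: because ErrVerificationFailed implements Unwrap, a failed
+// verification can be inspected with the standard errors helpers, e.g.
+//
+//	var errVerify light.ErrVerificationFailed
+//	if errors.As(err, &errVerify) {
+//		fmt.Printf("verify from #%d to #%d failed: %v\n",
+//			errVerify.From, errVerify.To, errVerify.Reason)
+//	}
+//
+// (a sketch only; it assumes the standard "errors" package is imported)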
+func ExampleClient_VerifyLightBlockAtHeight() { // give Tendermint time to generate some blocks time.Sleep(5 * time.Second) - dbDir, err := ioutil.TempDir("", "lite-client-example") + dbDir, err := ioutil.TempDir("", "light-client-example") if err != nil { stdlog.Fatal(err) } @@ -108,41 +109,44 @@ func ExampleClient_VerifyHeaderAtHeight() { stdlog.Fatal(err) } - header, err := primary.SignedHeader(2) + block, err := primary.LightBlock(context.Background(), 2) if err != nil { stdlog.Fatal(err) } - db, err := dbm.NewGoLevelDB("lite-client-db", dbDir) + db, err := dbm.NewGoLevelDB("light-client-db", dbDir) if err != nil { stdlog.Fatal(err) } - c, err := lite.NewClient( + c, err := light.NewClient( + context.Background(), chainID, - lite.TrustOptions{ + light.TrustOptions{ Period: 504 * time.Hour, // 21 days Height: 2, - Hash: header.Hash(), + Hash: block.Hash(), }, primary, []provider.Provider{primary}, // NOTE: primary should not be used here dbs.New(db, chainID), - // Logger(log.TestingLogger()), + light.Logger(log.TestingLogger()), ) if err != nil { stdlog.Fatal(err) } defer func() { - c.Cleanup() + if err := c.Cleanup(); err != nil { + stdlog.Fatal(err) + } }() - _, err = c.VerifyHeaderAtHeight(3, time.Now()) + _, err = c.VerifyLightBlockAtHeight(context.Background(), 3, time.Now()) if err != nil { stdlog.Fatal(err) } - h, err := c.TrustedHeader(3) + h, err := c.TrustedLightBlock(3) if err != nil { stdlog.Fatal(err) } diff --git a/lite2/helpers_test.go b/light/helpers_test.go similarity index 77% rename from lite2/helpers_test.go rename to light/helpers_test.go index d9ab46f92..980ded956 100644 --- a/lite2/helpers_test.go +++ b/light/helpers_test.go @@ -1,13 +1,16 @@ -package lite_test +package light_test import ( "time" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - + "github.com/tendermint/tendermint/crypto/tmhash" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmversion "github.com/tendermint/tendermint/proto/tendermint/version" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" + "github.com/tendermint/tendermint/version" ) // privKeys is a helper type for testing. @@ -70,23 +73,20 @@ func (pkz privKeys) ToValidators(init, inc int64) *types.ValidatorSet { } // signHeader properly signs the header with all keys from first to last exclusive. -func (pkz privKeys) signHeader(header *types.Header, first, last int) *types.Commit { +func (pkz privKeys) signHeader(header *types.Header, valSet *types.ValidatorSet, first, last int) *types.Commit { commitSigs := make([]types.CommitSig, len(pkz)) for i := 0; i < len(pkz); i++ { commitSigs[i] = types.NewCommitSigAbsent() } - // We need this list to keep the ordering. - vset := pkz.ToValidators(1, 0) - blockID := types.BlockID{ - Hash: header.Hash(), - PartsHeader: types.PartSetHeader{Total: 1, Hash: crypto.CRandBytes(32)}, + Hash: header.Hash(), + PartSetHeader: types.PartSetHeader{Total: 1, Hash: crypto.CRandBytes(32)}, } // Fill in the votes we want. 
for i := first; i < last && i < len(pkz); i++ { - vote := makeVote(header, vset, pkz[i], blockID) + vote := makeVote(header, valSet, pkz[i], blockID) commitSigs[vote.ValidatorIndex] = vote.CommitSig() } @@ -104,16 +104,18 @@ func makeVote(header *types.Header, valset *types.ValidatorSet, Height: header.Height, Round: 1, Timestamp: tmtime.Now(), - Type: types.PrecommitType, + Type: tmproto.PrecommitType, BlockID: blockID, } + + v := vote.ToProto() // Sign it - signBytes := vote.SignBytes(header.ChainID) - // TODO Consider reworking makeVote API to return an error + signBytes := types.VoteSignBytes(header.ChainID, v) sig, err := key.Sign(signBytes) if err != nil { panic(err) } + vote.Signature = sig return vote @@ -123,6 +125,7 @@ func genHeader(chainID string, height int64, bTime time.Time, txs types.Txs, valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { return &types.Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol, App: 0}, ChainID: chainID, Height: height, Time: bTime, @@ -134,6 +137,7 @@ func genHeader(chainID string, height int64, bTime time.Time, txs types.Txs, AppHash: appHash, ConsensusHash: consHash, LastResultsHash: resHash, + ProposerAddress: valset.Validators[0].Address, } } @@ -144,7 +148,7 @@ func (pkz privKeys) GenSignedHeader(chainID string, height int64, bTime time.Tim header := genHeader(chainID, height, bTime, txs, valset, nextValset, appHash, consHash, resHash) return &types.SignedHeader{ Header: header, - Commit: pkz.signHeader(header, first, last), + Commit: pkz.signHeader(header, valset, first, last), } } @@ -157,7 +161,7 @@ func (pkz privKeys) GenSignedHeaderLastBlockID(chainID string, height int64, bTi header.LastBlockID = lastBlockID return &types.SignedHeader{ Header: header, - Commit: pkz.signHeader(header, valset, first, last), + Commit: pkz.signHeader(header, valset, first, last), } } @@ -169,19 +173,20 @@ func (pkz privKeys) ChangeKeys(delta int) privKeys { // Generates the headers and validator sets to create a full mock node with blocks to height ( // blockSize) and with variation in validator sets. Block intervals are one minute. // NOTE: Expected to have a large validator set size ~ 100 validators. 
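// For illustration (an editorial sketch, not part of this change): the
// divergence tests above consume these generators roughly as
//
//	_, headers, vals := genMockNode(chainID, 10, 5, 2, bTime)
//	primary := mockp.New(chainID, headers, vals)
//
// where both returned maps are keyed by height, from 1 up to blockSize.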
-func GenMockNode( +func genMockNodeWithKeys( chainID string, blockSize int64, valSize int, valVariation float32, bTime time.Time) ( - string, map[int64]*types.SignedHeader, - map[int64]*types.ValidatorSet) { + map[int64]*types.ValidatorSet, + map[int64]privKeys) { var ( headers = make(map[int64]*types.SignedHeader, blockSize) - valset = make(map[int64]*types.ValidatorSet, blockSize) + valset = make(map[int64]*types.ValidatorSet, blockSize+1) + keymap = make(map[int64]privKeys, blockSize+1) keys = genPrivKeys(valSize) totalVariation = valVariation valVariationInt int @@ -191,14 +196,16 @@ func GenMockNode( valVariationInt = int(totalVariation) totalVariation = -float32(valVariationInt) newKeys = keys.ChangeKeys(valVariationInt) + keymap[1] = keys + keymap[2] = newKeys // genesis header and vals lastHeader := keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Minute), nil, - keys.ToValidators(2, 2), newKeys.ToValidators(2, 2), []byte("app_hash"), []byte("cons_hash"), - []byte("results_hash"), 0, len(keys)) + keys.ToValidators(2, 0), newKeys.ToValidators(2, 0), hash("app_hash"), hash("cons_hash"), + hash("results_hash"), 0, len(keys)) currentHeader := lastHeader headers[1] = currentHeader - valset[1] = keys.ToValidators(2, 2) + valset[1] = keys.ToValidators(2, 0) keys = newKeys for height := int64(2); height <= blockSize; height++ { @@ -208,13 +215,31 @@ func GenMockNode( newKeys = keys.ChangeKeys(valVariationInt) currentHeader = keys.GenSignedHeaderLastBlockID(chainID, height, bTime.Add(time.Duration(height)*time.Minute), nil, - keys.ToValidators(2, 2), newKeys.ToValidators(2, 2), []byte("app_hash"), []byte("cons_hash"), - []byte("results_hash"), 0, len(keys), types.BlockID{Hash: lastHeader.Hash()}) + keys.ToValidators(2, 0), newKeys.ToValidators(2, 0), hash("app_hash"), hash("cons_hash"), + hash("results_hash"), 0, len(keys), types.BlockID{Hash: lastHeader.Hash()}) headers[height] = currentHeader - valset[height] = keys.ToValidators(2, 2) + valset[height] = keys.ToValidators(2, 0) lastHeader = currentHeader keys = newKeys + keymap[height+1] = keys } + return headers, valset, keymap +} + +func genMockNode( + chainID string, + blockSize int64, + valSize int, + valVariation float32, + bTime time.Time) ( + string, + map[int64]*types.SignedHeader, + map[int64]*types.ValidatorSet) { + headers, valset, _ := genMockNodeWithKeys(chainID, blockSize, valSize, valVariation, bTime) return chainID, headers, valset } + +func hash(s string) []byte { + return tmhash.Sum([]byte(s)) +} diff --git a/light/mbt/doc.go b/light/mbt/doc.go new file mode 100644 index 000000000..fdbea479a --- /dev/null +++ b/light/mbt/doc.go @@ -0,0 +1,20 @@ +// Package mbt provides a test runner for model-based tests. +// +// Model-based tests are generated by +// https://github.com/informalsystems/tendermint-rs/tree/master/testgen, which +// first turns TLA+ specifications into test scenarios. Those test scenarios +// are in turn used to generate actual fixtures representing light blocks. +// +// The test runner initializes the light client with a trusted light block. For +// each next light block, it tries to verify the block and asserts the outcome +// ("verdict" field in .json files). +// +// In the first version (v1), JSON files are directly added to the repo. In +// the future (v2), they will be generated by the testgen binary right before +// testing on CI (the number of files will be in the thousands). 
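+//
+// To run the suite locally with the standard Go tooling (an editorial note):
+//
+//	go test ./light/mbt -run TestVerify -v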
+// +// NOTE (v1): If a breaking change is introduced into the SignedHeader or +// ValidatorSet, you will need to regenerate the JSON files using the testgen +// binary (may also require modifying tendermint-rs, e.g. +// https://github.com/informalsystems/tendermint-rs/pull/647) +package mbt diff --git a/light/mbt/driver_test.go b/light/mbt/driver_test.go new file mode 100644 index 000000000..bf6ab3d43 --- /dev/null +++ b/light/mbt/driver_test.go @@ -0,0 +1,122 @@ +package mbt + +import ( + "io/ioutil" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/require" + + tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/light" + "github.com/tendermint/tendermint/types" +) + +const jsonDir = "./json" + +func TestVerify(t *testing.T) { + filenames := jsonFilenames(t) + + for _, filename := range filenames { + filename := filename + t.Run(filename, func(t *testing.T) { + + jsonBlob, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + + var tc testCase + err = tmjson.Unmarshal(jsonBlob, &tc) + if err != nil { + t.Fatal(err) + } + + t.Log(tc.Description) + + var ( + trustedSignedHeader = tc.Initial.SignedHeader + trustedNextVals = tc.Initial.NextValidatorSet + trustingPeriod = time.Duration(tc.Initial.TrustingPeriod) * time.Nanosecond + ) + + for _, input := range tc.Input { + var ( + newSignedHeader = input.LightBlock.SignedHeader + newVals = input.LightBlock.ValidatorSet + ) + + err = light.Verify( + &trustedSignedHeader, + &trustedNextVals, + newSignedHeader, + newVals, + trustingPeriod, + input.Now, + 1*time.Second, + light.DefaultTrustLevel, + ) + + t.Logf("%d -> %d", trustedSignedHeader.Height, newSignedHeader.Height) + + switch input.Verdict { + case "SUCCESS": + require.NoError(t, err) + case "NOT_ENOUGH_TRUST": + require.IsType(t, light.ErrNewValSetCantBeTrusted{}, err) + case "INVALID": + switch err.(type) { + case light.ErrOldHeaderExpired: + case light.ErrInvalidHeader: + default: + t.Fatalf("expected either ErrInvalidHeader or ErrOldHeaderExpired, but got %v", err) + } + default: + t.Fatalf("unexpected verdict: %q", input.Verdict) + } + + if err == nil { // advance + trustedSignedHeader = *newSignedHeader + trustedNextVals = *input.LightBlock.NextValidatorSet + } + } + }) + } +} + +// jsonFilenames returns the list of JSON files in the jsonDir directory +func jsonFilenames(t *testing.T) []string { + matches, err := filepath.Glob(filepath.Join(jsonDir, "*.json")) + if err != nil { + t.Fatal(err) + } + return matches +} + +type testCase struct { + Description string `json:"description"` + Initial initialData `json:"initial"` + Input []inputData `json:"input"` +} + +type initialData struct { + SignedHeader types.SignedHeader `json:"signed_header"` + NextValidatorSet types.ValidatorSet `json:"next_validator_set"` + TrustingPeriod uint64 `json:"trusting_period"` + Now time.Time `json:"now"` +} + +type inputData struct { + LightBlock lightBlockWithNextValidatorSet `json:"block"` + Now time.Time `json:"now"` + Verdict string `json:"verdict"` +} + +// In tendermint-rs, NextValidatorSet is used to verify new blocks (as opposed +// to Go Tendermint). 
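+//
+// Editorial aside on the fixtures that follow: trusting_period is expressed
+// in nanoseconds, as the driver's conversion above shows:
+//
+//	trustingPeriod := time.Duration(1400000000000) * time.Nanosecond // 23m20s
+//
+// With the initial header at 1970-01-01T00:00:01Z, the trusted state expires
+// at 00:23:21. A "now" of 00:23:18 is still inside the period, while 00:23:22
+// is past it, which is why the last verdict in the first fixture below flips
+// to INVALID (here via ErrOldHeaderExpired).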
+type lightBlockWithNextValidatorSet struct { + *types.SignedHeader `json:"signed_header"` + ValidatorSet *types.ValidatorSet `json:"validator_set"` + NextValidatorSet *types.ValidatorSet `json:"next_validator_set"` +} diff --git a/light/mbt/json/MC4_4_faulty_Test2NotEnoughTrustFailure.json b/light/mbt/json/MC4_4_faulty_Test2NotEnoughTrustFailure.json new file mode 100644 index 000000000..2c5579485 --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_Test2NotEnoughTrustFailure.json @@ -0,0 +1,305 @@ +{ + "description": "MC4_4_faulty_Test2NotEnoughTrustFailure.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "0D038B1BA2ED7B1EF4D4E250C54D3F8D7186068658FAA53900CA83F4280B1EF2", + "part_set_header": { + "total": 1, + "hash": "0D038B1BA2ED7B1EF4D4E250C54D3F8D7186068658FAA53900CA83F4280B1EF2" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "XJC+kaVazdli/oMNHnFQOujOJLxFnez2DAUv5Uy+wPGeypkinrk2c79ZmlB5YHBTJaLh6yotq1XiLzy3zUAJAQ==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "pj86O2mwAQcn/MggMVEK1F6yhqnaMcxqxKyZ9DgIfFVqJIgQLb5SsuqyxPcMxxRhDTjjqfkATRGIiHPEthrFCQ==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "QssWTiluThPYflhI3bBuoeIBXlMR39I+vJb7EvLf6FVyxp0Ih7kW26wkmqjgHf0RyDAu9sny3FBrc/WbPXhFDQ==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "9xg3G66gizJBzWybdYKRtyg8c52U6vKmUT9TKb5MQ5MP/6IVCbhnvUjzw4Oe5stsnHMGvsx6Q7IVS3Ma7CbBDA==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:45:28.160326992Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:04Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "next_validators_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "consensus_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF" + }, + 
"commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "734FC4AE3FEEAD34654D611A867E3A4F2F921DD2B8F27289EFC52C90EFC2B8D8", + "part_set_header": { + "total": 1, + "hash": "734FC4AE3FEEAD34654D611A867E3A4F2F921DD2B8F27289EFC52C90EFC2B8D8" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:04Z", + "signature": "x7RNTkbf71fnTEyl7G6i8U5gi33nWZLha1nbZJjsIsbm7CCxcfsgU4uTWaHrZXCo1Ywok9zXgt0gaGOt7uR+BA==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:18Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:03Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "next_validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "consensus_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "6AE5C701F508EB5B63343858E068C5843F28105F" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "E60D3DC5A38CE0773BF911BE62514F5FE6C12FA574F0571965E8EDE2D8899C01", + "part_set_header": { + "total": 1, + "hash": "E60D3DC5A38CE0773BF911BE62514F5FE6C12FA574F0571965E8EDE2D8899C01" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "L5MQUXKrrRk9I/wnx3Pai49qFdzSkkYRzM9eO7gOI5ofG2LaJoDMttkCKp2kp9/3koSWssnX+/Uuvy62XU/hCA==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:18Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "2", + "time": "1970-01-01T00:00:02Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "next_validators_hash": 
"75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "consensus_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "81D85BE9567F7069A4760C663062E66660DADF34" + }, + "commit": { + "height": "2", + "round": 1, + "block_id": { + "hash": "2EA87BC69EB6739C5A1E06BCA5E7C9B8A5C163EB1ECF01EDD1A4A9B167C313C5", + "part_set_header": { + "total": 1, + "hash": "2EA87BC69EB6739C5A1E06BCA5E7C9B8A5C163EB1ECF01EDD1A4A9B167C313C5" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:02Z", + "signature": "bSYYr6R4pu+tcq8ji6Jnnf5EkMPcCImyROgN16KNQxzvw82fLVQ2C+E3Ry9vEV86G0fQBaxL6SFd8xers7zzDw==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:02Z", + "signature": "jlxTNsZ8h1uyVjWndZrvBAZpAonQhfSoC/MZSwWb0tIgpJ4/YlqUQZoRnr+QsV5btJfpDeknFD++5LAjUcsrDg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:22Z", + "verdict": "INVALID" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_Test2NotEnoughTrustSuccess.json b/light/mbt/json/MC4_4_faulty_Test2NotEnoughTrustSuccess.json new file mode 100644 index 000000000..1aab886f3 --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_Test2NotEnoughTrustSuccess.json @@ -0,0 +1,462 @@ +{ + "description": "MC4_4_faulty_Test2NotEnoughTrustSuccess.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "6B68DB34DEF944920D6638B3AA84FE1DF790BC8BDC5189E201F23730D5756A9D", + "part_set_header": { + "total": 1, + "hash": "6B68DB34DEF944920D6638B3AA84FE1DF790BC8BDC5189E201F23730D5756A9D" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "8rGIxi7DjBLFlHUo/lAgTpmzsnTZ8HOgnQaIoe+HEM5AmrjBaVDWVMb5/nNAnJTj4hcReCh4jviXcyRkItFJCA==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "3cXnzhzJLKeF47ulcIWjgqsv9JBf9olbAo0mcjo7Ij6TfmCpJO6SmTiacBkiznsFSOc1ZSH+cHDBKA4AT7ozAg==" + }, + 
{ + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "4O8c5hxoHR861ldolxeY9W1iXCdxYJVIf0xD3+sANSxo0ipXayv8IS7YFw1zzZvDbjRRazVzbfyBYf2jl4JeDw==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "2Hel7uygQXpjYRJZiwtPLKNxT2Tg1/F5Zzs3VZpleFII9H1e5Gs02UjU0lybSXBKk/tD+NXPsdchrH/6/DmwAQ==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:45:11.160326991Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "next_validators_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "consensus_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "23DB6414C50B567947776438FC022CC24EA7489FFBA8025FAD5C4232046BE785", + "part_set_header": { + "total": 1, + "hash": "23DB6414C50B567947776438FC022CC24EA7489FFBA8025FAD5C4232046BE785" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "hiszzyt898e+HMgChDiyWNjWpbLMQ1Kfcb1Mm8KgZM4DYdvJT79fHy/N7W08y6/9DquZKlZz6hM1GTBfrZ6ODg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + 
"proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:20Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:04Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "next_validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "consensus_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "747249C8038E41C91EB3B737BAC2245F5F41B1527ABB7486C02CDF69C6B0DB53", + "part_set_header": { + "total": 1, + "hash": "747249C8038E41C91EB3B737BAC2245F5F41B1527ABB7486C02CDF69C6B0DB53" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:04Z", + "signature": "0sxZvRkF35OZ/ALf6xufgcP9QEeqd7mhXBD7nZ36CTSbYeeBVtEDspyz/M64UQ9PyADWkG9VtbB7zZhWEArOAg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:20Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "2", + "time": "1970-01-01T00:00:03Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "2", + "round": 1, + "block_id": { + "hash": "8F5D783FEDA6E53A6333DAB6324D567395D9189B4BBB51E3A9F2F360B667E928", + "part_set_header": { + "total": 1, + "hash": "8F5D783FEDA6E53A6333DAB6324D567395D9189B4BBB51E3A9F2F360B667E928" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "5uF6x606UvPT7JLmjEUZE6yoA5uaQU1HTi3cUgTNAeNwExwvwPsj2ERy5qxBYEzQP587g2NPDrylzHagFVmJDQ==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "PDSL3wHNLYafgBDZ04JTHUjtQPK4LbT7FpglwYAXlfD1K51Soq4L4QUsiHqUfpp7+gykLJzluYhNQcWDLju4Dg==" + }, + { + "block_id_flag": 2, + 
"validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "Toe2ayrfxX2g/eMST8ggDIKp127ZAKUWgvw0F716mfg7jTJA6WGtDzPzPueLkBUbIyqQvcjWuuoR5FV4WnMBCQ==" + }, + { + "block_id_flag": 1, + "validator_address": null, + "timestamp": null, + "signature": null + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:20Z", + "verdict": "SUCCESS" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "next_validators_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "consensus_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "23DB6414C50B567947776438FC022CC24EA7489FFBA8025FAD5C4232046BE785", + "part_set_header": { + "total": 1, + "hash": "23DB6414C50B567947776438FC022CC24EA7489FFBA8025FAD5C4232046BE785" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "hiszzyt898e+HMgChDiyWNjWpbLMQ1Kfcb1Mm8KgZM4DYdvJT79fHy/N7W08y6/9DquZKlZz6hM1GTBfrZ6ODg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + 
"pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:20Z", + "verdict": "SUCCESS" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_Test3NotEnoughTrustFailure.json b/light/mbt/json/MC4_4_faulty_Test3NotEnoughTrustFailure.json new file mode 100644 index 000000000..1ac9a7b2a --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_Test3NotEnoughTrustFailure.json @@ -0,0 +1,538 @@ +{ + "description": "MC4_4_faulty_Test3NotEnoughTrustFailure.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "5F7419DA4B1BCFC2D2EB8C663405D9FF67DDE3BF88DB0A8A5D579E6FF1AD814E", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "F7DC6F348F04E01EC7DEA4348A3BFA2F0D7533900986EA66F6006C70BDD52D2E", + "part_set_header": { + "total": 1, + "hash": "F7DC6F348F04E01EC7DEA4348A3BFA2F0D7533900986EA66F6006C70BDD52D2E" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "S5wM4flAsMJ7uGSGduppmUqDeFZBUBFKkp+LTy249+AgM3oup9ULs7eUzNiwjhV4gWnPnLJ91m6IZ3s047xzAg==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "ZLOGEO5mgrVoTpFA5DLMLX0ggBWnWLWmMF5tAorZC732T+oR2u2USAvGhkZtpM73WN3NUp04aVHInGMsYtz9Dg==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "Lwa9l7+dJci4+mXD9ZsvLnbX0TuzWYIjfj9vU51rAftFRGEig7DHToufWaMfjwGMN53WrG72YfHAXxBigWaBBg==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "SSHBm3HdeyC1fgPqjTp647mRGxaCKA/GGraM0UFcuXv3mUjfjowL8CNjthJHgXIQCmYdF0HDwLZb1SCvWFe0Aw==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:46:51.160327001Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": 
"test-chain", + "height": "4", + "time": "1970-01-01T00:00:07Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "next_validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "consensus_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "81D85BE9567F7069A4760C663062E66660DADF34" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "A63EEADF3FB32E33B113FF28726100E2ACA295E7C467005BF35FB43ADC0D53C8", + "part_set_header": { + "total": 1, + "hash": "A63EEADF3FB32E33B113FF28726100E2ACA295E7C467005BF35FB43ADC0D53C8" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:07Z", + "signature": "lDmtsNALIr3ZysmMkrYW5jPufVGQcR7U2rpGFwJfFeTQSohqm9yVjzLVeGsPZFjdmGUltxwi7nH63iIIjl7VCg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:08Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:07Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "next_validators_hash": "C4DFBC98F77BE756D7EB3B475471189E82F7760DD111754AA2A25CF548AE6EF8", + "consensus_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "13C32ED0F2BED33E19B4832CEEB6F949E822449F770B9B3A7F02254F391B7CD0", + "part_set_header": { + "total": 1, + "hash": "13C32ED0F2BED33E19B4832CEEB6F949E822449F770B9B3A7F02254F391B7CD0" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:07Z", + "signature": "KZ0VUajBcnvw1Lp7DnYFGTPt6sstretUcfMY9nkszfQtvcJ1x4sFvJ/D0LWkpsNVMtNSWYobw+gfETQLVbmAAQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + 
"address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:09Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "2", + "time": "1970-01-01T00:00:03Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5F7419DA4B1BCFC2D2EB8C663405D9FF67DDE3BF88DB0A8A5D579E6FF1AD814E", + "next_validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "consensus_hash": "5F7419DA4B1BCFC2D2EB8C663405D9FF67DDE3BF88DB0A8A5D579E6FF1AD814E", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "2", + "round": 1, + "block_id": { + "hash": "E98C8412BF8736722EEBFF209C5D0AB9F82B599344D043139B4D4747E1FF21EE", + "part_set_header": { + "total": 1, + "hash": "E98C8412BF8736722EEBFF209C5D0AB9F82B599344D043139B4D4747E1FF21EE" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "V2LEkvNw6vwCh5t/eTqOE0QMnRveeNV6nS9bqAD8S/dDtVnzUTwfwEgEHPwPFJDkszVkZ/9pqoKTInoO2bsHAg==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "AyDrm3XpFjB1OWJdYegH3dYp+Q9ZXV/kAstddVzpvU4pL187Tad2bNMqcgoroTiwaCWC7jtOrHd4l8Tq5myjDA==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "cyQzNOgKd1OKNQJChG/E0pk9+fZ4p8bIpAqD5oZy0xT+e1DywIVUVDx0LBqbfm38C4djq3klKMvTUwTcDypCDQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + 
{ + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:09Z", + "verdict": "SUCCESS" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:07Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "next_validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "consensus_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "81D85BE9567F7069A4760C663062E66660DADF34" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "A63EEADF3FB32E33B113FF28726100E2ACA295E7C467005BF35FB43ADC0D53C8", + "part_set_header": { + "total": 1, + "hash": "A63EEADF3FB32E33B113FF28726100E2ACA295E7C467005BF35FB43ADC0D53C8" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:07Z", + "signature": "lDmtsNALIr3ZysmMkrYW5jPufVGQcR7U2rpGFwJfFeTQSohqm9yVjzLVeGsPZFjdmGUltxwi7nH63iIIjl7VCg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:09Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:07Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "next_validators_hash": "C4DFBC98F77BE756D7EB3B475471189E82F7760DD111754AA2A25CF548AE6EF8", + "consensus_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "13C32ED0F2BED33E19B4832CEEB6F949E822449F770B9B3A7F02254F391B7CD0", + "part_set_header": { + "total": 1, + "hash": "13C32ED0F2BED33E19B4832CEEB6F949E822449F770B9B3A7F02254F391B7CD0" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:07Z", + "signature": "KZ0VUajBcnvw1Lp7DnYFGTPt6sstretUcfMY9nkszfQtvcJ1x4sFvJ/D0LWkpsNVMtNSWYobw+gfETQLVbmAAQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": 
"C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:09Z", + "verdict": "INVALID" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_Test3NotEnoughTrustSuccess.json b/light/mbt/json/MC4_4_faulty_Test3NotEnoughTrustSuccess.json new file mode 100644 index 000000000..e4c5a864d --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_Test3NotEnoughTrustSuccess.json @@ -0,0 +1,662 @@ +{ + "description": "MC4_4_faulty_Test3NotEnoughTrustSuccess.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "C106084B050BDCC5AEBC414628992E43B6216544E19826BAB46027350C5FD3C5", + "part_set_header": { + "total": 1, + "hash": "C106084B050BDCC5AEBC414628992E43B6216544E19826BAB46027350C5FD3C5" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "q0CS2J0SFpdIVuqaHEmdp8epPcZli61bfVkdA720J+TzJ06ahepHUry6P/ZD+ex6GuQcSjBP6mfzp0ksjqf3BQ==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "jKDmxZTfFv5xlj3byRSxV8gMDQUirQE4O8hPKvp9EvmIWwCX1S7D/qQo+GhCvfiF3QPdQ3kRCpdvwrTuq+6RBA==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "AL2jwdkeW/o9DjLU3vfcqKG9QCqnhKxdPN4i/miR6FIA87v4Y45jFvZw8Ue6hhwkGKs3d1QghJXVlRJFg8VXDw==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "gV5npKv90ghI2wj2MP06qkVyWTbjBwBzdQnBS3ssggEE+is/BRMQQfKEKpmTAF0KIS+eZj7jmj8b+isxC3QfDw==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:46:06.160326996Z" + }, + "input": [ + { + "block": { 
+ "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "next_validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "consensus_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "EED66C25F857A3DA1443411CCB93DD943574A8A55F55C8E2248A129E270F9BE3", + "part_set_header": { + "total": 1, + "hash": "EED66C25F857A3DA1443411CCB93DD943574A8A55F55C8E2248A129E270F9BE3" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "/GSuwjyWvPmjmcCtrg+00QmcjerrnGZueyLJvAJxhJ5gumkVvCvXB05HDoHL0D523nJHR9hMBOFMA+7cywRoCA==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "wrXQ0BgJMF8EV+CWmmGersvCF9RI6/qbhBPxgAcLixV65N8RiWGba+sCfr9UHjHAEYsCsyFgQR2OLC7Bg1PKBA==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:06Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:04Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "next_validators_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "consensus_hash": 
"75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "6AE5C701F508EB5B63343858E068C5843F28105F" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "0E61042148BB059117B880E371AEC93341630D01E665088844BC1D8DFA5B6B23", + "part_set_header": { + "total": 1, + "hash": "0E61042148BB059117B880E371AEC93341630D01E665088844BC1D8DFA5B6B23" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:04Z", + "signature": "e/kNbh2aUmnowrri9eWLo9Wf1ZuPS1cobu+ITfz0uFn8LZcQtrQXkB7sfRrTDfRGvOkm3CpWnxD+UeQTxa12CQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:07Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "2", + "time": "1970-01-01T00:00:02Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "next_validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "consensus_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF" + }, + "commit": { + "height": "2", + "round": 1, + "block_id": { + "hash": "FE0A34650DA8A9402EA231A4D03FD1F39E0D7F894456D7268A582244FB968605", + "part_set_header": { + "total": 1, + "hash": "FE0A34650DA8A9402EA231A4D03FD1F39E0D7F894456D7268A582244FB968605" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:02Z", + "signature": "aVeQWMH20B1IGFIwH50HDv3qrDsvbuCuco918Spc/nHc06YJ9LYLSvo8gd7g4EoCY71eRLwPLOoHXk8Nas+XAw==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:08Z", + 
"verdict": "SUCCESS" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "next_validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "consensus_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "EED66C25F857A3DA1443411CCB93DD943574A8A55F55C8E2248A129E270F9BE3", + "part_set_header": { + "total": 1, + "hash": "EED66C25F857A3DA1443411CCB93DD943574A8A55F55C8E2248A129E270F9BE3" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "/GSuwjyWvPmjmcCtrg+00QmcjerrnGZueyLJvAJxhJ5gumkVvCvXB05HDoHL0D523nJHR9hMBOFMA+7cywRoCA==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "wrXQ0BgJMF8EV+CWmmGersvCF9RI6/qbhBPxgAcLixV65N8RiWGba+sCfr9UHjHAEYsCsyFgQR2OLC7Bg1PKBA==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:08Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:04Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "next_validators_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + 
"consensus_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "6AE5C701F508EB5B63343858E068C5843F28105F" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "0E61042148BB059117B880E371AEC93341630D01E665088844BC1D8DFA5B6B23", + "part_set_header": { + "total": 1, + "hash": "0E61042148BB059117B880E371AEC93341630D01E665088844BC1D8DFA5B6B23" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:04Z", + "signature": "e/kNbh2aUmnowrri9eWLo9Wf1ZuPS1cobu+ITfz0uFn8LZcQtrQXkB7sfRrTDfRGvOkm3CpWnxD+UeQTxa12CQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:08Z", + "verdict": "SUCCESS" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "next_validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "consensus_hash": "2B141A0A08B7EF0A65BC5F4D92F00BDEF0279124DEAC497BEF4C4336D0A3CE6F", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "EED66C25F857A3DA1443411CCB93DD943574A8A55F55C8E2248A129E270F9BE3", + "part_set_header": { + "total": 1, + "hash": "EED66C25F857A3DA1443411CCB93DD943574A8A55F55C8E2248A129E270F9BE3" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "/GSuwjyWvPmjmcCtrg+00QmcjerrnGZueyLJvAJxhJ5gumkVvCvXB05HDoHL0D523nJHR9hMBOFMA+7cywRoCA==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "wrXQ0BgJMF8EV+CWmmGersvCF9RI6/qbhBPxgAcLixV65N8RiWGba+sCfr9UHjHAEYsCsyFgQR2OLC7Bg1PKBA==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": 
"Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:08Z", + "verdict": "SUCCESS" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_TestFailure.json b/light/mbt/json/MC4_4_faulty_TestFailure.json new file mode 100644 index 000000000..a63b5f1b6 --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_TestFailure.json @@ -0,0 +1,347 @@ +{ + "description": "MC4_4_faulty_TestFailure.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "658DEEC010B33EDB1977FA7B38087A8C547D65272F6A63854959E517AAD20597", + "part_set_header": { + "total": 1, + "hash": "658DEEC010B33EDB1977FA7B38087A8C547D65272F6A63854959E517AAD20597" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "gUvww0D+bCNnq0wY4GvDkWAUQO3kbi9YvmoRBAC3goRZ6mW8Fh6V9hrMQYbpRpf7LZqFAdnleFgXnnEuKz17Bg==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "54nTri+VJoBu8HCTb+c92aYrPiMSM71qVDkdRtwmE40LWPUFkTJNTqTLXbBXutQ1p5s6PyuB+p4UfWAwYCuUCQ==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "PWesm77j/+sQh1p00pDJv3R3B9tpe1HlfhaTS2be/5FZfq3EMH3ceplTSNGsQKo0p4f8N9UUq+TYwm+3dsZeBg==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "ngAHu3FpNX6aW4B7xmFd7ckNScOM+lfuCQuMDs7uq20UoNnnGasFOcFMXD+0dQnRndEu1RItr+0kgxKaD6OtAQ==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": 
"0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:44:52.160326989Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "F6AF3B9193F2672E2E3830EC49F0D7E527291DEDA4326EDB7A6FB812BE8F3251", + "next_validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "consensus_hash": "F6AF3B9193F2672E2E3830EC49F0D7E527291DEDA4326EDB7A6FB812BE8F3251", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "6AE5C701F508EB5B63343858E068C5843F28105F" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "32DD1A7D7E5C8106E14255B40F029DC568E3326512B50F45012580CD6683B9E6", + "part_set_header": { + "total": 1, + "hash": "32DD1A7D7E5C8106E14255B40F029DC568E3326512B50F45012580CD6683B9E6" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "RL9tPx8XS753xu4ziuoICsAVRmqhu34gx3NN0gsNGQw+HvECVb77g9pvcapRPDkkVf89be6dAIy/WjrsfATGDg==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "kqxDTznWv65+GJA08AV4JTMBeKzDaG7jAhMA7P4YgFkM2KDKw2vOBw0R4LnLkzZQWJUkbzXeYRHcVoJlT35JAg==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "aWEOTgdl9m5vBKDSCrUloM/2AfUp+SNDqbpJFEuhBv0DYmeRJDCEoeQnGACjaZHjW4LjaxgNnTOSBVFlaP/vAg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:16Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "2", + "time": "1970-01-01T00:00:03Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "next_validators_hash": 
"C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "consensus_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "2", + "round": 1, + "block_id": { + "hash": "A14AED7ED200C7F85F89C9A43029E0CE88691532193E198E3F45AA3375AE8D01", + "part_set_header": { + "total": 1, + "hash": "A14AED7ED200C7F85F89C9A43029E0CE88691532193E198E3F45AA3375AE8D01" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "42tNU0fwo3UKq2toY2p39ykL6ZhWrCIoGjzE5O0mmvn92SZHAg1OUGmn4c5bUF6H2kNKZXCn6Zp6T/UxhlEOBQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:16Z", + "verdict": "SUCCESS" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:04Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "next_validators_hash": "F6AF3B9193F2672E2E3830EC49F0D7E527291DEDA4326EDB7A6FB812BE8F3251", + "consensus_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "AFABB1F6927F1D7845EA474BCF523AF948644C7B1301CBC17B8A264903B9AD16", + "part_set_header": { + "total": 1, + "hash": "AFABB1F6927F1D7845EA474BCF523AF948644C7B1301CBC17B8A264903B9AD16" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:04Z", + "signature": "If14ddLKYwosISJdPovBpU2K1+R91ZqDY/JAyuPsGXCXm70ZyciRQBoGEOVVzAs3s3hfc+OZAScGtpK8meyxDw==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": 
"C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:24Z", + "verdict": "INVALID" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_TestHeaderFromFuture.json b/light/mbt/json/MC4_4_faulty_TestHeaderFromFuture.json new file mode 100644 index 000000000..856e0676e --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_TestHeaderFromFuture.json @@ -0,0 +1,162 @@ +{ + "description": "MC4_4_faulty_TestHeaderFromFuture.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "C106084B050BDCC5AEBC414628992E43B6216544E19826BAB46027350C5FD3C5", + "part_set_header": { + "total": 1, + "hash": "C106084B050BDCC5AEBC414628992E43B6216544E19826BAB46027350C5FD3C5" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "q0CS2J0SFpdIVuqaHEmdp8epPcZli61bfVkdA720J+TzJ06ahepHUry6P/ZD+ex6GuQcSjBP6mfzp0ksjqf3BQ==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "jKDmxZTfFv5xlj3byRSxV8gMDQUirQE4O8hPKvp9EvmIWwCX1S7D/qQo+GhCvfiF3QPdQ3kRCpdvwrTuq+6RBA==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "AL2jwdkeW/o9DjLU3vfcqKG9QCqnhKxdPN4i/miR6FIA87v4Y45jFvZw8Ue6hhwkGKs3d1QghJXVlRJFg8VXDw==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "gV5npKv90ghI2wj2MP06qkVyWTbjBwBzdQnBS3ssggEE+is/BRMQQfKEKpmTAF0KIS+eZj7jmj8b+isxC3QfDw==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:47:33.160327005Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:23:25Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "next_validators_hash": "E624CE5E2693812E58E8DBB64C7A05149A58157114D34F08CB5992FE2BECC0A7", + "consensus_hash": "F49C3E794533450FEA327755F5962F99C88F5545453E6D517BBDD96EA066B50C", + "app_hash": "", + 
"last_results_hash": null, + "evidence_hash": null, + "proposer_address": "81D85BE9567F7069A4760C663062E66660DADF34" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "4A71282D7A0FA97B3809C24291E6894081710CDA0264FE31631BD524B8D62CB2", + "part_set_header": { + "total": 1, + "hash": "4A71282D7A0FA97B3809C24291E6894081710CDA0264FE31631BD524B8D62CB2" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:23:25Z", + "signature": "io43cjLaPTzkNYsEpPZhKLkh1YJzM/ZOm0JZI6Qq9KzFZODOPMpSYaitHTHeJV0gIPh/X/29A/QKd62ByAuiBQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:24Z", + "verdict": "INVALID" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_TestSuccess.json b/light/mbt/json/MC4_4_faulty_TestSuccess.json new file mode 100644 index 000000000..9943b44b0 --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_TestSuccess.json @@ -0,0 +1,479 @@ +{ + "description": "MC4_4_faulty_TestSuccess.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "6B68DB34DEF944920D6638B3AA84FE1DF790BC8BDC5189E201F23730D5756A9D", + "part_set_header": { + "total": 1, + "hash": "6B68DB34DEF944920D6638B3AA84FE1DF790BC8BDC5189E201F23730D5756A9D" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "8rGIxi7DjBLFlHUo/lAgTpmzsnTZ8HOgnQaIoe+HEM5AmrjBaVDWVMb5/nNAnJTj4hcReCh4jviXcyRkItFJCA==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "3cXnzhzJLKeF47ulcIWjgqsv9JBf9olbAo0mcjo7Ij6TfmCpJO6SmTiacBkiznsFSOc1ZSH+cHDBKA4AT7ozAg==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "4O8c5hxoHR861ldolxeY9W1iXCdxYJVIf0xD3+sANSxo0ipXayv8IS7YFw1zzZvDbjRRazVzbfyBYf2jl4JeDw==" + }, + { 
+ "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "2Hel7uygQXpjYRJZiwtPLKNxT2Tg1/F5Zzs3VZpleFII9H1e5Gs02UjU0lybSXBKk/tD+NXPsdchrH/6/DmwAQ==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:44:35.160326987Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "next_validators_hash": "5F7419DA4B1BCFC2D2EB8C663405D9FF67DDE3BF88DB0A8A5D579E6FF1AD814E", + "consensus_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "6AE5C701F508EB5B63343858E068C5843F28105F" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "EEF6A072BAD4A86F7B01A3E4D4E0920BA79F1FA8A25204F86697CA5C27885BF7", + "part_set_header": { + "total": 1, + "hash": "EEF6A072BAD4A86F7B01A3E4D4E0920BA79F1FA8A25204F86697CA5C27885BF7" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "5RJxkKr19lA4YOg848c0NfTB0qID+klbglOH4iugPMcnjwpsgwP3p+re65uFNe7NNO3D0c5CUQX6bA9TpwO5CQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + 
"proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:19Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "next_validators_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", + "consensus_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "6A1D90F13DCA0E65251D3DA8A07EA17A86CF79E340729DFEF165AC90FF9C2080", + "part_set_header": { + "total": 1, + "hash": "6A1D90F13DCA0E65251D3DA8A07EA17A86CF79E340729DFEF165AC90FF9C2080" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "Dg+9iWPS+P6d10RSuIgXKlC5e4IvY4/VU0fsIeCnBk5xRcjnQVy7FObhrDTLdXDo6NVd29h+ypEiLGfwPEa/CA==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:20Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "2", + "time": "1970-01-01T00:00:02Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "AAFE392AA939DA2A051F3C57707569B1836F93ACC8F35B57BB3CDF615B649013", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "2", + "round": 1, + "block_id": { + "hash": "DE957F0FC7A17229F36289714559F7FB5E908DEE04E549FF88DB72404E118581", + "part_set_header": { + "total": 1, + "hash": "DE957F0FC7A17229F36289714559F7FB5E908DEE04E549FF88DB72404E118581" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:02Z", + "signature": "rHPemviCweCd95mauh9ST0eW6KsC5A/melokemcZ3gH22+tcIDbLy+vkyXXgpAANKgXcblIkpflI/YJ8IaiJCQ==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:02Z", + "signature": "wUAMEasU8Rry1Z9xa5/VZTUYWHvp41vz0eUir0jl3QjVXqNS+cJgduEvu7e0uZSMjrLf2le8XKXVz2H767Z0Dw==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:02Z", + "signature": "+O0Pp6P+CtNt0QzY3YYPBqr2CPcCOXb3CwWR+1xTUMNDkRDLQK8UP12QdHsdqRB8Ocm2+ZKj8OTVv0uUWWPuCA==" + }, + { + "block_id_flag": 2, + 
"validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:02Z", + "signature": "QENfv06GEZj6QY64sPLTnditix/SreqiaFoQxWIpwd6mbHx0sHhk0E6z+nw8MzKssaKE7wD3km3gHEYzKnJNCg==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:20Z", + "verdict": "SUCCESS" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "next_validators_hash": "5F7419DA4B1BCFC2D2EB8C663405D9FF67DDE3BF88DB0A8A5D579E6FF1AD814E", + "consensus_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "6AE5C701F508EB5B63343858E068C5843F28105F" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "EEF6A072BAD4A86F7B01A3E4D4E0920BA79F1FA8A25204F86697CA5C27885BF7", + "part_set_header": { + "total": 1, + "hash": "EEF6A072BAD4A86F7B01A3E4D4E0920BA79F1FA8A25204F86697CA5C27885BF7" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "5RJxkKr19lA4YOg848c0NfTB0qID+klbglOH4iugPMcnjwpsgwP3p+re65uFNe7NNO3D0c5CUQX6bA9TpwO5CQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + 
}, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:21Z", + "verdict": "SUCCESS" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_TestUntrustedBeforeTrusted.json b/light/mbt/json/MC4_4_faulty_TestUntrustedBeforeTrusted.json new file mode 100644 index 000000000..ea57eacc9 --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_TestUntrustedBeforeTrusted.json @@ -0,0 +1,170 @@ +{ + "description": "MC4_4_faulty_TestUntrustedBeforeTrusted.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "6B68DB34DEF944920D6638B3AA84FE1DF790BC8BDC5189E201F23730D5756A9D", + "part_set_header": { + "total": 1, + "hash": "6B68DB34DEF944920D6638B3AA84FE1DF790BC8BDC5189E201F23730D5756A9D" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "8rGIxi7DjBLFlHUo/lAgTpmzsnTZ8HOgnQaIoe+HEM5AmrjBaVDWVMb5/nNAnJTj4hcReCh4jviXcyRkItFJCA==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "3cXnzhzJLKeF47ulcIWjgqsv9JBf9olbAo0mcjo7Ij6TfmCpJO6SmTiacBkiznsFSOc1ZSH+cHDBKA4AT7ozAg==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "4O8c5hxoHR861ldolxeY9W1iXCdxYJVIf0xD3+sANSxo0ipXayv8IS7YFw1zzZvDbjRRazVzbfyBYf2jl4JeDw==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "2Hel7uygQXpjYRJZiwtPLKNxT2Tg1/F5Zzs3VZpleFII9H1e5Gs02UjU0lybSXBKk/tD+NXPsdchrH/6/DmwAQ==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": 
"tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:47:47.160327006Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:00Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", + "next_validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "consensus_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "730D3D6B2E9F4F0F23879458F2D02E0004F0F241" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "D1E7988F2C5B176E1AE1D7CA03F17A2734B1A90B154D41D0C01FEE49BA63DBAA", + "part_set_header": { + "total": 1, + "hash": "D1E7988F2C5B176E1AE1D7CA03F17A2734B1A90B154D41D0C01FEE49BA63DBAA" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:00Z", + "signature": "lJ/0qcg9/3PcEtnDSR10pswu0kZjfD8GSp03Esc/O6Odg8v20ZFIZCLUEbyFays23MfMpI08bYJrF9QnKjMQAw==" + } + ] + } + }, + "validator_set": { + "validators": [] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:23:24Z", + "verdict": "INVALID" + } + ] +} diff --git a/light/mbt/json/MC4_4_faulty_TestValsetDifferentAllSteps.json b/light/mbt/json/MC4_4_faulty_TestValsetDifferentAllSteps.json new file mode 100644 index 000000000..03e573443 --- /dev/null +++ b/light/mbt/json/MC4_4_faulty_TestValsetDifferentAllSteps.json @@ -0,0 +1,371 @@ +{ + "description": "MC4_4_faulty_TestValsetDifferentAllSteps.json", + "initial": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "1", + "time": "1970-01-01T00:00:01Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "next_validators_hash": "010ED897B4B347175BC54ADF87D640393862FF3D5038302CD523B0E97FC20079", + "consensus_hash": "5A69ACB73672274A2C020C7FAE539B2086D30F3B7E5B168A8031A21931FCA07D", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "1", + "round": 1, + "block_id": { + "hash": "42C62AB26BDCD052FD7D87449C1CA700A79780D55E2FC8129614D4D2DC24CB08", + "part_set_header": { + "total": 1, + "hash": "42C62AB26BDCD052FD7D87449C1CA700A79780D55E2FC8129614D4D2DC24CB08" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + 
"timestamp": "1970-01-01T00:00:01Z", + "signature": "mzNheVmshOSGCNfL/NfBBpJcofUx6cqclvEMOc9rZJ6A2pOrxO8ZymXej0FvksZ5mmhfLvZ0aW+as59WMldWBw==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "KisuL/gVSTDQP1Q51uBKd8xDZM4mX+rRKIpMlkfUYF+qW4K51sPvqL/pgKSiUwBPAoGRBzwLoavPg9oiyRwPBA==" + }, + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "fgq+19zjPxTp8HILDBaW8VJg+wzyVkthtmf0HJxdoaXd+uZRQ7LDS2Tn7LXMKAQ9Q0sjtZ4BA3H3sfv9wA56BA==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:01Z", + "signature": "Zy0rovAtLk58hTcprpXU7ikCdbky5rrQ8Y3o+/Xyo7VTt3zYiCdVsYj26agu8SR3cFkV96P2ryHF6NHWGwIJDw==" + } + ] + } + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "trusting_period": "1400000000000", + "now": "2020-10-21T08:47:18.160327003Z" + }, + "input": [ + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "4", + "time": "1970-01-01T00:00:05Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "next_validators_hash": "C8F8530F1A2E69409F2E0B4F86BB568695BC9790BA77EAC1505600D5506E22DA", + "consensus_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF" + }, + "commit": { + "height": "4", + "round": 1, + "block_id": { + "hash": "943FD341C1558245A93577E0A7CF48089B9E0FA175DE817A61EF7233AF810BF6", + "part_set_header": { + "total": 1, + "hash": "943FD341C1558245A93577E0A7CF48089B9E0FA175DE817A61EF7233AF810BF6" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:05Z", + "signature": "Y9RiUiuj/kTgPU1BCNrWbNSHEcyf3nr1o0ohY1xkRf89rYRu34oJSWU65paMAfPAosfeaHHPjYXG2whJk+dGBQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": 
"BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:06Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "3", + "time": "1970-01-01T00:00:04Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "C4DFBC98F77BE756D7EB3B475471189E82F7760DD111754AA2A25CF548AE6EF8", + "next_validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A", + "consensus_hash": "C4DFBC98F77BE756D7EB3B475471189E82F7760DD111754AA2A25CF548AE6EF8", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "81D85BE9567F7069A4760C663062E66660DADF34" + }, + "commit": { + "height": "3", + "round": 1, + "block_id": { + "hash": "48A8E428AF500C9BD5674A9A2FC1217DD97B144FD623DDD2C4679022E19A5615", + "part_set_header": { + "total": 1, + "hash": "48A8E428AF500C9BD5674A9A2FC1217DD97B144FD623DDD2C4679022E19A5615" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "81D85BE9567F7069A4760C663062E66660DADF34", + "timestamp": "1970-01-01T00:00:04Z", + "signature": "WUKkETiWSMDSgd/7sxOD8KgDrL/kg78vXbA2r42+qEvuzZSuwob+7yHXYEn32lDtLl5lnsENVIjtqUrEPkQKBg==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:04Z", + "signature": "3H9a3YJJjqewYR3HhSMxM3yAy0niBUhWX0+6K67UJVeEtXXVIk/OQJ9HeVmghsayGEJGvzcyjbHDD9CIkk/VDw==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:06Z", + "verdict": "NOT_ENOUGH_TRUST" + }, + { + "block": { + "signed_header": { + "header": { + "version": { + "block": "11", + "app": "0" + }, + "chain_id": "test-chain", + "height": "2", + "time": "1970-01-01T00:00:03Z", + "last_block_id": null, + "last_commit_hash": null, + "data_hash": null, + "validators_hash": "010ED897B4B347175BC54ADF87D640393862FF3D5038302CD523B0E97FC20079", + "next_validators_hash": "C4DFBC98F77BE756D7EB3B475471189E82F7760DD111754AA2A25CF548AE6EF8", + "consensus_hash": "010ED897B4B347175BC54ADF87D640393862FF3D5038302CD523B0E97FC20079", + "app_hash": "", + "last_results_hash": null, + "evidence_hash": null, + "proposer_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A" + }, + "commit": { + "height": "2", + "round": 1, + "block_id": { + "hash": "208411D47FC3C56A3243E8BA57010A144BAD926F2FEFFBFDFB695CF19D2788CF", + "part_set_header": { + "total": 1, + "hash": "208411D47FC3C56A3243E8BA57010A144BAD926F2FEFFBFDFB695CF19D2788CF" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "timestamp": 
"1970-01-01T00:00:03Z", + "signature": "EDJIttaUcyoVcfIyOdHTw6qmtY8Jrf5cEMquCYOxnahu6BUNYbomz8L2t0uscbJqrDzMaW1nGDAyNrIEoBlnDQ==" + }, + { + "block_id_flag": 2, + "validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "QtatsO+ghgyDEJKDMmoVKdeDT8E3srh7WecyladY0ityBF9TKcrBNBIImCvPlStVu5uUbmM5NbG9+2In/F3DDA==" + }, + { + "block_id_flag": 2, + "validator_address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "timestamp": "1970-01-01T00:00:03Z", + "signature": "RJ9f2beJHCxhuYBHmPc3oWdDlQ8DOfBJOz9vN8tvEmhA0zb2qE9Zxe4jyO7Xr9wvq09yXQShTZKDsjOhOF6GAQ==" + } + ] + } + }, + "validator_set": { + "validators": [ + { + "address": "0616A636E7D0579A632EC37ED3C3F2B7E8522A0A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "kwd8trZ8t5ASwgUbBEAnDq49nRRrrKvt2onhS4JSfQM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "6AE5C701F508EB5B63343858E068C5843F28105F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "next_validator_set": { + "validators": [ + { + "address": "81D85BE9567F7069A4760C663062E66660DADF34", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Lk4zm2cJO4FpzXFF9WUV9NzOLfr5jV+ps7EhwUDKlZM=" + }, + "voting_power": "50", + "proposer_priority": null + }, + { + "address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to=" + }, + "voting_power": "50", + "proposer_priority": null + } + ] + }, + "provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE" + }, + "now": "1970-01-01T00:00:06Z", + "verdict": "SUCCESS" + } + ] +} diff --git a/light/provider/errors.go b/light/provider/errors.go new file mode 100644 index 000000000..5d24efd73 --- /dev/null +++ b/light/provider/errors.go @@ -0,0 +1,25 @@ +package provider + +import ( + "errors" + "fmt" +) + +var ( + // ErrLightBlockNotFound is returned when a provider can't find the + // requested header. + ErrLightBlockNotFound = errors.New("light block not found") + // ErrNoResponse is returned if the provider doesn't respond to the + // request in a gieven time + ErrNoResponse = errors.New("client failed to respond") +) + +// ErrBadLightBlock is returned when a provider returns an invalid +// light block. 
+type ErrBadLightBlock struct { + Reason error +} + +func (e ErrBadLightBlock) Error() string { + return fmt.Sprintf("client provided bad signed header: %s", e.Reason.Error()) +} diff --git a/light/provider/http/http.go b/light/provider/http/http.go new file mode 100644 index 000000000..fd42d75d7 --- /dev/null +++ b/light/provider/http/http.go @@ -0,0 +1,176 @@ +package http + +import ( + "context" + "fmt" + "math/rand" + "regexp" + "strings" + "time" + + "github.com/tendermint/tendermint/light/provider" + rpcclient "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" + "github.com/tendermint/tendermint/types" +) + +// This is very brittle, see: https://github.com/tendermint/tendermint/issues/4740 +var ( + regexpMissingHeight = regexp.MustCompile(`height \d+ (must be less than or equal to|is not available)`) + maxRetryAttempts = 10 +) + +// http provider uses an RPC client to obtain the necessary information. +type http struct { + chainID string + client rpcclient.RemoteClient +} + +// New creates an HTTP provider, which uses the rpchttp.HTTP client under +// the hood. If no scheme is provided in the remote URL, http is used by +// default. +func New(chainID, remote string) (provider.Provider, error) { + // Ensure URL scheme is set (default HTTP) when not provided. + if !strings.Contains(remote, "://") { + remote = "http://" + remote + } + + httpClient, err := rpchttp.New(remote, "/websocket") + if err != nil { + return nil, err + } + + return NewWithClient(chainID, httpClient), nil +} + +// NewWithClient allows you to provide a custom client. +func NewWithClient(chainID string, client rpcclient.RemoteClient) provider.Provider { + return &http{ + client: client, + chainID: chainID, + } +} + +// ChainID returns the chainID this provider was configured with. +func (p *http) ChainID() string { + return p.chainID +} + +func (p *http) String() string { + return fmt.Sprintf("http{%s}", p.client.Remote()) +} + +// LightBlock fetches a LightBlock at the given height and checks the +// chainID matches. +func (p *http) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) { + h, err := validateHeight(height) + if err != nil { + return nil, provider.ErrBadLightBlock{Reason: err} + } + + sh, err := p.signedHeader(ctx, h) + if err != nil { + return nil, err + } + + vs, err := p.validatorSet(ctx, h) + if err != nil { + return nil, err + } + + lb := &types.LightBlock{ + SignedHeader: sh, + ValidatorSet: vs, + } + + err = lb.ValidateBasic(p.chainID) + if err != nil { + return nil, provider.ErrBadLightBlock{Reason: err} + } + + return lb, nil +} + +// ReportEvidence calls `/broadcast_evidence` endpoint.
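A minimal usage sketch of the provider above (the chain ID and node address are placeholders, and the node must be reachable):

package main

import (
	"context"
	"fmt"

	lighthttp "github.com/tendermint/tendermint/light/provider/http"
)

func main() {
	// No "://" in the address, so "http://" is prepended automatically.
	p, err := lighthttp.New("my-chain", "127.0.0.1:26657")
	if err != nil {
		panic(err)
	}
	// Height 0 requests the latest light block; the result has already
	// passed ValidateBasic against the configured chain ID.
	lb, err := p.LightBlock(context.Background(), 0)
	if err != nil {
		panic(err)
	}
	fmt.Println("latest height:", lb.Height)
}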
+func (p *http) ReportEvidence(ctx context.Context, ev types.Evidence) error { + _, err := p.client.BroadcastEvidence(ctx, ev) + return err +} + +func (p *http) validatorSet(ctx context.Context, height *int64) (*types.ValidatorSet, error) { + var ( + maxPerPage = 100 + vals = []*types.Validator{} + page = 1 + ) + + for len(vals)%maxPerPage == 0 { + for attempt := 1; attempt <= maxRetryAttempts; attempt++ { + res, err := p.client.Validators(ctx, height, &page, &maxPerPage) + if err != nil { + // TODO: standardize errors on the RPC side + if regexpMissingHeight.MatchString(err.Error()) { + return nil, provider.ErrLightBlockNotFound + } + // if we have exceeded retry attempts then return no response error + if attempt == maxRetryAttempts { + return nil, provider.ErrNoResponse + } + // else we wait and try again with exponential backoff + time.Sleep(backoffTimeout(uint16(attempt))) + continue + } + if len(res.Validators) == 0 { // no more validators left + valSet, err := types.ValidatorSetFromExistingValidators(vals) + if err != nil { + return nil, provider.ErrBadLightBlock{Reason: err} + } + return valSet, nil + } + vals = append(vals, res.Validators...) + page++ + break + } + } + valSet, err := types.ValidatorSetFromExistingValidators(vals) + if err != nil { + return nil, provider.ErrBadLightBlock{Reason: err} + } + return valSet, nil +} + +func (p *http) signedHeader(ctx context.Context, height *int64) (*types.SignedHeader, error) { + for attempt := 1; attempt <= maxRetryAttempts; attempt++ { + commit, err := p.client.Commit(ctx, height) + if err != nil { + // TODO: standardize errors on the RPC side + if regexpMissingHeight.MatchString(err.Error()) { + return nil, provider.ErrLightBlockNotFound + } + // we wait and try again with exponential backoff + time.Sleep(backoffTimeout(uint16(attempt))) + continue + } + return &commit.SignedHeader, nil + } + return nil, provider.ErrNoResponse +} + +func validateHeight(height int64) (*int64, error) { + if height < 0 { + return nil, fmt.Errorf("expected height >= 0, got height %d", height) + } + + h := &height + if height == 0 { + h = nil + } + return h, nil +} + +// exponential backoff (with jitter) +// 0.5s -> 2s -> 4.5s -> 8s -> 12.5s with 1s variation +func backoffTimeout(attempt uint16) time.Duration { + // nolint:gosec // G404: Use of weak random number generator + return time.Duration(500*attempt*attempt)*time.Millisecond + time.Duration(rand.Intn(1000))*time.Millisecond +} diff --git a/lite2/provider/http/http_test.go b/light/provider/http/http_test.go similarity index 55% rename from lite2/provider/http/http_test.go rename to light/provider/http/http_test.go index b07dbb9ff..b6b3989a8 100644 --- a/lite2/provider/http/http_test.go +++ b/light/provider/http/http_test.go @@ -1,6 +1,7 @@ package http_test import ( + "context" "fmt" "os" "testing" @@ -9,29 +10,31 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/lite2/provider/http" - litehttp "github.com/tendermint/tendermint/lite2/provider/http" + "github.com/tendermint/tendermint/light/provider" + lighthttp "github.com/tendermint/tendermint/light/provider/http" rpcclient "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" ) func TestNewProvider(t *testing.T) { - c, err := http.New("chain-test", "192.168.0.1:26657") + c, err := lighthttp.New("chain-test",
"192.168.0.1:26657") require.NoError(t, err) require.Equal(t, fmt.Sprintf("%s", c), "http{http://192.168.0.1:26657}") - c, err = http.New("chain-test", "http://153.200.0.1:26657") + c, err = lighthttp.New("chain-test", "http://153.200.0.1:26657") require.NoError(t, err) require.Equal(t, fmt.Sprintf("%s", c), "http{http://153.200.0.1:26657}") - c, err = http.New("chain-test", "153.200.0.1") + c, err = lighthttp.New("chain-test", "153.200.0.1") require.NoError(t, err) require.Equal(t, fmt.Sprintf("%s", c), "http{http://153.200.0.1}") } func TestMain(m *testing.M) { app := kvstore.NewApplication() + app.RetainBlocks = 9 node := rpctest.StartTendermint(app) code := m.Run() @@ -50,26 +53,38 @@ func TestProvider(t *testing.T) { } chainID := genDoc.ChainID t.Log("chainID:", chainID) - p, err := litehttp.New(chainID, rpcAddr) + + c, err := rpchttp.New(rpcAddr, "/websocket") require.Nil(t, err) + + p := lighthttp.NewWithClient(chainID, c) + require.NoError(t, err) require.NotNil(t, p) // let it produce some blocks - err = rpcclient.WaitForHeight(p.(rpcclient.StatusClient), 6, nil) - require.Nil(t, err) + err = rpcclient.WaitForHeight(c, 10, nil) + require.NoError(t, err) // let's get the highest block - sh, err := p.SignedHeader(0) - - require.Nil(t, err, "%+v", err) - assert.True(t, sh.Height < 5000) + sh, err := p.LightBlock(context.Background(), 0) + require.NoError(t, err) + assert.True(t, sh.Height < 1000) // let's check this is valid somehow assert.Nil(t, sh.ValidateBasic(chainID)) // historical queries now work :) - lower := sh.Height - 5 - sh, err = p.SignedHeader(lower) - assert.Nil(t, err, "%+v", err) + lower := sh.Height - 3 + sh, err = p.LightBlock(context.Background(), lower) + require.NoError(t, err) assert.Equal(t, lower, sh.Height) + + // fetching missing heights (both future and pruned) should return appropriate errors + _, err = p.LightBlock(context.Background(), 1000) + require.Error(t, err) + assert.Equal(t, provider.ErrLightBlockNotFound, err) + + _, err = p.LightBlock(context.Background(), 1) + require.Error(t, err) + assert.Equal(t, provider.ErrLightBlockNotFound, err) } diff --git a/light/provider/mock/deadmock.go b/light/provider/mock/deadmock.go new file mode 100644 index 000000000..e32e6372a --- /dev/null +++ b/light/provider/mock/deadmock.go @@ -0,0 +1,32 @@ +package mock + +import ( + "context" + "errors" + + "github.com/tendermint/tendermint/light/provider" + "github.com/tendermint/tendermint/types" +) + +var errNoResp = errors.New("no response from provider") + +type deadMock struct { + chainID string +} + +// NewDeadMock creates a mock provider that always errors. 
+func NewDeadMock(chainID string) provider.Provider { + return &deadMock{chainID: chainID} +} + +func (p *deadMock) ChainID() string { return p.chainID } + +func (p *deadMock) String() string { return "deadMock" } + +func (p *deadMock) LightBlock(_ context.Context, height int64) (*types.LightBlock, error) { + return nil, errNoResp +} + +func (p *deadMock) ReportEvidence(_ context.Context, ev types.Evidence) error { + return errNoResp +} diff --git a/light/provider/mock/mock.go b/light/provider/mock/mock.go new file mode 100644 index 000000000..cf28846ef --- /dev/null +++ b/light/provider/mock/mock.go @@ -0,0 +1,91 @@ +package mock + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/tendermint/tendermint/light/provider" + "github.com/tendermint/tendermint/types" +) + +type Mock struct { + chainID string + headers map[int64]*types.SignedHeader + vals map[int64]*types.ValidatorSet + evidenceToReport map[string]types.Evidence // hash => evidence +} + +var _ provider.Provider = (*Mock)(nil) + +// New creates a mock provider with the given set of headers and validator +// sets. +func New(chainID string, headers map[int64]*types.SignedHeader, vals map[int64]*types.ValidatorSet) *Mock { + return &Mock{ + chainID: chainID, + headers: headers, + vals: vals, + evidenceToReport: make(map[string]types.Evidence), + } +} + +// ChainID returns the blockchain ID. +func (p *Mock) ChainID() string { + return p.chainID +} + +func (p *Mock) String() string { + var headers strings.Builder + for _, h := range p.headers { + fmt.Fprintf(&headers, " %d:%X", h.Height, h.Hash()) + } + + var vals strings.Builder + for _, v := range p.vals { + fmt.Fprintf(&vals, " %X", v.Hash()) + } + + return fmt.Sprintf("Mock{headers: %s, vals: %v}", headers.String(), vals.String()) +} + +func (p *Mock) LightBlock(_ context.Context, height int64) (*types.LightBlock, error) { + var lb *types.LightBlock + if height == 0 && len(p.headers) > 0 { + sh := p.headers[int64(len(p.headers))] + vals := p.vals[int64(len(p.vals))] + lb = &types.LightBlock{ + SignedHeader: sh, + ValidatorSet: vals, + } + + } + if _, ok := p.headers[height]; ok { + sh := p.headers[height] + vals := p.vals[height] + lb = &types.LightBlock{ + SignedHeader: sh, + ValidatorSet: vals, + } + } + if lb == nil { + return nil, provider.ErrLightBlockNotFound + } + if lb.SignedHeader == nil || lb.ValidatorSet == nil { + return nil, provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")} + } + if err := lb.ValidateBasic(lb.ChainID); err != nil { + return nil, provider.ErrBadLightBlock{Reason: err} + } + return lb, nil +} + +func (p *Mock) ReportEvidence(_ context.Context, ev types.Evidence) error { + p.evidenceToReport[string(ev.Hash())] = ev + return nil +} + +func (p *Mock) HasEvidence(ev types.Evidence) bool { + _, ok := p.evidenceToReport[string(ev.Hash())] + return ok +} diff --git a/light/provider/provider.go b/light/provider/provider.go new file mode 100644 index 000000000..80e8dbc15 --- /dev/null +++ b/light/provider/provider.go @@ -0,0 +1,29 @@ +package provider + +import ( + "context" + + "github.com/tendermint/tendermint/types" +) + +// Provider provides information for the light client to sync (verification +// happens in the client). +type Provider interface { + // ChainID returns the blockchain ID. + ChainID() string + + // LightBlock returns the LightBlock that corresponds to the given + // height. + // + // 0 - the latest. + // height must be >= 0. 
+ // + // If the provider fails to fetch the LightBlock due to IO or other + // issues, an error will be returned. + // If there's no LightBlock for the given height, ErrLightBlockNotFound + // is returned. + LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) + + // ReportEvidence reports evidence of misbehavior. + ReportEvidence(context.Context, types.Evidence) error +} diff --git a/lite2/proxy/proxy.go b/light/proxy/proxy.go similarity index 79% rename from lite2/proxy/proxy.go rename to light/proxy/proxy.go index 0bfa12bad..c5b71b0ad 100644 --- a/lite2/proxy/proxy.go +++ b/light/proxy/proxy.go @@ -2,25 +2,20 @@ package proxy import ( "context" + "fmt" "net" "net/http" - "github.com/pkg/errors" - - amino "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - lrpc "github.com/tendermint/tendermint/lite2/rpc" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpcserver "github.com/tendermint/tendermint/rpc/lib/server" + lrpc "github.com/tendermint/tendermint/light/rpc" + rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" ) // A Proxy defines parameters for running an HTTP server proxy. type Proxy struct { Addr string // TCP address to listen on, ":http" if empty Config *rpcserver.Config - Codec *amino.Codec Client *lrpc.Client Logger log.Logger Listener net.Listener @@ -37,7 +32,7 @@ func (p *Proxy) ListenAndServe() error { } p.Listener = listener - return rpcserver.StartHTTPServer( + return rpcserver.Serve( listener, mux, p.Logger, @@ -55,7 +50,7 @@ func (p *Proxy) ListenAndServeTLS(certFile, keyFile string) error { } p.Listener = listener - return rpcserver.StartHTTPAndTLSServer( + return rpcserver.ServeTLS( listener, mux, certFile, @@ -66,17 +61,15 @@ func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) { - ctypes.RegisterAmino(p.Codec) - mux := http.NewServeMux() // 1) Register regular routes. r := RPCRoutes(p.Client) - rpcserver.RegisterRPCFuncs(mux, r, p.Codec, p.Logger) + rpcserver.RegisterRPCFuncs(mux, r, p.Logger) // 2) Allow websocket connections. wmLogger := p.Logger.With("protocol", "websocket") - wm := rpcserver.NewWebsocketManager(r, p.Codec, + wm := rpcserver.NewWebsocketManager(r, rpcserver.OnDisconnect(func(remoteAddr string) { err := p.Client.UnsubscribeAll(context.Background(), remoteAddr) if err != nil && err != tmpubsub.ErrSubscriptionNotFound { @@ -91,7 +84,7 @@ func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) { // 3) Start a client.
if !p.Client.IsRunning() { if err := p.Client.Start(); err != nil { - return nil, mux, errors.Wrap(err, "Client#Start") + return nil, mux, fmt.Errorf("can't start client: %w", err) } } diff --git a/lite2/proxy/routes.go b/light/proxy/routes.go similarity index 76% rename from lite2/proxy/routes.go rename to light/proxy/routes.go index f7d5cd25b..0ed7f9433 100644 --- a/lite2/proxy/routes.go +++ b/light/proxy/routes.go @@ -2,10 +2,11 @@ package proxy import ( "github.com/tendermint/tendermint/libs/bytes" - lrpc "github.com/tendermint/tendermint/lite2/rpc" + lrpc "github.com/tendermint/tendermint/light/rpc" + rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpcserver "github.com/tendermint/tendermint/rpc/lib/server" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -23,6 +24,7 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { "blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight"), "genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), ""), "block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height"), + "block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash"), "block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height"), "commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height"), "tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove"), @@ -52,7 +54,7 @@ type rpcHealthFunc func(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) func makeHealthFunc(c *lrpc.Client) rpcHealthFunc { return func(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { - return c.Health() + return c.Health(ctx.Context()) } } @@ -61,7 +63,7 @@ type rpcStatusFunc func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) // nolint: interfacer func makeStatusFunc(c *lrpc.Client) rpcStatusFunc { return func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { - return c.Status() + return c.Status(ctx.Context()) } } @@ -69,7 +71,7 @@ type rpcNetInfoFunc func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ct func makeNetInfoFunc(c *lrpc.Client) rpcNetInfoFunc { return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultNetInfo, error) { - return c.NetInfo() + return c.NetInfo(ctx.Context()) } } @@ -77,7 +79,7 @@ type rpcBlockchainInfoFunc func(ctx *rpctypes.Context, minHeight, maxHeight int6 func makeBlockchainInfoFunc(c *lrpc.Client) rpcBlockchainInfoFunc { return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - return c.BlockchainInfo(minHeight, maxHeight) + return c.BlockchainInfo(ctx.Context(), minHeight, maxHeight) } } @@ -85,7 +87,7 @@ type rpcGenesisFunc func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) func makeGenesisFunc(c *lrpc.Client) rpcGenesisFunc { return func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { - return c.Genesis() + return c.Genesis(ctx.Context()) } } @@ -93,7 +95,15 @@ type rpcBlockFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBloc func makeBlockFunc(c *lrpc.Client) rpcBlockFunc { return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) { - return c.Block(height) + return c.Block(ctx.Context(), height) + } +} + +type rpcBlockByHashFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) + +func makeBlockByHashFunc(c *lrpc.Client) 
rpcBlockByHashFunc { + return func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { + return c.BlockByHash(ctx.Context(), hash) } } @@ -101,7 +111,7 @@ type rpcBlockResultsFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.Res func makeBlockResultsFunc(c *lrpc.Client) rpcBlockResultsFunc { return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlockResults, error) { - return c.BlockResults(height) + return c.BlockResults(ctx.Context(), height) } } @@ -109,7 +119,7 @@ type rpcCommitFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCom func makeCommitFunc(c *lrpc.Client) rpcCommitFunc { return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) { - return c.Commit(height) + return c.Commit(ctx.Context(), height) } } @@ -117,26 +127,26 @@ type rpcTxFunc func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.Res func makeTxFunc(c *lrpc.Client) rpcTxFunc { return func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { - return c.Tx(hash, prove) + return c.Tx(ctx.Context(), hash, prove) } } type rpcTxSearchFunc func(ctx *rpctypes.Context, query string, prove bool, - page, perPage int, orderBy string) (*ctypes.ResultTxSearch, error) + page, perPage *int, orderBy string) (*ctypes.ResultTxSearch, error) func makeTxSearchFunc(c *lrpc.Client) rpcTxSearchFunc { - return func(ctx *rpctypes.Context, query string, prove bool, page, perPage int, orderBy string) ( + return func(ctx *rpctypes.Context, query string, prove bool, page, perPage *int, orderBy string) ( *ctypes.ResultTxSearch, error) { - return c.TxSearch(query, prove, page, perPage, orderBy) + return c.TxSearch(ctx.Context(), query, prove, page, perPage, orderBy) } } type rpcValidatorsFunc func(ctx *rpctypes.Context, height *int64, - page, perPage int) (*ctypes.ResultValidators, error) + page, perPage *int) (*ctypes.ResultValidators, error) func makeValidatorsFunc(c *lrpc.Client) rpcValidatorsFunc { - return func(ctx *rpctypes.Context, height *int64, page, perPage int) (*ctypes.ResultValidators, error) { - return c.Validators(height, page, perPage) + return func(ctx *rpctypes.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { + return c.Validators(ctx.Context(), height, page, perPage) } } @@ -144,7 +154,7 @@ type rpcDumpConsensusStateFunc func(ctx *rpctypes.Context) (*ctypes.ResultDumpCo func makeDumpConsensusStateFunc(c *lrpc.Client) rpcDumpConsensusStateFunc { return func(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { - return c.DumpConsensusState() + return c.DumpConsensusState(ctx.Context()) } } @@ -152,7 +162,7 @@ type rpcConsensusStateFunc func(ctx *rpctypes.Context) (*ctypes.ResultConsensusS func makeConsensusStateFunc(c *lrpc.Client) rpcConsensusStateFunc { return func(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { - return c.ConsensusState() + return c.ConsensusState(ctx.Context()) } } @@ -160,15 +170,15 @@ type rpcConsensusParamsFunc func(ctx *rpctypes.Context, height *int64) (*ctypes. 
func makeConsensusParamsFunc(c *lrpc.Client) rpcConsensusParamsFunc { return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultConsensusParams, error) { - return c.ConsensusParams(height) + return c.ConsensusParams(ctx.Context(), height) } } -type rpcUnconfirmedTxsFunc func(ctx *rpctypes.Context, limit int) (*ctypes.ResultUnconfirmedTxs, error) +type rpcUnconfirmedTxsFunc func(ctx *rpctypes.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) func makeUnconfirmedTxsFunc(c *lrpc.Client) rpcUnconfirmedTxsFunc { - return func(ctx *rpctypes.Context, limit int) (*ctypes.ResultUnconfirmedTxs, error) { - return c.UnconfirmedTxs(limit) + return func(ctx *rpctypes.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { + return c.UnconfirmedTxs(ctx.Context(), limit) } } @@ -176,7 +186,7 @@ type rpcNumUnconfirmedTxsFunc func(ctx *rpctypes.Context) (*ctypes.ResultUnconfi func makeNumUnconfirmedTxsFunc(c *lrpc.Client) rpcNumUnconfirmedTxsFunc { return func(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { - return c.NumUnconfirmedTxs() + return c.NumUnconfirmedTxs(ctx.Context()) } } @@ -184,7 +194,7 @@ type rpcBroadcastTxCommitFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes. func makeBroadcastTxCommitFunc(c *lrpc.Client) rpcBroadcastTxCommitFunc { return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - return c.BroadcastTxCommit(tx) + return c.BroadcastTxCommit(ctx.Context(), tx) } } @@ -192,7 +202,7 @@ type rpcBroadcastTxSyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.Re func makeBroadcastTxSyncFunc(c *lrpc.Client) rpcBroadcastTxSyncFunc { return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return c.BroadcastTxSync(tx) + return c.BroadcastTxSync(ctx.Context(), tx) } } @@ -200,15 +210,21 @@ type rpcBroadcastTxAsyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.R func makeBroadcastTxAsyncFunc(c *lrpc.Client) rpcBroadcastTxAsyncFunc { return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return c.BroadcastTxAsync(tx) + return c.BroadcastTxAsync(ctx.Context(), tx) } } -type rpcABCIQueryFunc func(ctx *rpctypes.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) +type rpcABCIQueryFunc func(ctx *rpctypes.Context, path string, + data bytes.HexBytes, height int64, prove bool) (*ctypes.ResultABCIQuery, error) func makeABCIQueryFunc(c *lrpc.Client) rpcABCIQueryFunc { - return func(ctx *rpctypes.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQuery(path, data) + return func(ctx *rpctypes.Context, path string, data bytes.HexBytes, + height int64, prove bool) (*ctypes.ResultABCIQuery, error) { + + return c.ABCIQueryWithOptions(ctx.Context(), path, data, rpcclient.ABCIQueryOptions{ + Height: height, + Prove: prove, + }) } } @@ -216,7 +232,7 @@ type rpcABCIInfoFunc func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) func makeABCIInfoFunc(c *lrpc.Client) rpcABCIInfoFunc { return func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { - return c.ABCIInfo() + return c.ABCIInfo(ctx.Context()) } } @@ -225,6 +241,6 @@ type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*c // nolint: interfacer func makeBroadcastEvidenceFunc(c *lrpc.Client) rpcBroadcastEvidenceFunc { return func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { - return c.BroadcastEvidence(ev) + return c.BroadcastEvidence(ctx.Context(), ev) } } diff --git 
a/light/rpc/client.go b/light/rpc/client.go new file mode 100644 index 000000000..cdf16e524 --- /dev/null +++ b/light/rpc/client.go @@ -0,0 +1,605 @@ +package rpc + +import ( + "bytes" + "context" + "errors" + "fmt" + "time" + + "github.com/gogo/protobuf/proto" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/merkle" + tmbytes "github.com/tendermint/tendermint/libs/bytes" + tmmath "github.com/tendermint/tendermint/libs/math" + service "github.com/tendermint/tendermint/libs/service" + rpcclient "github.com/tendermint/tendermint/rpc/client" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/types" +) + +var errNegOrZeroHeight = errors.New("negative or zero height") + +// KeyPathFunc builds a merkle path out of the given path and key. +type KeyPathFunc func(path string, key []byte) (merkle.KeyPath, error) + +// LightClient is an interface that contains the functionality Client needs from the light client. +type LightClient interface { + ChainID() string + VerifyLightBlockAtHeight(ctx context.Context, height int64, now time.Time) (*types.LightBlock, error) + TrustedLightBlock(height int64) (*types.LightBlock, error) +} + +// Client is an RPC client, which uses light#Client to verify data (if it can +// be proved!). merkle.DefaultProofRuntime is used to verify values returned by +// ABCIQuery. +type Client struct { + service.BaseService + + next rpcclient.Client + lc LightClient + // Proof runtime used to verify values returned by ABCIQuery + prt *merkle.ProofRuntime + keyPathFn KeyPathFunc +} + +var _ rpcclient.Client = (*Client)(nil) + +// Option allows you to tweak Client. +type Option func(*Client) + +// KeyPathFn option can be used to set a function that parses a given path +// and builds the merkle path for the prover. It must be provided if you want +// to call ABCIQuery or ABCIQueryWithOptions. +func KeyPathFn(fn KeyPathFunc) Option { + return func(c *Client) { + c.keyPathFn = fn + } +} + +// NewClient returns a new client. +func NewClient(next rpcclient.Client, lc LightClient, opts ...Option) *Client { + c := &Client{ + next: next, + lc: lc, + prt: merkle.DefaultProofRuntime(), + } + c.BaseService = *service.NewBaseService(nil, "Client", c) + for _, o := range opts { + o(c) + } + return c +} + +func (c *Client) OnStart() error { + if !c.next.IsRunning() { + return c.next.Start() + } + return nil +} + +func (c *Client) OnStop() { + if c.next.IsRunning() { + if err := c.next.Stop(); err != nil { + c.Logger.Error("Error stopping on next", "err", err) + } + } +} + +func (c *Client) Status(ctx context.Context) (*ctypes.ResultStatus, error) { + return c.next.Status(ctx) +} + +func (c *Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { + return c.next.ABCIInfo(ctx) +} + +// ABCIQuery requests proof by default. +func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes) (*ctypes.ResultABCIQuery, error) { + return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) +} + +// ABCIQueryWithOptions always requests a proof, even if opts.Prove is false.
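Proof verification in ABCIQuery only works once a KeyPathFn is installed. A sketch of wiring one up, mirroring the URL-encoded key path used by the test further below (newVerifyingClient is a hypothetical constructor using this package's own names):

// newVerifyingClient builds a Client whose ABCIQuery responses are
// proof-checked. The key-path function here simply URL-encodes the key;
// real applications must match their app's store layout.
func newVerifyingClient(next rpcclient.Client, lc LightClient) *Client {
	return NewClient(next, lc,
		KeyPathFn(func(path string, key []byte) (merkle.KeyPath, error) {
			kp := merkle.KeyPath{}
			kp = kp.AppendKey(key, merkle.KeyEncodingURL)
			return kp, nil
		}))
}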
+func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmbytes.HexBytes, + opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + + // always request the proof + opts.Prove = true + + res, err := c.next.ABCIQueryWithOptions(ctx, path, data, opts) + if err != nil { + return nil, err + } + resp := res.Response + + // Validate the response. + if resp.IsErr() { + return nil, fmt.Errorf("err response code: %v", resp.Code) + } + if len(resp.Key) == 0 { + return nil, errors.New("empty key") + } + if resp.ProofOps == nil || len(resp.ProofOps.Ops) == 0 { + return nil, errors.New("no proof ops") + } + if resp.Height <= 0 { + return nil, errNegOrZeroHeight + } + + // Update the light client if we're behind. + // NOTE: AppHash for height H is in header H+1. + l, err := c.updateLightClientIfNeededTo(ctx, resp.Height+1) + if err != nil { + return nil, err + } + + // Validate the value proof against the trusted header. + if resp.Value != nil { + // 1) build a Merkle key path from path and resp.Key + if c.keyPathFn == nil { + return nil, errors.New("please configure Client with KeyPathFn option") + } + + kp, err := c.keyPathFn(path, resp.Key) + if err != nil { + return nil, fmt.Errorf("can't build merkle key path: %w", err) + } + + // 2) verify value + err = c.prt.VerifyValue(resp.ProofOps, l.AppHash, kp.String(), resp.Value) + if err != nil { + return nil, fmt.Errorf("verify value proof: %w", err) + } + } else { // OR validate the absence proof against the trusted header. + err = c.prt.VerifyAbsence(resp.ProofOps, l.AppHash, string(resp.Key)) + if err != nil { + return nil, fmt.Errorf("verify absence proof: %w", err) + } + } + + return &ctypes.ResultABCIQuery{Response: resp}, nil +} + +func (c *Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + return c.next.BroadcastTxCommit(ctx, tx) +} + +func (c *Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return c.next.BroadcastTxAsync(ctx, tx) +} + +func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return c.next.BroadcastTxSync(ctx, tx) +} + +func (c *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { + return c.next.UnconfirmedTxs(ctx, limit) +} + +func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { + return c.next.NumUnconfirmedTxs(ctx) +} + +func (c *Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { + return c.next.CheckTx(ctx, tx) +} + +func (c *Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { + return c.next.NetInfo(ctx) +} + +func (c *Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { + return c.next.DumpConsensusState(ctx) +} + +func (c *Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { + return c.next.ConsensusState(ctx) +} + +func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { + res, err := c.next.ConsensusParams(ctx, height) + if err != nil { + return nil, err + } + + // Validate res. + if err := types.ValidateConsensusParams(res.ConsensusParams); err != nil { + return nil, err + } + if res.BlockHeight <= 0 { + return nil, errNegOrZeroHeight + } + + // Update the light client if we're behind. 
+ l, err := c.updateLightClientIfNeededTo(ctx, res.BlockHeight) + if err != nil { + return nil, err + } + + // Verify hash. + if cH, tH := types.HashConsensusParams(res.ConsensusParams), l.ConsensusHash; !bytes.Equal(cH, tH) { + return nil, fmt.Errorf("params hash %X does not match trusted hash %X", + cH, tH) + } + + return res, nil +} + +func (c *Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) { + return c.next.Health(ctx) +} + +// BlockchainInfo calls rpcclient#BlockchainInfo and then verifies every header +// returned. +func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + res, err := c.next.BlockchainInfo(ctx, minHeight, maxHeight) + if err != nil { + return nil, err + } + + // Validate res. + for i, meta := range res.BlockMetas { + if meta == nil { + return nil, fmt.Errorf("nil block meta %d", i) + } + if err := meta.ValidateBasic(); err != nil { + return nil, fmt.Errorf("invalid block meta %d: %w", i, err) + } + } + + // Update the light client if we're behind. + if len(res.BlockMetas) > 0 { + lastHeight := res.BlockMetas[len(res.BlockMetas)-1].Header.Height + if _, err := c.updateLightClientIfNeededTo(ctx, lastHeight); err != nil { + return nil, err + } + } + + // Verify each of the BlockMetas. + for _, meta := range res.BlockMetas { + h, err := c.lc.TrustedLightBlock(meta.Header.Height) + if err != nil { + return nil, fmt.Errorf("trusted header %d: %w", meta.Header.Height, err) + } + if bmH, tH := meta.Header.Hash(), h.Hash(); !bytes.Equal(bmH, tH) { + return nil, fmt.Errorf("block meta header %X does not match with trusted header %X", + bmH, tH) + } + } + + return res, nil +} + +func (c *Client) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { + return c.next.Genesis(ctx) +} + +// Block calls rpcclient#Block and then verifies the result. +func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { + res, err := c.next.Block(ctx, height) + if err != nil { + return nil, err + } + + // Validate res. + if err := res.BlockID.ValidateBasic(); err != nil { + return nil, err + } + if err := res.Block.ValidateBasic(); err != nil { + return nil, err + } + if bmH, bH := res.BlockID.Hash, res.Block.Hash(); !bytes.Equal(bmH, bH) { + return nil, fmt.Errorf("blockID %X does not match with block %X", + bmH, bH) + } + + // Update the light client if we're behind. + l, err := c.updateLightClientIfNeededTo(ctx, res.Block.Height) + if err != nil { + return nil, err + } + + // Verify block. + if bH, tH := res.Block.Hash(), l.Hash(); !bytes.Equal(bH, tH) { + return nil, fmt.Errorf("block header %X does not match with trusted header %X", + bH, tH) + } + + return res, nil +} + +// BlockByHash calls rpcclient#BlockByHash and then verifies the result. +func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { + res, err := c.next.BlockByHash(ctx, hash) + if err != nil { + return nil, err + } + + // Validate res. + if err := res.BlockID.ValidateBasic(); err != nil { + return nil, err + } + if err := res.Block.ValidateBasic(); err != nil { + return nil, err + } + if bmH, bH := res.BlockID.Hash, res.Block.Hash(); !bytes.Equal(bmH, bH) { + return nil, fmt.Errorf("blockID %X does not match with block %X", + bmH, bH) + } + + // Update the light client if we're behind. + l, err := c.updateLightClientIfNeededTo(ctx, res.Block.Height) + if err != nil { + return nil, err + } + + // Verify block. 
+ if bH, tH := res.Block.Hash(), l.Hash(); !bytes.Equal(bH, tH) { + return nil, fmt.Errorf("block header %X does not match with trusted header %X", + bH, tH) + } + + return res, nil +} + +// BlockResults returns the block results for the given height. If no height is +// provided, the results of the block preceding the latest are returned. +func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { + var h int64 + if height == nil { + res, err := c.next.Status(ctx) + if err != nil { + return nil, fmt.Errorf("can't get latest height: %w", err) + } + // Can't return the latest block results here because we won't be able to + // prove them. Return the results for the previous block instead. + h = res.SyncInfo.LatestBlockHeight - 1 + } else { + h = *height + } + + res, err := c.next.BlockResults(ctx, &h) + if err != nil { + return nil, err + } + + // Validate res. + if res.Height <= 0 { + return nil, errNegOrZeroHeight + } + + // Update the light client if we're behind. + trustedBlock, err := c.updateLightClientIfNeededTo(ctx, h+1) + if err != nil { + return nil, err + } + + // proto-encode BeginBlock events + bbeBytes, err := proto.Marshal(&abci.ResponseBeginBlock{ + Events: res.BeginBlockEvents, + }) + if err != nil { + return nil, err + } + + // Build a Merkle tree of proto-encoded DeliverTx results and get a hash. + results := types.NewResults(res.TxsResults) + + // proto-encode EndBlock events. + ebeBytes, err := proto.Marshal(&abci.ResponseEndBlock{ + Events: res.EndBlockEvents, + }) + if err != nil { + return nil, err + } + + // Build a Merkle tree out of the above 3 binary slices. + rH := merkle.HashFromByteSlices([][]byte{bbeBytes, results.Hash(), ebeBytes}) + + // Verify block results. + if !bytes.Equal(rH, trustedBlock.LastResultsHash) { + return nil, fmt.Errorf("last results %X does not match with trusted last results %X", + rH, trustedBlock.LastResultsHash) + } + + return res, nil +} + +func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { + // Update the light client if we're behind and retrieve the light block at the requested height + l, err := c.updateLightClientIfNeededTo(ctx, *height) + if err != nil { + return nil, err + } + + return &ctypes.ResultCommit{ + SignedHeader: *l.SignedHeader, + CanonicalCommit: true, + }, nil +} + +// Tx calls rpcclient#Tx method and then verifies the proof if such was +// requested. +func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { + res, err := c.next.Tx(ctx, hash, prove) + if err != nil || !prove { + return res, err + } + + // Validate res. + if res.Height <= 0 { + return nil, errNegOrZeroHeight + } + + // Update the light client if we're behind. + l, err := c.updateLightClientIfNeededTo(ctx, res.Height) + if err != nil { + return nil, err + } + + // Validate the proof. + return res, res.Proof.Validate(l.DataHash) +} + +func (c *Client) TxSearch(ctx context.Context, query string, prove bool, page, perPage *int, orderBy string) ( + *ctypes.ResultTxSearch, error) { + return c.next.TxSearch(ctx, query, prove, page, perPage, orderBy) +} + +// Validators fetches and verifies validators. +func (c *Client) Validators(ctx context.Context, height *int64, pagePtr, perPagePtr *int) (*ctypes.ResultValidators, + error) { + // Update the light client if we're behind and retrieve the light block at the requested height. 
+ l, err := c.updateLightClientIfNeededTo(ctx, *height) + if err != nil { + return nil, err + } + + totalCount := len(l.ValidatorSet.Validators) + perPage := validatePerPage(perPagePtr) + page, err := validatePage(pagePtr, perPage, totalCount) + if err != nil { + return nil, err + } + + skipCount := validateSkipCount(page, perPage) + + v := l.ValidatorSet.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] + + return &ctypes.ResultValidators{ + BlockHeight: *height, + Validators: v, + Count: len(v), + Total: totalCount}, nil +} + +func (c *Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { + return c.next.BroadcastEvidence(ctx, ev) +} + +func (c *Client) Subscribe(ctx context.Context, subscriber, query string, + outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { + return c.next.Subscribe(ctx, subscriber, query, outCapacity...) +} + +func (c *Client) Unsubscribe(ctx context.Context, subscriber, query string) error { + return c.next.Unsubscribe(ctx, subscriber, query) +} + +func (c *Client) UnsubscribeAll(ctx context.Context, subscriber string) error { + return c.next.UnsubscribeAll(ctx, subscriber) +} + +func (c *Client) updateLightClientIfNeededTo(ctx context.Context, height int64) (*types.LightBlock, error) { + l, err := c.lc.VerifyLightBlockAtHeight(ctx, height, time.Now()) + if err != nil { + return nil, fmt.Errorf("failed to update light client to %d: %w", height, err) + } + return l, nil +} + +func (c *Client) RegisterOpDecoder(typ string, dec merkle.OpDecoder) { + c.prt.RegisterOpDecoder(typ, dec) +} + +// SubscribeWS subscribes for events using the given query and remote address as +// a subscriber, but does not verify responses (UNSAFE)! +// TODO: verify data +func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { + out, err := c.next.Subscribe(context.Background(), ctx.RemoteAddr(), query) + if err != nil { + return nil, err + } + + go func() { + for { + select { + case resultEvent := <-out: + // We should have a switch here that performs a validation + // depending on the event's type. + ctx.WSConn.TryWriteRPCResponse( + rpctypes.NewRPCSuccessResponse( + rpctypes.JSONRPCStringID(fmt.Sprintf("%v#event", ctx.JSONReq.ID)), + resultEvent, + )) + case <-c.Quit(): + return + } + } + }() + + return &ctypes.ResultSubscribe{}, nil +} + +// UnsubscribeWS calls original client's Unsubscribe using remote address as a +// subscriber. +func (c *Client) UnsubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { + err := c.next.Unsubscribe(context.Background(), ctx.RemoteAddr(), query) + if err != nil { + return nil, err + } + return &ctypes.ResultUnsubscribe{}, nil +} + +// UnsubscribeAllWS calls original client's UnsubscribeAll using remote address +// as a subscriber. 
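The Validators pagination above leans on the helpers copied from rpc/core (shown below); the slice arithmetic works out as in this sketch (paginate is a hypothetical stand-alone version of the same bounds computation):

// paginate reproduces the slice bounds Validators computes: with
// totalCount=5 and perPage=2, page 2 yields validators[2:4].
func paginate(totalCount, page, perPage int) (from, to int) {
	skipCount := (page - 1) * perPage // validateSkipCount(page, perPage)
	n := perPage                      // tmmath.MinInt(perPage, totalCount-skipCount)
	if totalCount-skipCount < n {
		n = totalCount - skipCount
	}
	return skipCount, skipCount + n
}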
+func (c *Client) UnsubscribeAllWS(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { + err := c.next.UnsubscribeAll(context.Background(), ctx.RemoteAddr()) + if err != nil { + return nil, err + } + return &ctypes.ResultUnsubscribe{}, nil +} + +// XXX: Copied from rpc/core/env.go +const ( + // see README + defaultPerPage = 30 + maxPerPage = 100 +) + +func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { + if perPage < 1 { + panic(fmt.Sprintf("zero or negative perPage: %d", perPage)) + } + + if pagePtr == nil { // no page parameter + return 1, nil + } + + pages := ((totalCount - 1) / perPage) + 1 + if pages == 0 { + pages = 1 // one page (even if it's empty) + } + page := *pagePtr + if page <= 0 || page > pages { + return 1, fmt.Errorf("page should be within [1, %d] range, given %d", pages, page) + } + + return page, nil +} + +func validatePerPage(perPagePtr *int) int { + if perPagePtr == nil { // no per_page parameter + return defaultPerPage + } + + perPage := *perPagePtr + if perPage < 1 { + return defaultPerPage + } else if perPage > maxPerPage { + return maxPerPage + } + return perPage +} + +func validateSkipCount(page, perPage int) int { + skipCount := (page - 1) * perPage + if skipCount < 0 { + return 0 + } + + return skipCount +} diff --git a/light/rpc/client_test.go b/light/rpc/client_test.go new file mode 100644 index 000000000..f614f44d3 --- /dev/null +++ b/light/rpc/client_test.go @@ -0,0 +1,152 @@ +package rpc + +import ( + "context" + "encoding/hex" + "fmt" + "testing" + + ics23 "github.com/confio/ics23/go" + "github.com/cosmos/iavl" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/libs/bytes" + lcmock "github.com/tendermint/tendermint/light/rpc/mocks" + tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" + rpcmock "github.com/tendermint/tendermint/rpc/client/mocks" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/types" +) + +// TestABCIQuery tests ABCIQuery requests and verifies proofs. 
HAPPY PATH 😀 +func TestABCIQuery(t *testing.T) { + tree, err := iavl.NewMutableTree(dbm.NewMemDB(), 100) + require.NoError(t, err) + + var ( + key = []byte("foo") + value = []byte("bar") + ) + tree.Set(key, value) + + commitmentProof, err := tree.GetMembershipProof(key) + require.NoError(t, err) + + op := &testOp{ + Spec: ics23.IavlSpec, + Key: key, + Proof: commitmentProof, + } + + next := &rpcmock.Client{} + next.On( + "ABCIQueryWithOptions", + context.Background(), + mock.AnythingOfType("string"), + bytes.HexBytes(key), + mock.AnythingOfType("client.ABCIQueryOptions"), + ).Return(&ctypes.ResultABCIQuery{ + Response: abci.ResponseQuery{ + Code: 0, + Key: key, + Value: value, + Height: 1, + ProofOps: &tmcrypto.ProofOps{ + Ops: []tmcrypto.ProofOp{op.ProofOp()}, + }, + }, + }, nil) + + lc := &lcmock.LightClient{} + appHash, _ := hex.DecodeString("5EFD44055350B5CC34DBD26085347A9DBBE44EA192B9286A9FC107F40EA1FAC5") + lc.On("VerifyLightBlockAtHeight", context.Background(), int64(2), mock.AnythingOfType("time.Time")).Return( + &types.LightBlock{ + SignedHeader: &types.SignedHeader{ + Header: &types.Header{AppHash: appHash}, + }, + }, + nil, + ) + + c := NewClient(next, lc, + KeyPathFn(func(_ string, key []byte) (merkle.KeyPath, error) { + kp := merkle.KeyPath{} + kp = kp.AppendKey(key, merkle.KeyEncodingURL) + return kp, nil + })) + c.RegisterOpDecoder("ics23:iavl", testOpDecoder) + res, err := c.ABCIQuery(context.Background(), "/store/accounts/key", key) + require.NoError(t, err) + + assert.NotNil(t, res) +} + +type testOp struct { + Spec *ics23.ProofSpec + Key []byte + Proof *ics23.CommitmentProof +} + +var _ merkle.ProofOperator = testOp{} + +func (op testOp) GetKey() []byte { + return op.Key +} + +func (op testOp) ProofOp() tmcrypto.ProofOp { + bz, err := op.Proof.Marshal() + if err != nil { + panic(err.Error()) + } + return tmcrypto.ProofOp{ + Type: "ics23:iavl", + Key: op.Key, + Data: bz, + } +} + +func (op testOp) Run(args [][]byte) ([][]byte, error) { + // calculate root from proof + root, err := op.Proof.Calculate() + if err != nil { + return nil, fmt.Errorf("could not calculate root for proof: %v", err) + } + // Only support an existence proof or nonexistence proof (batch proofs currently unsupported) + switch len(args) { + case 0: + // Args are nil, so we verify the absence of the key. + absent := ics23.VerifyNonMembership(op.Spec, root, op.Proof, op.Key) + if !absent { + return nil, fmt.Errorf("proof did not verify absence of key: %s", string(op.Key)) + } + case 1: + // Args is length 1, verify existence of key with value args[0] + if !ics23.VerifyMembership(op.Spec, root, op.Proof, op.Key, args[0]) { + return nil, fmt.Errorf("proof did not verify existence of key %s with given value %x", op.Key, args[0]) + } + default: + return nil, fmt.Errorf("args must be length 0 or 1, got: %d", len(args)) + } + + return [][]byte{root}, nil +} + +func testOpDecoder(pop tmcrypto.ProofOp) (merkle.ProofOperator, error) { + proof := &ics23.CommitmentProof{} + err := proof.Unmarshal(pop.Data) + if err != nil { + return nil, err + } + + op := testOp{ + Key: pop.Key, + Spec: ics23.IavlSpec, + Proof: proof, + } + return op, nil +} diff --git a/light/rpc/mocks/light_client.go b/light/rpc/mocks/light_client.go new file mode 100644 index 000000000..2f512d881 --- /dev/null +++ b/light/rpc/mocks/light_client.go @@ -0,0 +1,78 @@ +// Code generated by mockery v2.3.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + time "time" + + types "github.com/tendermint/tendermint/types" +) + +// LightClient is an autogenerated mock type for the LightClient type +type LightClient struct { + mock.Mock +} + +// ChainID provides a mock function with given fields: +func (_m *LightClient) ChainID() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// TrustedLightBlock provides a mock function with given fields: height +func (_m *LightClient) TrustedLightBlock(height int64) (*types.LightBlock, error) { + ret := _m.Called(height) + + var r0 *types.LightBlock + if rf, ok := ret.Get(0).(func(int64) *types.LightBlock); ok { + r0 = rf(height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.LightBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// VerifyLightBlockAtHeight provides a mock function with given fields: ctx, height, now +func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int64, now time.Time) (*types.LightBlock, error) { + ret := _m.Called(ctx, height, now) + + var r0 *types.LightBlock + if rf, ok := ret.Get(0).(func(context.Context, int64, time.Time) *types.LightBlock); ok { + r0 = rf(ctx, height, now) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.LightBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int64, time.Time) error); ok { + r1 = rf(ctx, height, now) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/lite2/setup.go b/light/setup.go similarity index 78% rename from lite2/setup.go rename to light/setup.go index 50a4a9d21..af72301b0 100644 --- a/lite2/setup.go +++ b/light/setup.go @@ -1,20 +1,22 @@ -package lite +package light import ( + "context" "time" - "github.com/tendermint/tendermint/lite2/provider" - "github.com/tendermint/tendermint/lite2/provider/http" - "github.com/tendermint/tendermint/lite2/store" + "github.com/tendermint/tendermint/light/provider" + "github.com/tendermint/tendermint/light/provider/http" + "github.com/tendermint/tendermint/light/store" ) -// NewHTTPClient initiates an instance of a lite client using HTTP addresses -// for both the primary provider and witnesses of the lite client. A trusted +// NewHTTPClient initiates an instance of a light client using HTTP addresses +// for both the primary provider and witnesses of the light client. A trusted // header and hash must be passed to initialize the client. // // See all Option(s) for the additional configuration. // See NewClient. func NewHTTPClient( + ctx context.Context, chainID string, trustOptions TrustOptions, primaryAddress string, @@ -28,6 +30,7 @@ func NewHTTPClient( } return NewClient( + ctx, chainID, trustOptions, providers[len(providers)-1], @@ -36,7 +39,7 @@ func NewHTTPClient( options...) } -// NewHTTPClientFromTrustedStore initiates an instance of a lite client using +// NewHTTPClientFromTrustedStore initiates an instance of a light client using // HTTP addresses for both the primary provider and witnesses and uses a // trusted store as the root of trust. 
// diff --git a/light/store/db/db.go b/light/store/db/db.go new file mode 100644 index 000000000..adbb33871 --- /dev/null +++ b/light/store/db/db.go @@ -0,0 +1,330 @@ +package db + +import ( + "encoding/binary" + "fmt" + "regexp" + "strconv" + + dbm "github.com/tendermint/tm-db" + + tmsync "github.com/tendermint/tendermint/libs/sync" + "github.com/tendermint/tendermint/light/store" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" +) + +var ( + sizeKey = []byte("size") +) + +type dbs struct { + db dbm.DB + prefix string + + mtx tmsync.RWMutex + size uint16 +} + +// New returns a Store that wraps any DB (with an optional prefix in case you +// want to use one DB with many light clients). +func New(db dbm.DB, prefix string) store.Store { + + size := uint16(0) + bz, err := db.Get(sizeKey) + if err == nil && len(bz) > 0 { + size = unmarshalSize(bz) + } + + return &dbs{db: db, prefix: prefix, size: size} +} + +// SaveLightBlock persists LightBlock to the db. +// +// Safe for concurrent use by multiple goroutines. +func (s *dbs) SaveLightBlock(lb *types.LightBlock) error { + if lb.Height <= 0 { + panic("negative or zero height") + } + + lbpb, err := lb.ToProto() + if err != nil { + return fmt.Errorf("unable to convert light block to protobuf: %w", err) + } + + lbBz, err := lbpb.Marshal() + if err != nil { + return fmt.Errorf("marshalling LightBlock: %w", err) + } + + s.mtx.Lock() + defer s.mtx.Unlock() + + b := s.db.NewBatch() + defer b.Close() + if err = b.Set(s.lbKey(lb.Height), lbBz); err != nil { + return err + } + if err = b.Set(sizeKey, marshalSize(s.size+1)); err != nil { + return err + } + if err = b.WriteSync(); err != nil { + return err + } + s.size++ + + return nil +} + +// DeleteLightBlock deletes the LightBlock from +// the db. +// +// Safe for concurrent use by multiple goroutines. +func (s *dbs) DeleteLightBlock(height int64) error { + if height <= 0 { + panic("negative or zero height") + } + + s.mtx.Lock() + defer s.mtx.Unlock() + + b := s.db.NewBatch() + defer b.Close() + if err := b.Delete(s.lbKey(height)); err != nil { + return err + } + if err := b.Set(sizeKey, marshalSize(s.size-1)); err != nil { + return err + } + if err := b.WriteSync(); err != nil { + return err + } + s.size-- + + return nil +} + +// LightBlock retrieves the LightBlock at the given height. +// +// Safe for concurrent use by multiple goroutines. +func (s *dbs) LightBlock(height int64) (*types.LightBlock, error) { + if height <= 0 { + panic("negative or zero height") + } + + bz, err := s.db.Get(s.lbKey(height)) + if err != nil { + panic(err) + } + if len(bz) == 0 { + return nil, store.ErrLightBlockNotFound + } + + var lbpb tmproto.LightBlock + err = lbpb.Unmarshal(bz) + if err != nil { + return nil, fmt.Errorf("unmarshal error: %w", err) + } + + lightBlock, err := types.LightBlockFromProto(&lbpb) + if err != nil { + return nil, fmt.Errorf("proto conversion error: %w", err) + } + + return lightBlock, err +} + +// LastLightBlockHeight returns the last LightBlock height stored. +// +// Safe for concurrent use by multiple goroutines.
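A sketch of the store in use (the prefix is arbitrary; lightdb is an assumed import alias for this light/store/db package, with dbm and types imported as above):

// storeRoundTrip saves one light block and reads it back. With prefix
// "client-1" and height 42 the underlying key is
// "lb/client-1/00000000000000000042".
func storeRoundTrip(lb *types.LightBlock) (*types.LightBlock, error) {
	s := lightdb.New(dbm.NewMemDB(), "client-1")
	if err := s.SaveLightBlock(lb); err != nil {
		return nil, err
	}
	return s.LightBlock(lb.Height)
}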
+func (s *dbs) LastLightBlockHeight() (int64, error) { + itr, err := s.db.ReverseIterator( + s.lbKey(1), + append(s.lbKey(1<<63-1), byte(0x00)), + ) + if err != nil { + panic(err) + } + defer itr.Close() + + for itr.Valid() { + key := itr.Key() + _, height, ok := parseLbKey(key) + if ok { + return height, nil + } + itr.Next() + } + + return -1, itr.Error() +} + +// FirstLightBlockHeight returns the first LightBlock height stored. +// +// Safe for concurrent use by multiple goroutines. +func (s *dbs) FirstLightBlockHeight() (int64, error) { + itr, err := s.db.Iterator( + s.lbKey(1), + append(s.lbKey(1<<63-1), byte(0x00)), + ) + if err != nil { + panic(err) + } + defer itr.Close() + + for itr.Valid() { + key := itr.Key() + _, height, ok := parseLbKey(key) + if ok { + return height, nil + } + itr.Next() + } + + return -1, itr.Error() +} + +// LightBlockBefore iterates over light blocks until it finds a block before +// the given height. It returns ErrLightBlockNotFound if no such block exists. +// +// Safe for concurrent use by multiple goroutines. +func (s *dbs) LightBlockBefore(height int64) (*types.LightBlock, error) { + if height <= 0 { + panic("negative or zero height") + } + + itr, err := s.db.ReverseIterator( + s.lbKey(1), + s.lbKey(height), + ) + if err != nil { + panic(err) + } + defer itr.Close() + + for itr.Valid() { + key := itr.Key() + _, existingHeight, ok := parseLbKey(key) + if ok { + return s.LightBlock(existingHeight) + } + itr.Next() + } + if err = itr.Error(); err != nil { + return nil, err + } + + return nil, store.ErrLightBlockNotFound +} + +// Prune prunes stored light blocks until at most size of them are +// left. +// +// Safe for concurrent use by multiple goroutines. +func (s *dbs) Prune(size uint16) error { + // 1) Check how many we need to prune. + s.mtx.RLock() + sSize := s.size + s.mtx.RUnlock() + + if sSize <= size { // nothing to prune + return nil + } + numToPrune := sSize - size + + // 2) Iterate over headers and perform a batch operation. + itr, err := s.db.Iterator( + s.lbKey(1), + append(s.lbKey(1<<63-1), byte(0x00)), + ) + if err != nil { + return err + } + defer itr.Close() + + b := s.db.NewBatch() + defer b.Close() + + pruned := 0 + for itr.Valid() && numToPrune > 0 { + key := itr.Key() + _, height, ok := parseLbKey(key) + if ok { + if err = b.Delete(s.lbKey(height)); err != nil { + return err + } + } + itr.Next() + numToPrune-- + pruned++ + } + if err = itr.Error(); err != nil { + return err + } + + err = b.WriteSync() + if err != nil { + return err + } + + // 3) Update size. + s.mtx.Lock() + defer s.mtx.Unlock() + + s.size -= uint16(pruned) + + if wErr := s.db.SetSync(sizeKey, marshalSize(s.size)); wErr != nil { + return fmt.Errorf("failed to persist size: %w", wErr) + } + + return nil +} + +// Size returns the number of light blocks currently stored. +// +// Safe for concurrent use by multiple goroutines. +func (s *dbs) Size() uint16 { + s.mtx.RLock() + defer s.mtx.RUnlock() + return s.size +} + +func (s *dbs) lbKey(height int64) []byte { + return []byte(fmt.Sprintf("lb/%s/%020d", s.prefix, height)) +} + +var keyPattern = regexp.MustCompile(`^(lb)/([^/]*)/([0-9]+)$`) + +func parseKey(key []byte) (part string, prefix string, height int64, ok bool) { + submatch := keyPattern.FindSubmatch(key) + if submatch == nil { + return "", "", 0, false + } + part = string(submatch[1]) + prefix = string(submatch[2]) + height, err := strconv.ParseInt(string(submatch[3]), 10, 64) + if err != nil { + return "", "", 0, false + } + ok = true // good!
+ return +} + +func parseLbKey(key []byte) (prefix string, height int64, ok bool) { + var part string + part, prefix, height, ok = parseKey(key) + if part != "lb" { + return "", 0, false + } + return +} + +func marshalSize(size uint16) []byte { + bs := make([]byte, 2) + binary.LittleEndian.PutUint16(bs, size) + return bs +} + +func unmarshalSize(bz []byte) uint16 { + return binary.LittleEndian.Uint16(bz) +} diff --git a/light/store/db/db_test.go b/light/store/db/db_test.go new file mode 100644 index 000000000..ef9710694 --- /dev/null +++ b/light/store/db/db_test.go @@ -0,0 +1,197 @@ +package db + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/tmhash" + tmrand "github.com/tendermint/tendermint/libs/rand" + tmversion "github.com/tendermint/tendermint/proto/tendermint/version" + "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" +) + +func TestLast_FirstLightBlockHeight(t *testing.T) { + dbStore := New(dbm.NewMemDB(), "TestLast_FirstLightBlockHeight") + + // Empty store + height, err := dbStore.LastLightBlockHeight() + require.NoError(t, err) + assert.EqualValues(t, -1, height) + + height, err = dbStore.FirstLightBlockHeight() + require.NoError(t, err) + assert.EqualValues(t, -1, height) + + // 1 key + err = dbStore.SaveLightBlock(randLightBlock(int64(1))) + require.NoError(t, err) + + height, err = dbStore.LastLightBlockHeight() + require.NoError(t, err) + assert.EqualValues(t, 1, height) + + height, err = dbStore.FirstLightBlockHeight() + require.NoError(t, err) + assert.EqualValues(t, 1, height) +} + +func Test_SaveLightBlock(t *testing.T) { + dbStore := New(dbm.NewMemDB(), "Test_SaveLightBlockAndValidatorSet") + + // Empty store + h, err := dbStore.LightBlock(1) + require.Error(t, err) + assert.Nil(t, h) + + // 1 key + err = dbStore.SaveLightBlock(randLightBlock(1)) + require.NoError(t, err) + + size := dbStore.Size() + assert.Equal(t, uint16(1), size) + t.Log(size) + + h, err = dbStore.LightBlock(1) + require.NoError(t, err) + assert.NotNil(t, h) + + // Empty store + err = dbStore.DeleteLightBlock(1) + require.NoError(t, err) + + h, err = dbStore.LightBlock(1) + require.Error(t, err) + assert.Nil(t, h) + +} + +func Test_LightBlockBefore(t *testing.T) { + dbStore := New(dbm.NewMemDB(), "Test_LightBlockBefore") + + assert.Panics(t, func() { + _, _ = dbStore.LightBlockBefore(0) + _, _ = dbStore.LightBlockBefore(100) + }) + + err := dbStore.SaveLightBlock(randLightBlock(int64(2))) + require.NoError(t, err) + + h, err := dbStore.LightBlockBefore(3) + require.NoError(t, err) + if assert.NotNil(t, h) { + assert.EqualValues(t, 2, h.Height) + } +} + +func Test_Prune(t *testing.T) { + dbStore := New(dbm.NewMemDB(), "Test_Prune") + + // Empty store + assert.EqualValues(t, 0, dbStore.Size()) + err := dbStore.Prune(0) + require.NoError(t, err) + + // One header + err = dbStore.SaveLightBlock(randLightBlock(2)) + require.NoError(t, err) + + assert.EqualValues(t, 1, dbStore.Size()) + + err = dbStore.Prune(1) + require.NoError(t, err) + assert.EqualValues(t, 1, dbStore.Size()) + + err = dbStore.Prune(0) + require.NoError(t, err) + assert.EqualValues(t, 0, dbStore.Size()) + + // Multiple headers + for i := 1; i <= 10; i++ { + err = dbStore.SaveLightBlock(randLightBlock(int64(i))) + require.NoError(t, err) + } + + err = dbStore.Prune(11) + require.NoError(t, err) + 
assert.EqualValues(t, 10, dbStore.Size()) + + err = dbStore.Prune(7) + require.NoError(t, err) + assert.EqualValues(t, 7, dbStore.Size()) +} + +func Test_Concurrency(t *testing.T) { + dbStore := New(dbm.NewMemDB(), "Test_Concurrency") + + var wg sync.WaitGroup + for i := 1; i <= 100; i++ { + wg.Add(1) + go func(i int64) { + defer wg.Done() + + err := dbStore.SaveLightBlock(randLightBlock(i)) + require.NoError(t, err) + + _, err = dbStore.LightBlock(i) + if err != nil { + t.Log(err) + } + + _, err = dbStore.LastLightBlockHeight() + if err != nil { + t.Log(err) + } + _, err = dbStore.FirstLightBlockHeight() + if err != nil { + t.Log(err) + } + + err = dbStore.Prune(2) + if err != nil { + t.Log(err) + } + _ = dbStore.Size() + + err = dbStore.DeleteLightBlock(1) + if err != nil { + t.Log(err) + } + }(int64(i)) + } + + wg.Wait() +} + +func randLightBlock(height int64) *types.LightBlock { + vals, _ := types.RandValidatorSet(2, 1) + return &types.LightBlock{ + SignedHeader: &types.SignedHeader{ + Header: &types.Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol, App: 0}, + ChainID: tmrand.Str(12), + Height: height, + Time: time.Now(), + LastBlockID: types.BlockID{}, + LastCommitHash: crypto.CRandBytes(tmhash.Size), + DataHash: crypto.CRandBytes(tmhash.Size), + ValidatorsHash: crypto.CRandBytes(tmhash.Size), + NextValidatorsHash: crypto.CRandBytes(tmhash.Size), + ConsensusHash: crypto.CRandBytes(tmhash.Size), + AppHash: crypto.CRandBytes(tmhash.Size), + LastResultsHash: crypto.CRandBytes(tmhash.Size), + EvidenceHash: crypto.CRandBytes(tmhash.Size), + ProposerAddress: crypto.CRandBytes(crypto.AddressSize), + }, + Commit: &types.Commit{}, + }, + ValidatorSet: vals, + } +} diff --git a/light/store/errors.go b/light/store/errors.go new file mode 100644 index 000000000..099b5964d --- /dev/null +++ b/light/store/errors.go @@ -0,0 +1,9 @@ +package store + +import "errors" + +var ( + // ErrLightBlockNotFound is returned when a store does not have the + // requested header. + ErrLightBlockNotFound = errors.New("light block not found") +) diff --git a/light/store/store.go b/light/store/store.go new file mode 100644 index 000000000..7c29f233d --- /dev/null +++ b/light/store/store.go @@ -0,0 +1,48 @@ +package store + +import "github.com/tendermint/tendermint/types" + +// Store is anything that can persistently store headers. +type Store interface { + // SaveLightBlock saves a LightBlock (height: lb.Height). + // + // height must be > 0. + SaveLightBlock(lb *types.LightBlock) error + + // DeleteLightBlock deletes the LightBlock at the given height. + // + // height must be > 0. + DeleteLightBlock(height int64) error + + // LightBlock returns the LightBlock that corresponds to the given + // height. + // + // height must be > 0. + // + // If LightBlock is not found, ErrLightBlockNotFound is returned. + LightBlock(height int64) (*types.LightBlock, error) + + // LastLightBlockHeight returns the last (newest) LightBlock height. + // + // If the store is empty, -1 and nil error are returned. + LastLightBlockHeight() (int64, error) + + // FirstLightBlockHeight returns the first (oldest) LightBlock height. + // + // If the store is empty, -1 and nil error are returned. + FirstLightBlockHeight() (int64, error) + + // LightBlockBefore returns the LightBlock before a certain height. + // + // height must be > 0 && <= LastLightBlockHeight.
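A note on the key scheme defined in db.go above: heights are zero-padded to 20 digits precisely so that the DB's lexicographic key order coincides with numeric height order, which is what makes the forward and reverse iterators in First/LastLightBlockHeight correct. A self-contained round trip mirroring lbKey/parseLbKey (the "client-a" prefix is just an example value):

    package main

    import (
        "fmt"
        "regexp"
        "strconv"
    )

    var keyPattern = regexp.MustCompile(`^(lb)/([^/]*)/([0-9]+)$`)

    func lbKey(prefix string, height int64) []byte {
        // %020d pads to 20 digits, enough for any int64 height.
        return []byte(fmt.Sprintf("lb/%s/%020d", prefix, height))
    }

    func parseLbKey(key []byte) (prefix string, height int64, ok bool) {
        m := keyPattern.FindSubmatch(key)
        if m == nil {
            return "", 0, false
        }
        height, err := strconv.ParseInt(string(m[3]), 10, 64)
        if err != nil {
            return "", 0, false
        }
        return string(m[2]), height, true
    }

    func main() {
        k := lbKey("client-a", 42)
        fmt.Println(string(k)) // lb/client-a/00000000000000000042
        p, h, ok := parseLbKey(k)
        fmt.Println(p, h, ok) // client-a 42 true
    }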
+ LightBlockBefore(height int64) (*types.LightBlock, error) + + // Prune removes headers & the associated validator sets until only the given + // number of header & validator set pairs remains. + Prune(size uint16) error + + // Size returns the number of currently existing header & validator set pairs. + Size() uint16 +} diff --git a/lite2/trust_options.go b/light/trust_options.go similarity index 93% rename from lite2/trust_options.go rename to light/trust_options.go index 7bd36fe5c..cbf3b1cd8 100644 --- a/lite2/trust_options.go +++ b/light/trust_options.go @@ -1,10 +1,10 @@ -package lite +package light import ( + "errors" + "fmt" "time" - "github.com/pkg/errors" - "github.com/tendermint/tendermint/crypto/tmhash" ) @@ -44,7 +44,7 @@ func (opts TrustOptions) ValidateBasic() error { return errors.New("negative or zero height") } if len(opts.Hash) != tmhash.Size { - return errors.Errorf("expected hash size to be %d bytes, got %d bytes", + return fmt.Errorf("expected hash size to be %d bytes, got %d bytes", tmhash.Size, len(opts.Hash), ) diff --git a/lite2/verifier.go b/light/verifier.go similarity index 72% rename from lite2/verifier.go rename to light/verifier.go index 1ef54677b..0ea4c7332 100644 --- a/lite2/verifier.go +++ b/light/verifier.go @@ -1,11 +1,11 @@ -package lite +package light import ( "bytes" + "errors" + "fmt" "time" - "github.com/pkg/errors" - tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/types" ) @@ -29,8 +29,8 @@ var ( // // maxClockDrift defines how much untrustedHeader.Time can drift into the // future. +// trustedHeader must have a ChainID, Height and Time func VerifyNonAdjacent( - chainID string, trustedHeader *types.SignedHeader, // height=X trustedVals *types.ValidatorSet, // height=X or height=X+1 untrustedHeader *types.SignedHeader, // height=Y @@ -40,6 +40,8 @@ func VerifyNonAdjacent( maxClockDrift time.Duration, trustLevel tmmath.Fraction) error { + checkRequiredHeaderFields(trustedHeader) + if untrustedHeader.Height == trustedHeader.Height+1 { return errors.New("headers must be non adjacent in height") } @@ -49,7 +51,6 @@ func VerifyNonAdjacent( } if err := verifyNewHeaderAndVals( - chainID, untrustedHeader, untrustedVals, trustedHeader, now, maxClockDrift); err != nil { @@ -57,8 +58,7 @@ func VerifyNonAdjacent( } // Ensure that +`trustLevel` (default 1/3) or more of last trusted validators signed correctly. - err := trustedVals.VerifyCommitTrusting(chainID, untrustedHeader.Commit.BlockID, untrustedHeader.Height, - untrustedHeader.Commit, trustLevel) + err := trustedVals.VerifyCommitLightTrusting(trustedHeader.ChainID, untrustedHeader.Commit, trustLevel) if err != nil { switch e := err.(type) { case types.ErrNotEnoughVotingPowerSigned: @@ -73,8 +73,8 @@ func VerifyNonAdjacent( // NOTE: this should always be the last check because untrustedVals can be // intentionally made very large to DOS the light client. not the case for // VerifyAdjacent, where validator set is known in advance. - if err := untrustedVals.VerifyCommit(chainID, untrustedHeader.Commit.BlockID, untrustedHeader.Height, - untrustedHeader.Commit); err != nil { + if err := untrustedVals.VerifyCommitLight(trustedHeader.ChainID, untrustedHeader.Commit.BlockID, + untrustedHeader.Height, untrustedHeader.Commit); err != nil { return ErrInvalidHeader{err} } @@ -93,8 +93,8 @@ func VerifyNonAdjacent( // // maxClockDrift defines how much untrustedHeader.Time can drift into the // future.
+// trustedHeader must have a ChainID, Height, Time and NextValidatorsHash func VerifyAdjacent( - chainID string, trustedHeader *types.SignedHeader, // height=X untrustedHeader *types.SignedHeader, // height=X+1 untrustedVals *types.ValidatorSet, // height=X+1 @@ -102,6 +102,12 @@ func VerifyAdjacent( now time.Time, maxClockDrift time.Duration) error { + checkRequiredHeaderFields(trustedHeader) + + if len(trustedHeader.NextValidatorsHash) == 0 { + panic("next validators hash in trusted header is empty") + } + if untrustedHeader.Height != trustedHeader.Height+1 { return errors.New("headers must be adjacent in height") } @@ -111,7 +117,6 @@ func VerifyAdjacent( } if err := verifyNewHeaderAndVals( - chainID, untrustedHeader, untrustedVals, trustedHeader, now, maxClockDrift); err != nil { @@ -120,16 +125,16 @@ func VerifyAdjacent( // Check the validator hashes are the same if !bytes.Equal(untrustedHeader.ValidatorsHash, trustedHeader.NextValidatorsHash) { - err := errors.Errorf("expected old header next validators (%X) to match those from new header (%X)", + err := fmt.Errorf("expected old header next validators (%X) to match those from new header (%X)", trustedHeader.NextValidatorsHash, untrustedHeader.ValidatorsHash, ) - return err + return ErrInvalidHeader{err} } // Ensure that +2/3 of new validators signed correctly. - if err := untrustedVals.VerifyCommit(chainID, untrustedHeader.Commit.BlockID, untrustedHeader.Height, - untrustedHeader.Commit); err != nil { + if err := untrustedVals.VerifyCommitLight(trustedHeader.ChainID, untrustedHeader.Commit.BlockID, + untrustedHeader.Height, untrustedHeader.Commit); err != nil { return ErrInvalidHeader{err} } @@ -138,7 +143,6 @@ func VerifyAdjacent( // Verify combines both VerifyAdjacent and VerifyNonAdjacent functions. 
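On the two thresholds used above, before the Verify wrapper that follows: non-adjacent (skipping) verification accepts once strictly more than trustLevel — 1/3 by default — of the *previously trusted* voting power has signed the new commit, while adjacent verification demands the usual more-than-2/3 of the new validator set. A hedged arithmetic sketch of that "strictly greater than the fraction" rule (the helper is illustrative, not an API from this diff; integer division truncates, hence the +1):

    package main

    import "fmt"

    // powerNeeded returns the smallest voting power that is strictly greater
    // than total * num/den, mirroring the "more than trustLevel" acceptance rule.
    func powerNeeded(total, num, den int64) int64 {
        return total*num/den + 1
    }

    func main() {
        // With 100 total power and the default 1/3 trust level,
        // 34 is the minimum signed power that passes skipping verification.
        fmt.Println(powerNeeded(100, 1, 3)) // 34

        // Adjacent verification effectively uses 2/3: 67 of 100.
        fmt.Println(powerNeeded(100, 2, 3)) // 67
    }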
func Verify( - chainID string, trustedHeader *types.SignedHeader, // height=X trustedVals *types.ValidatorSet, // height=X or height=X+1 untrustedHeader *types.SignedHeader, // height=Y @@ -149,53 +153,11 @@ func Verify( trustLevel tmmath.Fraction) error { if untrustedHeader.Height != trustedHeader.Height+1 { - return VerifyNonAdjacent(chainID, trustedHeader, trustedVals, untrustedHeader, untrustedVals, + return VerifyNonAdjacent(trustedHeader, trustedVals, untrustedHeader, untrustedVals, trustingPeriod, now, maxClockDrift, trustLevel) } - return VerifyAdjacent(chainID, trustedHeader, untrustedHeader, untrustedVals, trustingPeriod, now, maxClockDrift) -} - -func verifyNewHeaderAndVals( - chainID string, - untrustedHeader *types.SignedHeader, - untrustedVals *types.ValidatorSet, - trustedHeader *types.SignedHeader, - now time.Time, - maxClockDrift time.Duration) error { - - if err := untrustedHeader.ValidateBasic(chainID); err != nil { - return errors.Wrap(err, "untrustedHeader.ValidateBasic failed") - } - - if untrustedHeader.Height <= trustedHeader.Height { - return errors.Errorf("expected new header height %d to be greater than one of old header %d", - untrustedHeader.Height, - trustedHeader.Height) - } - - if !untrustedHeader.Time.After(trustedHeader.Time) { - return errors.Errorf("expected new header time %v to be after old header time %v", - untrustedHeader.Time, - trustedHeader.Time) - } - - if !untrustedHeader.Time.Before(now.Add(maxClockDrift)) { - return errors.Errorf("new header has a time from the future %v (now: %v; max clock drift: %v)", - untrustedHeader.Time, - now, - maxClockDrift) - } - - if !bytes.Equal(untrustedHeader.ValidatorsHash, untrustedVals.Hash()) { - return errors.Errorf("expected new header validators (%X) to match those that were supplied (%X) at height %d", - untrustedHeader.ValidatorsHash, - untrustedVals.Hash(), - untrustedHeader.Height, - ) - } - - return nil + return VerifyAdjacent(trustedHeader, untrustedHeader, untrustedVals, trustingPeriod, now, maxClockDrift) } // ValidateTrustLevel checks that trustLevel is within the allowed range [1/3, @@ -205,7 +167,7 @@ func ValidateTrustLevel(lvl tmmath.Fraction) error { if lvl.Numerator*3 < lvl.Denominator || // < 1/3 lvl.Numerator > lvl.Denominator || // > 1 lvl.Denominator == 0 { - return errors.Errorf("trustLevel must be within [1/3, 1], given %v", lvl) + return fmt.Errorf("trustLevel must be within [1/3, 1], given %v", lvl) } return nil } @@ -225,24 +187,84 @@ func HeaderExpired(h *types.SignedHeader, trustingPeriod time.Duration, now time // of the trusted header // // For any of these cases ErrInvalidHeader is returned. 
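VerifyBackwards, which follows, anchors an older header to a trusted one without any signature checks: it relies on hash linking (the untrusted header must hash to the trusted header's LastBlockID) plus monotonically increasing time. A stripped-down sketch of those core checks, with a stand-in struct for the handful of types.Header fields involved (the struct and its Hash fields are assumptions for illustration):

    package main

    import (
        "bytes"
        "errors"
        "fmt"
        "time"
    )

    // hdr stands in for the few types.Header fields the check needs.
    type hdr struct {
        ChainID       string
        Time          time.Time
        Hash          []byte // hash of this header
        LastBlockHash []byte // hash of the previous header
    }

    func verifyBackwards(untrusted, trusted hdr) error {
        if untrusted.ChainID != trusted.ChainID {
            return errors.New("header belongs to another chain")
        }
        if !untrusted.Time.Before(trusted.Time) {
            return fmt.Errorf("expected older header time %v to be before %v",
                untrusted.Time, trusted.Time)
        }
        // The hash link is what makes the older header trustworthy:
        // the trusted header committed to it via LastBlockID.
        if !bytes.Equal(untrusted.Hash, trusted.LastBlockHash) {
            return errors.New("older header hash does not match trusted header's last block")
        }
        return nil
    }

    func main() {
        now := time.Now()
        older := hdr{ChainID: "demo", Time: now.Add(-time.Minute), Hash: []byte{0x01}}
        newer := hdr{ChainID: "demo", Time: now, LastBlockHash: []byte{0x01}}
        fmt.Println(verifyBackwards(older, newer)) // <nil>
    }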
-func VerifyBackwards(chainID string, untrustedHeader, trustedHeader *types.SignedHeader) error { - if err := untrustedHeader.ValidateBasic(chainID); err != nil { +func VerifyBackwards(untrustedHeader, trustedHeader *types.Header) error { + if err := untrustedHeader.ValidateBasic(); err != nil { return ErrInvalidHeader{err} } + if untrustedHeader.ChainID != trustedHeader.ChainID { + return ErrInvalidHeader{errors.New("header belongs to another chain")} + } + if !untrustedHeader.Time.Before(trustedHeader.Time) { return ErrInvalidHeader{ - errors.Errorf("expected older header time %v to be before new header time %v", + fmt.Errorf("expected older header time %v to be before new header time %v", untrustedHeader.Time, trustedHeader.Time)} } if !bytes.Equal(untrustedHeader.Hash(), trustedHeader.LastBlockID.Hash) { return ErrInvalidHeader{ - errors.Errorf("older header hash %X does not match trusted header's last block %X", + fmt.Errorf("older header hash %X does not match trusted header's last block %X", untrustedHeader.Hash(), trustedHeader.LastBlockID.Hash)} } return nil } + +func verifyNewHeaderAndVals( + untrustedHeader *types.SignedHeader, + untrustedVals *types.ValidatorSet, + trustedHeader *types.SignedHeader, + now time.Time, + maxClockDrift time.Duration) error { + + if err := untrustedHeader.ValidateBasic(trustedHeader.ChainID); err != nil { + return fmt.Errorf("untrustedHeader.ValidateBasic failed: %w", err) + } + + if untrustedHeader.Height <= trustedHeader.Height { + return fmt.Errorf("expected new header height %d to be greater than one of old header %d", + untrustedHeader.Height, + trustedHeader.Height) + } + + if !untrustedHeader.Time.After(trustedHeader.Time) { + return fmt.Errorf("expected new header time %v to be after old header time %v", + untrustedHeader.Time, + trustedHeader.Time) + } + + if !untrustedHeader.Time.Before(now.Add(maxClockDrift)) { + return fmt.Errorf("new header has a time from the future %v (now: %v; max clock drift: %v)", + untrustedHeader.Time, + now, + maxClockDrift) + } + + if !bytes.Equal(untrustedHeader.ValidatorsHash, untrustedVals.Hash()) { + return fmt.Errorf("expected new header validators (%X) to match those that were supplied (%X) at height %d", + untrustedHeader.ValidatorsHash, + untrustedVals.Hash(), + untrustedHeader.Height, + ) + } + + return nil +} + +func checkRequiredHeaderFields(h *types.SignedHeader) { + if h.Height == 0 { + panic("height in trusted header must be set (non-zero)") + } + + zeroTime := time.Time{} + if h.Time == zeroTime { + panic("time in trusted header must be set") + } + + if h.ChainID == "" { + panic("chain ID in trusted header must be set") + } +} diff --git a/lite2/verifier_test.go b/light/verifier_test.go similarity index 72% rename from lite2/verifier_test.go rename to light/verifier_test.go index 5a207321d..9e10810b2 100644 --- a/lite2/verifier_test.go +++ b/light/verifier_test.go @@ -1,4 +1,4 @@ -package lite_test +package light_test import ( "fmt" @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/assert" tmmath "github.com/tendermint/tendermint/libs/math" - lite "github.com/tendermint/tendermint/lite2" + "github.com/tendermint/tendermint/light" "github.com/tendermint/tendermint/types" ) @@ -29,7 +29,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { vals = keys.ToValidators(20, 10) bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") header = keys.GenSignedHeader(chainID, lastHeight, bTime, nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) +
hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) ) testCases := []struct { @@ -52,18 +52,17 @@ func TestVerifyAdjacentHeaders(t *testing.T) { // different chainID -> error 1: { keys.GenSignedHeader("different-chainID", nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), vals, 3 * time.Hour, bTime.Add(2 * time.Hour), nil, - "untrustedHeader.ValidateBasic failed: signedHeader belongs to another chain 'different-chainID' not" + - " 'TestVerifyAdjacentHeaders'", + "header belongs to another chain", }, // new header's time is before old header's time -> error 2: { keys.GenSignedHeader(chainID, nextHeight, bTime.Add(-1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), vals, 3 * time.Hour, bTime.Add(2 * time.Hour), @@ -73,7 +72,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { // new header's time is from the future -> error 3: { keys.GenSignedHeader(chainID, nextHeight, bTime.Add(3*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), vals, 3 * time.Hour, bTime.Add(2 * time.Hour), @@ -84,7 +83,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { 4: { keys.GenSignedHeader(chainID, nextHeight, bTime.Add(2*time.Hour).Add(maxClockDrift).Add(-1*time.Millisecond), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), vals, 3 * time.Hour, bTime.Add(2 * time.Hour), @@ -94,7 +93,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { // 3/3 signed -> no error 5: { keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), vals, 3 * time.Hour, bTime.Add(2 * time.Hour), @@ -104,7 +103,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { // 2/3 signed -> no error 6: { keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 1, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 1, len(keys)), vals, 3 * time.Hour, bTime.Add(2 * time.Hour), @@ -114,17 +113,17 @@ func TestVerifyAdjacentHeaders(t *testing.T) { // 1/3 signed -> error 7: { keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), len(keys)-1, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), len(keys)-1, len(keys)), vals, 3 * time.Hour, bTime.Add(2 * time.Hour), - lite.ErrInvalidHeader{Reason: types.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}}, + light.ErrInvalidHeader{Reason: types.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}}, "", }, // vals does not match with what we have -> error 8: { keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, keys.ToValidators(10, 1), vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), keys.ToValidators(10, 1), 
3 * time.Hour, bTime.Add(2 * time.Hour), @@ -134,7 +133,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { // vals are inconsistent with newHeader -> error 9: { keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), keys.ToValidators(10, 1), 3 * time.Hour, bTime.Add(2 * time.Hour), @@ -144,7 +143,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { // old header has expired -> error 10: { keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), keys.ToValidators(10, 1), 1 * time.Hour, bTime.Add(1 * time.Hour), @@ -156,7 +155,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { for i, tc := range testCases { tc := tc t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { - err := lite.VerifyAdjacent(chainID, header, tc.newHeader, tc.newVals, tc.trustingPeriod, tc.now, maxClockDrift) + err := light.VerifyAdjacent(header, tc.newHeader, tc.newVals, tc.trustingPeriod, tc.now, maxClockDrift) switch { case tc.expErr != nil && assert.Error(t, err): assert.Equal(t, tc.expErr, err) @@ -182,7 +181,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { vals = keys.ToValidators(20, 10) bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") header = keys.GenSignedHeader(chainID, lastHeight, bTime, nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) // 30, 40, 50 twoThirds = keys[1:] @@ -208,7 +207,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { // 3/3 new vals signed, 3/3 old vals present -> no error 0: { keys.GenSignedHeader(chainID, 3, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), vals, 3 * time.Hour, bTime.Add(2 * time.Hour), @@ -218,7 +217,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { // 2/3 new vals signed, 3/3 old vals present -> no error 1: { keys.GenSignedHeader(chainID, 4, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 1, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 1, len(keys)), vals, 3 * time.Hour, bTime.Add(2 * time.Hour), @@ -228,17 +227,17 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { // 1/3 new vals signed, 3/3 old vals present -> error 2: { keys.GenSignedHeader(chainID, 5, bTime.Add(1*time.Hour), nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), len(keys)-1, len(keys)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), len(keys)-1, len(keys)), vals, 3 * time.Hour, bTime.Add(2 * time.Hour), - lite.ErrInvalidHeader{types.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}}, + light.ErrInvalidHeader{types.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}}, "", }, // 3/3 new vals signed, 2/3 old vals present -> no error 3: { twoThirds.GenSignedHeader(chainID, 5, bTime.Add(1*time.Hour), nil, twoThirdsVals, twoThirdsVals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(twoThirds)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(twoThirds)), twoThirdsVals, 3 * 
time.Hour, bTime.Add(2 * time.Hour), @@ -248,7 +247,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { // 3/3 new vals signed, 1/3 old vals present -> no error 4: { oneThird.GenSignedHeader(chainID, 5, bTime.Add(1*time.Hour), nil, oneThirdVals, oneThirdVals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(oneThird)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(oneThird)), oneThirdVals, 3 * time.Hour, bTime.Add(2 * time.Hour), @@ -258,11 +257,11 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { // 3/3 new vals signed, less than 1/3 old vals present -> error 5: { lessThanOneThird.GenSignedHeader(chainID, 5, bTime.Add(1*time.Hour), nil, lessThanOneThirdVals, lessThanOneThirdVals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(lessThanOneThird)), + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(lessThanOneThird)), lessThanOneThirdVals, 3 * time.Hour, bTime.Add(2 * time.Hour), - lite.ErrNewValSetCantBeTrusted{types.ErrNotEnoughVotingPowerSigned{Got: 20, Needed: 46}}, + light.ErrNewValSetCantBeTrusted{types.ErrNotEnoughVotingPowerSigned{Got: 20, Needed: 46}}, "", }, } @@ -270,9 +269,9 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { for i, tc := range testCases { tc := tc t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { - err := lite.VerifyNonAdjacent(chainID, header, vals, tc.newHeader, tc.newVals, tc.trustingPeriod, + err := light.VerifyNonAdjacent(header, vals, tc.newHeader, tc.newVals, tc.trustingPeriod, tc.now, maxClockDrift, - lite.DefaultTrustLevel) + light.DefaultTrustLevel) switch { case tc.expErr != nil && assert.Error(t, err): @@ -298,10 +297,10 @@ func TestVerifyReturnsErrorIfTrustLevelIsInvalid(t *testing.T) { vals = keys.ToValidators(20, 10) bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") header = keys.GenSignedHeader(chainID, lastHeight, bTime, nil, vals, vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) ) - err := lite.Verify(chainID, header, vals, header, vals, 2*time.Hour, time.Now(), maxClockDrift, + err := light.Verify(header, vals, header, vals, 2*time.Hour, time.Now(), maxClockDrift, tmmath.Fraction{Numerator: 2, Denominator: 1}) assert.Error(t, err) } @@ -319,16 +318,14 @@ func TestValidateTrustLevel(t *testing.T) { 4: {tmmath.Fraction{Numerator: 4, Denominator: 5}, true}, // invalid - 5: {tmmath.Fraction{Numerator: 6, Denominator: 5}, false}, - 6: {tmmath.Fraction{Numerator: -1, Denominator: 3}, false}, - 7: {tmmath.Fraction{Numerator: 0, Denominator: 1}, false}, - 8: {tmmath.Fraction{Numerator: -1, Denominator: -3}, false}, - 9: {tmmath.Fraction{Numerator: 0, Denominator: 0}, false}, - 10: {tmmath.Fraction{Numerator: 1, Denominator: 0}, false}, + 5: {tmmath.Fraction{Numerator: 6, Denominator: 5}, false}, + 6: {tmmath.Fraction{Numerator: 0, Denominator: 1}, false}, + 7: {tmmath.Fraction{Numerator: 0, Denominator: 0}, false}, + 8: {tmmath.Fraction{Numerator: 1, Denominator: 0}, false}, } for _, tc := range testCases { - err := lite.ValidateTrustLevel(tc.lvl) + err := light.ValidateTrustLevel(tc.lvl) if !tc.valid { assert.Error(t, err) } else { diff --git a/lite/base_verifier.go b/lite/base_verifier.go deleted file mode 100644 index 6a2a50ab5..000000000 --- a/lite/base_verifier.go +++ /dev/null @@ -1,79 +0,0 @@ -package lite - -import ( - "bytes" - - "github.com/pkg/errors" - - lerr "github.com/tendermint/tendermint/lite/errors" - 
"github.com/tendermint/tendermint/types" -) - -var _ Verifier = (*BaseVerifier)(nil) - -// BaseVerifier lets us check the validity of SignedHeaders at height or -// later, requiring sufficient votes (> 2/3) from the given valset. -// To verify blocks produced by a blockchain with mutable validator sets, -// use the DynamicVerifier. -// TODO: Handle unbonding time. -type BaseVerifier struct { - chainID string - height int64 - valset *types.ValidatorSet -} - -// NewBaseVerifier returns a new Verifier initialized with a validator set at -// some height. -func NewBaseVerifier(chainID string, height int64, valset *types.ValidatorSet) *BaseVerifier { - if valset.IsNilOrEmpty() { - panic("NewBaseVerifier requires a valid valset") - } - return &BaseVerifier{ - chainID: chainID, - height: height, - valset: valset, - } -} - -// Implements Verifier. -func (bv *BaseVerifier) ChainID() string { - return bv.chainID -} - -// Implements Verifier. -func (bv *BaseVerifier) Verify(signedHeader types.SignedHeader) error { - - // We can't verify commits for a different chain. - if signedHeader.ChainID != bv.chainID { - return errors.Errorf("BaseVerifier chainID is %v, cannot verify chainID %v", - bv.chainID, signedHeader.ChainID) - } - - // We can't verify commits older than bv.height. - if signedHeader.Height < bv.height { - return errors.Errorf("BaseVerifier height is %v, cannot verify height %v", - bv.height, signedHeader.Height) - } - - // We can't verify with the wrong validator set. - if !bytes.Equal(signedHeader.ValidatorsHash, - bv.valset.Hash()) { - return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, bv.valset.Hash()) - } - - // Do basic sanity checks. - err := signedHeader.ValidateBasic(bv.chainID) - if err != nil { - return errors.Wrap(err, "in verify") - } - - // Check commit signatures. - err = bv.valset.VerifyCommit( - bv.chainID, signedHeader.Commit.BlockID, - signedHeader.Height, signedHeader.Commit) - if err != nil { - return errors.Wrap(err, "in verify") - } - - return nil -} diff --git a/lite/base_verifier_test.go b/lite/base_verifier_test.go deleted file mode 100644 index 2ef1203fb..000000000 --- a/lite/base_verifier_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package lite - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - lerr "github.com/tendermint/tendermint/lite/errors" - "github.com/tendermint/tendermint/types" -) - -func TestBaseCert(t *testing.T) { - assert := assert.New(t) - - keys := genPrivKeys(4) - // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! - vals := keys.ToValidators(20, 10) - // and a Verifier based on our known set - chainID := "test-static" - cert := NewBaseVerifier(chainID, 2, vals) - - cases := []struct { - keys privKeys - vals *types.ValidatorSet - height int64 - first, last int // who actually signs - proper bool // true -> expect no error - changed bool // true -> expect validator change error - }{ - // height regression - {keys, vals, 1, 0, len(keys), false, false}, - // perfect, signed by everyone - {keys, vals, 2, 0, len(keys), true, false}, - // skip little guy is okay - {keys, vals, 3, 1, len(keys), true, false}, - // but not the big guy - {keys, vals, 4, 0, len(keys) - 1, false, false}, - // Changing the power a little bit breaks the static validator. - // The sigs are enough, but the validator hash is unknown. 
- {keys, keys.ToValidators(20, 11), 5, 0, len(keys), false, true}, - } - - for _, tc := range cases { - sh := tc.keys.GenSignedHeader(chainID, tc.height, nil, tc.vals, tc.vals, - []byte("foo"), []byte("params"), []byte("results"), tc.first, tc.last) - err := cert.Verify(sh) - if tc.proper { - assert.Nil(err, "%+v", err) - } else { - assert.NotNil(err) - if tc.changed { - assert.True(lerr.IsErrUnexpectedValidators(err), "%+v", err) - } - } - } -} diff --git a/lite/client/provider.go b/lite/client/provider.go deleted file mode 100644 index a79a3b9fc..000000000 --- a/lite/client/provider.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Package client defines a provider that uses a rpchttp -to get information, which is used to get new headers -and validators directly from a Tendermint client. -*/ -package client - -import ( - "fmt" - - log "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/lite" - lerr "github.com/tendermint/tendermint/lite/errors" - rpcclient "github.com/tendermint/tendermint/rpc/client" - rpchttp "github.com/tendermint/tendermint/rpc/client/http" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/types" -) - -// SignStatusClient combines a SignClient and StatusClient. -type SignStatusClient interface { - rpcclient.SignClient - rpcclient.StatusClient -} - -type provider struct { - logger log.Logger - chainID string - client SignStatusClient -} - -// NewProvider implements Provider (but not PersistentProvider). -func NewProvider(chainID string, client SignStatusClient) lite.Provider { - return &provider{ - logger: log.NewNopLogger(), - chainID: chainID, - client: client, - } -} - -// NewHTTPProvider can connect to a tendermint json-rpc endpoint -// at the given url, and uses that as a read-only provider. -func NewHTTPProvider(chainID, remote string) (lite.Provider, error) { - httpClient, err := rpchttp.New(remote, "/websocket") - if err != nil { - return nil, err - } - return NewProvider(chainID, httpClient), nil -} - -// Implements Provider. -func (p *provider) SetLogger(logger log.Logger) { - logger = logger.With("module", "lite/client") - p.logger = logger -} - -// StatusClient returns the internal client as a StatusClient -func (p *provider) StatusClient() rpcclient.StatusClient { - return p.client -} - -// LatestFullCommit implements Provider. -func (p *provider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc lite.FullCommit, err error) { - if chainID != p.chainID { - err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID) - return - } - if maxHeight != 0 && maxHeight < minHeight { - err = fmt.Errorf("need maxHeight == 0 or minHeight <= maxHeight, got min %v and max %v", - minHeight, maxHeight) - return - } - commit, err := p.fetchLatestCommit(minHeight, maxHeight) - if err != nil { - return - } - fc, err = p.fillFullCommit(commit.SignedHeader) - return -} - -// fetchLatestCommit fetches the latest commit from the client. 
-func (p *provider) fetchLatestCommit(minHeight int64, maxHeight int64) (*ctypes.ResultCommit, error) { - status, err := p.client.Status() - if err != nil { - return nil, err - } - if status.SyncInfo.LatestBlockHeight < minHeight { - err = fmt.Errorf("provider is at %v but require minHeight=%v", - status.SyncInfo.LatestBlockHeight, minHeight) - return nil, err - } - if maxHeight == 0 { - maxHeight = status.SyncInfo.LatestBlockHeight - } else if status.SyncInfo.LatestBlockHeight < maxHeight { - maxHeight = status.SyncInfo.LatestBlockHeight - } - return p.client.Commit(&maxHeight) -} - -// Implements Provider. -func (p *provider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { - return p.getValidatorSet(chainID, height) -} - -func (p *provider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { - if chainID != p.chainID { - err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID) - return - } - if height < 1 { - err = fmt.Errorf("expected height >= 1, got height %v", height) - return - } - res, err := p.client.Validators(&height, 0, 0) - if err != nil { - // TODO pass through other types of errors. - return nil, lerr.ErrUnknownValidators(chainID, height) - } - valset = types.NewValidatorSet(res.Validators) - return -} - -// This does no validation. -func (p *provider) fillFullCommit(signedHeader types.SignedHeader) (fc lite.FullCommit, err error) { - - // Get the validators. - valset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height) - if err != nil { - return lite.FullCommit{}, err - } - - // Get the next validators. - nextValset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height+1) - if err != nil { - return lite.FullCommit{}, err - } - - return lite.NewFullCommit(signedHeader, valset, nextValset), nil -} diff --git a/lite/client/provider_test.go b/lite/client/provider_test.go deleted file mode 100644 index 1dccdd172..000000000 --- a/lite/client/provider_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package client - -import ( - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/abci/example/kvstore" - rpcclient "github.com/tendermint/tendermint/rpc/client" - rpctest "github.com/tendermint/tendermint/rpc/test" - "github.com/tendermint/tendermint/types" -) - -func TestMain(m *testing.M) { - app := kvstore.NewApplication() - node := rpctest.StartTendermint(app) - - code := m.Run() - - rpctest.StopTendermint(node) - os.Exit(code) -} - -func TestProvider(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - cfg := rpctest.GetConfig() - defer os.RemoveAll(cfg.RootDir) - rpcAddr := cfg.RPC.ListenAddress - genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) - if err != nil { - panic(err) - } - chainID := genDoc.ChainID - t.Log("chainID:", chainID) - p, err := NewHTTPProvider(chainID, rpcAddr) - require.Nil(err) - require.NotNil(p) - - // let it produce some blocks - err = rpcclient.WaitForHeight(p.(*provider).client, 6, nil) - require.Nil(err) - - // let's get the highest block - fc, err := p.LatestFullCommit(chainID, 1, 1<<63-1) - - require.Nil(err, "%+v", err) - sh := fc.Height() - assert.True(sh < 5000) - - // let's check this is valid somehow - assert.Nil(fc.ValidateFull(chainID)) - - // historical queries now work :) - lower := sh - 5 - fc, err = p.LatestFullCommit(chainID, lower, lower) - assert.Nil(err, "%+v", err) - assert.Equal(lower, fc.Height()) - -} diff --git 
a/lite/commit.go b/lite/commit.go deleted file mode 100644 index 6cd354173..000000000 --- a/lite/commit.go +++ /dev/null @@ -1,87 +0,0 @@ -package lite - -import ( - "bytes" - "errors" - "fmt" - - "github.com/tendermint/tendermint/types" -) - -// FullCommit contains a SignedHeader (the block header and a commit that signs it), -// the validator set which signed the commit, and the next validator set. The -// next validator set (which is proven from the block header) allows us to -// revert to block-by-block updating of lite Verifier's latest validator set, -// even in the face of arbitrarily large power changes. -type FullCommit struct { - SignedHeader types.SignedHeader `json:"signed_header"` - Validators *types.ValidatorSet `json:"validator_set"` - NextValidators *types.ValidatorSet `json:"next_validator_set"` -} - -// NewFullCommit returns a new FullCommit. -func NewFullCommit(signedHeader types.SignedHeader, valset, nextValset *types.ValidatorSet) FullCommit { - return FullCommit{ - SignedHeader: signedHeader, - Validators: valset, - NextValidators: nextValset, - } -} - -// Validate the components and check for consistency. -// This also checks to make sure that Validators actually -// signed the SignedHeader.Commit. -// If > 2/3 did not sign the Commit from fc.Validators, it -// is not a valid commit! -func (fc FullCommit) ValidateFull(chainID string) error { - // Ensure that Validators exists and matches the header. - if fc.Validators.Size() == 0 { - return errors.New("need FullCommit.Validators") - } - if !bytes.Equal( - fc.SignedHeader.ValidatorsHash, - fc.Validators.Hash()) { - return fmt.Errorf("header has vhash %X but valset hash is %X", - fc.SignedHeader.ValidatorsHash, - fc.Validators.Hash(), - ) - } - // Ensure that NextValidators exists and matches the header. - if fc.NextValidators.Size() == 0 { - return errors.New("need FullCommit.NextValidators") - } - if !bytes.Equal( - fc.SignedHeader.NextValidatorsHash, - fc.NextValidators.Hash()) { - return fmt.Errorf("header has next vhash %X but next valset hash is %X", - fc.SignedHeader.NextValidatorsHash, - fc.NextValidators.Hash(), - ) - } - // Validate the header. - err := fc.SignedHeader.ValidateBasic(chainID) - if err != nil { - return err - } - // Validate the signatures on the commit. - hdr, cmt := fc.SignedHeader.Header, fc.SignedHeader.Commit - return fc.Validators.VerifyCommit( - hdr.ChainID, cmt.BlockID, - hdr.Height, cmt) -} - -// Height returns the height of the header. -func (fc FullCommit) Height() int64 { - if fc.SignedHeader.Header == nil { - panic("should not happen") - } - return fc.SignedHeader.Height -} - -// ChainID returns the chainID of the header. -func (fc FullCommit) ChainID() string { - if fc.SignedHeader.Header == nil { - panic("should not happen") - } - return fc.SignedHeader.ChainID -} diff --git a/lite/dbprovider.go b/lite/dbprovider.go deleted file mode 100644 index 9b1580314..000000000 --- a/lite/dbprovider.go +++ /dev/null @@ -1,285 +0,0 @@ -package lite - -import ( - "fmt" - "regexp" - "strconv" - - amino "github.com/tendermint/go-amino" - dbm "github.com/tendermint/tm-db" - - cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" - log "github.com/tendermint/tendermint/libs/log" - lerr "github.com/tendermint/tendermint/lite/errors" - "github.com/tendermint/tendermint/types" -) - -var _ PersistentProvider = (*DBProvider)(nil) - -// DBProvider stores commits and validator sets in a DB. 
-type DBProvider struct { - logger log.Logger - label string - db dbm.DB - cdc *amino.Codec - limit int -} - -func NewDBProvider(label string, db dbm.DB) *DBProvider { - - // NOTE: when debugging, this type of construction might be useful. - //db = dbm.NewDebugDB("db provider "+tmrand.Str(4), db) - - cdc := amino.NewCodec() - cryptoamino.RegisterAmino(cdc) - dbp := &DBProvider{ - logger: log.NewNopLogger(), - label: label, - db: db, - cdc: cdc, - } - return dbp -} - -func (dbp *DBProvider) SetLogger(logger log.Logger) { - dbp.logger = logger.With("label", dbp.label) -} - -func (dbp *DBProvider) SetLimit(limit int) *DBProvider { - dbp.limit = limit - return dbp -} - -// Implements PersistentProvider. -func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { - - dbp.logger.Info("DBProvider.SaveFullCommit()...", "fc", fc) - batch := dbp.db.NewBatch() - defer batch.Close() - - // Save the fc.validators. - // We might be overwriting what we already have, but - // it makes the logic easier for now. - vsKey := validatorSetKey(fc.ChainID(), fc.Height()) - vsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.Validators) - if err != nil { - return err - } - batch.Set(vsKey, vsBz) - - // Save the fc.NextValidators. - nvsKey := validatorSetKey(fc.ChainID(), fc.Height()+1) - nvsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.NextValidators) - if err != nil { - return err - } - batch.Set(nvsKey, nvsBz) - - // Save the fc.SignedHeader - shKey := signedHeaderKey(fc.ChainID(), fc.Height()) - shBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.SignedHeader) - if err != nil { - return err - } - batch.Set(shKey, shBz) - - // And write sync. - batch.WriteSync() - - // Garbage collect. - // TODO: optimize later. - if dbp.limit > 0 { - dbp.deleteAfterN(fc.ChainID(), dbp.limit) - } - - return nil -} - -// Implements Provider. -func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) ( - FullCommit, error) { - - dbp.logger.Info("DBProvider.LatestFullCommit()...", - "chainID", chainID, "minHeight", minHeight, "maxHeight", maxHeight) - - if minHeight <= 0 { - minHeight = 1 - } - if maxHeight == 0 { - maxHeight = 1<<63 - 1 - } - - itr, err := dbp.db.ReverseIterator( - signedHeaderKey(chainID, minHeight), - append(signedHeaderKey(chainID, maxHeight), byte(0x00)), - ) - if err != nil { - panic(err) - } - defer itr.Close() - - for itr.Valid() { - key := itr.Key() - _, _, ok := parseSignedHeaderKey(key) - if !ok { - // Skip over other keys. - itr.Next() - continue - } else { - // Found the latest full commit signed header. 
- shBz := itr.Value() - sh := types.SignedHeader{} - err := dbp.cdc.UnmarshalBinaryLengthPrefixed(shBz, &sh) - if err != nil { - return FullCommit{}, err - } - - lfc, err := dbp.fillFullCommit(sh) - if err == nil { - dbp.logger.Info("DBProvider.LatestFullCommit() found latest.", "height", lfc.Height()) - return lfc, nil - } - - dbp.logger.Error("DBProvider.LatestFullCommit() got error", "lfc", lfc) - dbp.logger.Error(fmt.Sprintf("%+v", err)) - return lfc, err - - } - } - return FullCommit{}, lerr.ErrCommitNotFound() -} - -func (dbp *DBProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { - return dbp.getValidatorSet(chainID, height) -} - -func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { - vsBz, err := dbp.db.Get(validatorSetKey(chainID, height)) - if err != nil { - return nil, err - } - if len(vsBz) == 0 { - err = lerr.ErrUnknownValidators(chainID, height) - return - } - err = dbp.cdc.UnmarshalBinaryLengthPrefixed(vsBz, &valset) - if err != nil { - return - } - - // To test deep equality. This makes it easier to test for e.g. valset - // equivalence using assert.Equal (tests for deep equality) in our tests, - // which also tests for unexported/private field equivalence. - valset.TotalVotingPower() - - return -} - -func (dbp *DBProvider) fillFullCommit(sh types.SignedHeader) (FullCommit, error) { - var chainID = sh.ChainID - var height = sh.Height - var valset, nextValset *types.ValidatorSet - // Load the validator set. - valset, err := dbp.getValidatorSet(chainID, height) - if err != nil { - return FullCommit{}, err - } - // Load the next validator set. - nextValset, err = dbp.getValidatorSet(chainID, height+1) - if err != nil { - return FullCommit{}, err - } - // Return filled FullCommit. 
- return FullCommit{ - SignedHeader: sh, - Validators: valset, - NextValidators: nextValset, - }, nil -} - -func (dbp *DBProvider) deleteAfterN(chainID string, after int) error { - - dbp.logger.Info("DBProvider.deleteAfterN()...", "chainID", chainID, "after", after) - - itr, err := dbp.db.ReverseIterator( - signedHeaderKey(chainID, 1), - append(signedHeaderKey(chainID, 1<<63-1), byte(0x00)), - ) - if err != nil { - panic(err) - } - defer itr.Close() - - var lastHeight int64 = 1<<63 - 1 - var numSeen = 0 - var numDeleted = 0 - - for itr.Valid() { - key := itr.Key() - _, height, ok := parseChainKeyPrefix(key) - if !ok { - return fmt.Errorf("unexpected key %v", key) - } - - if height < lastHeight { - lastHeight = height - numSeen++ - } - if numSeen > after { - dbp.db.Delete(key) - numDeleted++ - } - - itr.Next() - } - - dbp.logger.Info(fmt.Sprintf("DBProvider.deleteAfterN() deleted %v items", numDeleted)) - return nil -} - -//---------------------------------------- -// key encoding - -func signedHeaderKey(chainID string, height int64) []byte { - return []byte(fmt.Sprintf("%s/%010d/sh", chainID, height)) -} - -func validatorSetKey(chainID string, height int64) []byte { - return []byte(fmt.Sprintf("%s/%010d/vs", chainID, height)) -} - -//---------------------------------------- -// key parsing - -var keyPattern = regexp.MustCompile(`^([^/]+)/([0-9]*)/(.*)$`) - -func parseKey(key []byte) (chainID string, height int64, part string, ok bool) { - submatch := keyPattern.FindSubmatch(key) - if submatch == nil { - return "", 0, "", false - } - chainID = string(submatch[1]) - heightStr := string(submatch[2]) - heightInt, err := strconv.Atoi(heightStr) - if err != nil { - return "", 0, "", false - } - height = int64(heightInt) - part = string(submatch[3]) - ok = true // good! - return -} - -func parseSignedHeaderKey(key []byte) (chainID string, height int64, ok bool) { - var part string - chainID, height, part, ok = parseKey(key) - if part != "sh" { - return "", 0, false - } - return -} - -func parseChainKeyPrefix(key []byte) (chainID string, height int64, ok bool) { - chainID, height, _, ok = parseKey(key) - return -} diff --git a/lite/doc.go b/lite/doc.go deleted file mode 100644 index ecb8d4cf6..000000000 --- a/lite/doc.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Package lite is deprecated and will be removed in v0.34! - -Package lite allows you to securely validate headers without a full node. - -This library pulls together all the crypto and algorithms, so given a -relatively recent (< unbonding period) known validator set, one can get -indisputable proof that data is in the chain (current state) or detect if the -node is lying to the client. - -Tendermint RPC exposes a lot of info, but a malicious node could return any -data it wants to queries, or even to block headers, even making up fake -signatures from non-existent validators to justify it. This is a lot of logic -to get right, to be contained in a small, easy to use library, that does this -for you, so you can just build nice applications. - -We design for clients who have no strong trust relationship with any Tendermint -node, just the blockchain and validator set as a whole. - -SignedHeader - -SignedHeader is a block header along with a commit -- enough validator -precommit-vote signatures to prove its validity (> 2/3 of the voting power) -given the validator set responsible for signing that header. A FullCommit is a -SignedHeader along with the current and next validator sets. 
- -The hash of the next validator set is included and signed in the SignedHeader. -This lets the lite client keep track of arbitrary changes to the validator set, -as every change to the validator set must be approved by inclusion in the -header and signed in the commit. - -In the worst case, with every block changing the validators around completely, -a lite client can sync up with every block header to verify each validator set -change on the chain. In practice, most applications will not have frequent -drastic updates to the validator set, so the logic defined in this package for -lite client syncing is optimized to use intelligent bisection and -block-skipping for efficient sourcing and verification of these data structures -and updates to the validator set (see the DynamicVerifier for more -information). - -The FullCommit is also declared in this package as a convenience structure, -which includes the SignedHeader along with the full current and next -ValidatorSets. - -Verifier - -A Verifier validates a new SignedHeader given the currently known state. There -are two different types of Verifiers provided. - -BaseVerifier - given a validator set and a height, this Verifier verifies -that > 2/3 of the voting power of the given validator set had signed the -SignedHeader, and that the SignedHeader was to be signed by the exact given -validator set, and that the height of the commit is at least height (or -greater). - -DynamicVerifier - this Verifier implements an auto-update and persistence -strategy to verify any SignedHeader of the blockchain. - -Provider and PersistentProvider - -A Provider allows us to store and retrieve the FullCommits. - - type Provider interface { - // LatestFullCommit returns the latest commit with - // minHeight <= height <= maxHeight. - // If maxHeight is zero, returns the latest where - // minHeight <= height. - LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) - } - -* client.NewHTTPProvider - query Tendermint rpc. - -A PersistentProvider is a Provider that also allows for saving state. This is -used by the DynamicVerifier for persistence. - - type PersistentProvider interface { - Provider - - // SaveFullCommit saves a FullCommit (without verification). - SaveFullCommit(fc FullCommit) error - } - -* DBProvider - persistence provider for use with any libs/DB. - -* MultiProvider - combine multiple providers. - -The suggested use for local light clients is client.NewHTTPProvider(...) for -getting new data (Source), and NewMultiProvider(NewDBProvider("label", -dbm.NewMemDB()), NewDBProvider("label", db.NewFileDB(...))) to store confirmed -full commits (Trusted) - - -How We Track Validators - -Unless you want to blindly trust the node you talk with, you need to trace -every response back to a hash in a block header and validate the commit -signatures of that block header match the proper validator set. If there is a -static validator set, you store it locally upon initialization of the client, -and check against that every time. - -If the validator set for the blockchain is dynamic, verifying block commits is -a bit more involved -- if there is a block at height H with a known (trusted) -validator set V, and another block at height H' (H' > H) with validator set V' -!= V, then we want a way to safely update it. - -First, we get the new (unconfirmed) validator set V' and verify that H' is -internally consistent and properly signed by this V'. 
Assuming it is a valid -block, we check that at least 2/3 of the validators in V also signed it, -meaning it would also be valid under our old assumptions. Then, we accept H' -and V' as valid and trusted and use that to validate for heights X > H' until a -more recent and updated validator set is found. - -If we cannot update directly from H -> H' because there was too much change to -the validator set, then we can look for some Hm (H < Hm < H') with a validator -set Vm. Then we try to update H -> Hm and then Hm -> H' in two steps. If one -of these steps doesn't work, then we continue bisecting, until we eventually -have to externally validate the validator set changes at every block. - -Since we never trust any server in this protocol, only the signatures -themselves, it doesn't matter if the seed comes from a (possibly malicious) -node or a (possibly malicious) user. We can accept it or reject it based only -on our trusted validator set and cryptographic proofs. This makes it extremely -important to verify that you have the proper validator set when initializing -the client, as that is the root of all trust. - -The software currently assumes that the unbonding period is infinite in -duration. If the DynamicVerifier hasn't been updated in a while, you should -manually verify the block headers using other sources. - -TODO: Update the software to handle cases around the unbonding period. - -*/ -package lite diff --git a/lite/dynamic_verifier.go b/lite/dynamic_verifier.go deleted file mode 100644 index d4efdcbeb..000000000 --- a/lite/dynamic_verifier.go +++ /dev/null @@ -1,275 +0,0 @@ -package lite - -import ( - "bytes" - "fmt" - "sync" - - log "github.com/tendermint/tendermint/libs/log" - lerr "github.com/tendermint/tendermint/lite/errors" - "github.com/tendermint/tendermint/types" -) - -const sizeOfPendingMap = 1024 - -var _ Verifier = (*DynamicVerifier)(nil) - -// DynamicVerifier implements an auto-updating Verifier. It uses a -// "source" provider to obtain the needed FullCommits to securely sync with -// validator set changes. It stores properly validated data on the -// "trusted" local system. -// TODO: make this single threaded and create a new -// ConcurrentDynamicVerifier that wraps it with concurrency. -// see https://github.com/tendermint/tendermint/issues/3170 -type DynamicVerifier struct { - chainID string - logger log.Logger - - // Already validated, stored locally - trusted PersistentProvider - - // New info, like a node rpc, or other import method. - source Provider - - // pending map to synchronize concurrent verification requests - mtx sync.Mutex - pendingVerifications map[int64]chan struct{} -} - -// NewDynamicVerifier returns a new DynamicVerifier. It uses the -// trusted provider to store validated data and the source provider to -// obtain missing data (e.g. FullCommits). -// -// The trusted provider should be a DBProvider. -// The source provider should be a client.HTTPProvider. -func NewDynamicVerifier(chainID string, trusted PersistentProvider, source Provider) *DynamicVerifier { - return &DynamicVerifier{ - logger: log.NewNopLogger(), - chainID: chainID, - trusted: trusted, - source: source, - pendingVerifications: make(map[int64]chan struct{}, sizeOfPendingMap), - } -} - -func (dv *DynamicVerifier) SetLogger(logger log.Logger) { - logger = logger.With("module", "lite") - dv.logger = logger - dv.trusted.SetLogger(logger) - dv.source.SetLogger(logger) -} - -// Implements Verifier. 
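The deleted doc.go prose above describes the update strategy in plain terms: try the direct hop H -> H'; if the validator set moved too much for one step, split at the midpoint and recurse, degrading to block-by-block verification in the worst case. A minimal recursion sketch of that idea, assuming a caller-supplied hop(from, to) that reports whether a single update step verifies (both names are hypothetical):

    package main

    import "fmt"

    // bisectUpdate tries the direct hop first and falls back to
    // divide-and-conquer, eventually reaching adjacent, one-block hops.
    func bisectUpdate(hop func(from, to int64) bool, from, to int64) bool {
        if hop(from, to) {
            return true
        }
        if to-from <= 1 {
            return false // the adjacent hop itself failed: nothing left to split
        }
        mid := (from + to) / 2
        return bisectUpdate(hop, from, mid) && bisectUpdate(hop, mid, to)
    }

    func main() {
        // Toy rule: a hop verifies only if it spans at most 8 heights,
        // standing in for "the validator set changed too much".
        hop := func(from, to int64) bool { return to-from <= 8 }
        fmt.Println(bisectUpdate(hop, 1, 100)) // true, via recursive splitting
    }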
-func (dv *DynamicVerifier) ChainID() string {
- return dv.chainID
-}
-
-// Implements Verifier.
-//
-// If the validators have changed since the last known time, it looks to
-// dv.trusted and dv.source to prove the new validators. On success, it will
-// try to store the SignedHeader in dv.trusted if the next
-// validator set can be sourced.
-func (dv *DynamicVerifier) Verify(shdr types.SignedHeader) error {
-
- // Synchronize concurrent verification requests for the same height.
- dv.mtx.Lock()
- if pending := dv.pendingVerifications[shdr.Height]; pending != nil {
- dv.mtx.Unlock()
- <-pending // pending is chan struct{}
- } else {
- pending := make(chan struct{})
- dv.pendingVerifications[shdr.Height] = pending
- defer func() {
- close(pending)
- dv.mtx.Lock()
- delete(dv.pendingVerifications, shdr.Height)
- dv.mtx.Unlock()
- }()
- dv.mtx.Unlock()
- }
-
- // Get the exact trusted commit for h; if it is equal to shdr, then it's
- // already trusted, so just return nil.
- trustedFCSameHeight, err := dv.trusted.LatestFullCommit(dv.chainID, shdr.Height, shdr.Height)
- if err == nil {
- // If the trusted commit loaded successfully and equals shdr, skip
- // re-verification and just return nil.
- if bytes.Equal(trustedFCSameHeight.SignedHeader.Hash(), shdr.Hash()) {
- dv.logger.Info(fmt.Sprintf("Loaded full commit at height %d from cache; no need to verify.", shdr.Height))
- return nil
- }
- } else if !lerr.IsErrCommitNotFound(err) {
- // Return the error if it is anything other than CommitNotFound.
- dv.logger.Info(fmt.Sprintf("Encountered unknown error loading full commit at height %d.", shdr.Height))
- return err
- }
-
- // Get the latest known full commit <= h-1 from our trusted providers.
- // The full commit at h-1 contains the valset to sign for h.
- prevHeight := shdr.Height - 1
- trustedFC, err := dv.trusted.LatestFullCommit(dv.chainID, 1, prevHeight)
- if err != nil {
- return err
- }
-
- // Sync up to prevHeight and assert that our latest NextValidatorSet
- // is the ValidatorSet for the SignedHeader.
- if trustedFC.Height() == prevHeight {
- // Return error if valset doesn't match.
- if !bytes.Equal(
- trustedFC.NextValidators.Hash(),
- shdr.Header.ValidatorsHash) {
- return lerr.ErrUnexpectedValidators(
- trustedFC.NextValidators.Hash(),
- shdr.Header.ValidatorsHash)
- }
- } else {
- // If the valset doesn't match, try to update.
- if !bytes.Equal(
- trustedFC.NextValidators.Hash(),
- shdr.Header.ValidatorsHash) {
- // ... update.
- trustedFC, err = dv.updateToHeight(prevHeight)
- if err != nil {
- return err
- }
- // Return error if valset _still_ doesn't match.
- if !bytes.Equal(trustedFC.NextValidators.Hash(),
- shdr.Header.ValidatorsHash) {
- return lerr.ErrUnexpectedValidators(
- trustedFC.NextValidators.Hash(),
- shdr.Header.ValidatorsHash)
- }
- }
- }
-
- // Verify the signed header using the matching valset.
- cert := NewBaseVerifier(dv.chainID, trustedFC.Height()+1, trustedFC.NextValidators)
- err = cert.Verify(shdr)
- if err != nil {
- return err
- }
-
- // By now, the SignedHeader is fully validated and we're synced up to
- // SignedHeader.Height - 1. To sync to SignedHeader.Height, we need
- // the validator set at SignedHeader.Height + 1 so we can verify the
- // SignedHeader.NextValidatorSet.
- // TODO: is the ValidateFull below mostly redundant with the BaseVerifier.Verify above?
- // See https://github.com/tendermint/tendermint/issues/3174.
-
- // Get the next validator set.
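- // (A FullCommit stores the valsets for both its height and the next one,
- // so completing the FullCommit for shdr.Height needs the valset one height
- // ahead, which only the source can provide at this point.)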
- nextValset, err := dv.source.ValidatorSet(dv.chainID, shdr.Height+1) - if lerr.IsErrUnknownValidators(err) { - // Ignore this error. - return nil - } else if err != nil { - return err - } - - // Create filled FullCommit. - nfc := FullCommit{ - SignedHeader: shdr, - Validators: trustedFC.NextValidators, - NextValidators: nextValset, - } - // Validate the full commit. This checks the cryptographic - // signatures of Commit against Validators. - if err := nfc.ValidateFull(dv.chainID); err != nil { - return err - } - // Trust it. - return dv.trusted.SaveFullCommit(nfc) -} - -// verifyAndSave will verify if this is a valid source full commit given the -// best match trusted full commit, and if good, persist to dv.trusted. -// Returns ErrNotEnoughVotingPowerSigned when >2/3 of trustedFC did not sign sourceFC. -// Panics if trustedFC.Height() >= sourceFC.Height(). -func (dv *DynamicVerifier) verifyAndSave(trustedFC, sourceFC FullCommit) error { - if trustedFC.Height() >= sourceFC.Height() { - panic("should not happen") - } - err := trustedFC.NextValidators.VerifyFutureCommit( - sourceFC.Validators, - dv.chainID, sourceFC.SignedHeader.Commit.BlockID, - sourceFC.SignedHeader.Height, sourceFC.SignedHeader.Commit, - ) - if err != nil { - return err - } - - return dv.trusted.SaveFullCommit(sourceFC) -} - -// updateToHeight will use divide-and-conquer to find a path to h. -// Returns nil error iff we successfully verify and persist a full commit -// for height h, using repeated applications of bisection if necessary. -// -// Returns ErrCommitNotFound if source provider doesn't have the commit for h. -func (dv *DynamicVerifier) updateToHeight(h int64) (FullCommit, error) { - - // Fetch latest full commit from source. - sourceFC, err := dv.source.LatestFullCommit(dv.chainID, h, h) - if err != nil { - return FullCommit{}, err - } - - // If sourceFC.Height() != h, we can't do it. - if sourceFC.Height() != h { - return FullCommit{}, lerr.ErrCommitNotFound() - } - - // Validate the full commit. This checks the cryptographic - // signatures of Commit against Validators. - if err := sourceFC.ValidateFull(dv.chainID); err != nil { - return FullCommit{}, err - } - - // Verify latest FullCommit against trusted FullCommits -FOR_LOOP: - for { - // Fetch latest full commit from trusted. - trustedFC, err := dv.trusted.LatestFullCommit(dv.chainID, 1, h) - if err != nil { - return FullCommit{}, err - } - // We have nothing to do. - if trustedFC.Height() == h { - return trustedFC, nil - } - - // Try to update to full commit with checks. - err = dv.verifyAndSave(trustedFC, sourceFC) - if err == nil { - // All good! - return sourceFC, nil - } - - // Handle special case when err is ErrNotEnoughVotingPowerSigned. - if types.IsErrNotEnoughVotingPowerSigned(err) { - // Divide and conquer. - start, end := trustedFC.Height(), sourceFC.Height() - if !(start < end) { - panic("should not happen") - } - mid := (start + end) / 2 - _, err = dv.updateToHeight(mid) - if err != nil { - return FullCommit{}, err - } - // If we made it to mid, we retry. 
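- // Each recursive step halves the remaining (trusted, source] interval,
- // so this terminates; in the worst case every intermediate validator set
- // change gets verified, as described in the package docs.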
- continue FOR_LOOP - } - return FullCommit{}, err - } -} - -func (dv *DynamicVerifier) LastTrustedHeight() int64 { - fc, err := dv.trusted.LatestFullCommit(dv.chainID, 1, 1<<63-1) - if err != nil { - panic("should not happen") - } - return fc.Height() -} diff --git a/lite/dynamic_verifier_test.go b/lite/dynamic_verifier_test.go deleted file mode 100644 index fdb89052b..000000000 --- a/lite/dynamic_verifier_test.go +++ /dev/null @@ -1,280 +0,0 @@ -package lite - -import ( - "fmt" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - dbm "github.com/tendermint/tm-db" - - log "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" -) - -const testChainID = "inquiry-test" - -func TestInquirerValidPath(t *testing.T) { - assert, require := assert.New(t), require.New(t) - trust := NewDBProvider("trust", dbm.NewMemDB()) - source := NewDBProvider("source", dbm.NewMemDB()) - - // Set up the validators to generate test blocks. - var vote int64 = 10 - keys := genPrivKeys(5) - nkeys := keys.Extend(1) - - // Construct a bunch of commits, each with one more height than the last. - chainID := testChainID - consHash := []byte("params") - resHash := []byte("results") - count := 50 - fcz := make([]FullCommit, count) - for i := 0; i < count; i++ { - vals := keys.ToValidators(vote, 0) - nextVals := nkeys.ToValidators(vote, 0) - h := int64(1 + i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - fcz[i] = keys.GenFullCommit( - chainID, h, nil, - vals, nextVals, - appHash, consHash, resHash, 0, len(keys)) - // Extend the keys by 1 each time. - keys = nkeys - nkeys = nkeys.Extend(1) - } - - // Initialize a Verifier with the initial state. - err := trust.SaveFullCommit(fcz[0]) - require.Nil(err) - cert := NewDynamicVerifier(chainID, trust, source) - cert.SetLogger(log.TestingLogger()) - - // This should fail validation: - sh := fcz[count-1].SignedHeader - err = cert.Verify(sh) - require.NotNil(err) - - // Adding a few commits in the middle should be insufficient. - for i := 10; i < 13; i++ { - err := source.SaveFullCommit(fcz[i]) - require.Nil(err) - } - err = cert.Verify(sh) - assert.NotNil(err) - - // With more info, we succeed. - for i := 0; i < count; i++ { - err := source.SaveFullCommit(fcz[i]) - require.Nil(err) - } - err = cert.Verify(sh) - assert.Nil(err, "%+v", err) -} - -func TestDynamicVerify(t *testing.T) { - trust := NewDBProvider("trust", dbm.NewMemDB()) - source := NewDBProvider("source", dbm.NewMemDB()) - - // 10 commits with one valset, 1 to change, - // 10 commits with the next one - n1, n2 := 10, 10 - nCommits := n1 + n2 + 1 - maxHeight := int64(nCommits) - fcz := make([]FullCommit, nCommits) - - // gen the 2 val sets - chainID := "dynamic-verifier" - power := int64(10) - keys1 := genPrivKeys(5) - vals1 := keys1.ToValidators(power, 0) - keys2 := genPrivKeys(5) - vals2 := keys2.ToValidators(power, 0) - - // make some commits with the first - for i := 0; i < n1; i++ { - fcz[i] = makeFullCommit(int64(i), keys1, vals1, vals1, chainID) - } - - // update the val set - fcz[n1] = makeFullCommit(int64(n1), keys1, vals1, vals2, chainID) - - // make some commits with the new one - for i := n1 + 1; i < nCommits; i++ { - fcz[i] = makeFullCommit(int64(i), keys2, vals2, vals2, chainID) - } - - // Save everything in the source - for _, fc := range fcz { - source.SaveFullCommit(fc) - } - - // Initialize a Verifier with the initial state. 
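- // (fcz[0] acts as the root of trust here: Verify can only accept
- // headers reachable from it through validator-set proofs.)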
- err := trust.SaveFullCommit(fcz[0])
- require.Nil(t, err)
- ver := NewDynamicVerifier(chainID, trust, source)
- ver.SetLogger(log.TestingLogger())
-
- // fetch the latest from the source
- latestFC, err := source.LatestFullCommit(chainID, 1, maxHeight)
- require.NoError(t, err)
-
- // try to update to the latest
- err = ver.Verify(latestFC.SignedHeader)
- require.NoError(t, err)
-
-}
-
-func makeFullCommit(height int64, keys privKeys, vals, nextVals *types.ValidatorSet, chainID string) FullCommit {
- height++
- consHash := []byte("special-params")
- appHash := []byte(fmt.Sprintf("h=%d", height))
- resHash := []byte(fmt.Sprintf("res=%d", height))
- return keys.GenFullCommit(
- chainID, height, nil,
- vals, nextVals,
- appHash, consHash, resHash, 0, len(keys))
-}
-
-func TestInquirerVerifyHistorical(t *testing.T) {
- assert, require := assert.New(t), require.New(t)
- trust := NewDBProvider("trust", dbm.NewMemDB())
- source := NewDBProvider("source", dbm.NewMemDB())
-
- // Set up the validators to generate test blocks.
- var vote int64 = 10
- keys := genPrivKeys(5)
- nkeys := keys.Extend(1)
-
- // Construct a bunch of commits, each with one more height than the last.
- chainID := testChainID
- count := 10
- consHash := []byte("special-params")
- fcz := make([]FullCommit, count)
- for i := 0; i < count; i++ {
- vals := keys.ToValidators(vote, 0)
- nextVals := nkeys.ToValidators(vote, 0)
- h := int64(1 + i)
- appHash := []byte(fmt.Sprintf("h=%d", h))
- resHash := []byte(fmt.Sprintf("res=%d", h))
- fcz[i] = keys.GenFullCommit(
- chainID, h, nil,
- vals, nextVals,
- appHash, consHash, resHash, 0, len(keys))
- // Extend the keys by 1 each time.
- keys = nkeys
- nkeys = nkeys.Extend(1)
- }
-
- // Initialize a Verifier with the initial state.
- err := trust.SaveFullCommit(fcz[0])
- require.Nil(err)
- cert := NewDynamicVerifier(chainID, trust, source)
- cert.SetLogger(log.TestingLogger())
-
- // Store a few full commits as trust.
- for _, i := range []int{2, 5} {
- trust.SaveFullCommit(fcz[i])
- }
-
- // See if we can jump forward using trusted full commits.
- // Source doesn't have fcz[9], so cert.LastTrustedHeight won't change.
- err = source.SaveFullCommit(fcz[7])
- require.Nil(err, "%+v", err)
- sh := fcz[8].SignedHeader
- err = cert.Verify(sh)
- require.Nil(err, "%+v", err)
- assert.Equal(fcz[7].Height(), cert.LastTrustedHeight())
- commit, err := trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
- require.NotNil(err, "%+v", err)
- assert.Equal(commit, (FullCommit{}))
-
- // With fcz[9], Verify will update the last trusted height.
- err = source.SaveFullCommit(fcz[9])
- require.Nil(err, "%+v", err)
- sh = fcz[8].SignedHeader
- err = cert.Verify(sh)
- require.Nil(err, "%+v", err)
- assert.Equal(fcz[8].Height(), cert.LastTrustedHeight())
- commit, err = trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
- require.Nil(err, "%+v", err)
- assert.Equal(commit.Height(), fcz[8].Height())
-
- // Add access to all full commits via untrusted source.
- for i := 0; i < count; i++ {
- err := source.SaveFullCommit(fcz[i])
- require.Nil(err)
- }
-
- // Try to check an unknown seed in the past.
- sh = fcz[3].SignedHeader
- err = cert.Verify(sh)
- require.Nil(err, "%+v", err)
- assert.Equal(fcz[8].Height(), cert.LastTrustedHeight())
-
- // Jump all the way forward again.
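- // (Every full commit is now available from the source, so bisection can
- // bridge from the last trusted height straight to the newest header.)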
- sh = fcz[count-1].SignedHeader
- err = cert.Verify(sh)
- require.Nil(err, "%+v", err)
- assert.Equal(fcz[9].Height(), cert.LastTrustedHeight())
-}
-
-func TestConcurrencyInquirerVerify(t *testing.T) {
- _, require := assert.New(t), require.New(t)
- trust := NewDBProvider("trust", dbm.NewMemDB()).SetLimit(10)
- source := NewDBProvider("source", dbm.NewMemDB())
-
- // Set up the validators to generate test blocks.
- var vote int64 = 10
- keys := genPrivKeys(5)
- nkeys := keys.Extend(1)
-
- // Construct a bunch of commits, each with one more height than the last.
- chainID := testChainID
- count := 10
- consHash := []byte("special-params")
- fcz := make([]FullCommit, count)
- for i := 0; i < count; i++ {
- vals := keys.ToValidators(vote, 0)
- nextVals := nkeys.ToValidators(vote, 0)
- h := int64(1 + i)
- appHash := []byte(fmt.Sprintf("h=%d", h))
- resHash := []byte(fmt.Sprintf("res=%d", h))
- fcz[i] = keys.GenFullCommit(
- chainID, h, nil,
- vals, nextVals,
- appHash, consHash, resHash, 0, len(keys))
- // Extend the keys by 1 each time.
- keys = nkeys
- nkeys = nkeys.Extend(1)
- }
-
- // Initialize a Verifier with the initial state.
- err := trust.SaveFullCommit(fcz[0])
- require.Nil(err)
- cert := NewDynamicVerifier(chainID, trust, source)
- cert.SetLogger(log.TestingLogger())
-
- err = source.SaveFullCommit(fcz[7])
- require.Nil(err, "%+v", err)
- err = source.SaveFullCommit(fcz[8])
- require.Nil(err, "%+v", err)
- sh := fcz[8].SignedHeader
-
- var wg sync.WaitGroup
- count = 100
- errList := make([]error, count)
- for i := 0; i < count; i++ {
- wg.Add(1)
- go func(index int) {
- defer wg.Done()
- errList[index] = cert.Verify(sh)
- }(i)
- }
- wg.Wait()
- for _, err := range errList {
- require.Nil(err)
- }
-}
diff --git a/lite/errors/errors.go b/lite/errors/errors.go
deleted file mode 100644
index 5bb829b0a..000000000
--- a/lite/errors/errors.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package errors
-
-import (
- "fmt"
-
- "github.com/pkg/errors"
-)
-
-//----------------------------------------
-// Error types
-
-type errCommitNotFound struct{}
-
-func (e errCommitNotFound) Error() string {
- return "Commit not found by provider"
-}
-
-type errUnexpectedValidators struct {
- got []byte
- want []byte
-}
-
-func (e errUnexpectedValidators) Error() string {
- return fmt.Sprintf("Validator set is different. Got %X want %X",
- e.got, e.want)
-}
-
-type errUnknownValidators struct {
- chainID string
- height int64
-}
-
-func (e errUnknownValidators) Error() string {
- return fmt.Sprintf("Validators are unknown or missing for chain %s and height %d",
- e.chainID, e.height)
-}
-
-type errEmptyTree struct{}
-
-func (e errEmptyTree) Error() string {
- return "Tree is empty"
-}
-
-//----------------------------------------
-// Methods for above error types
-
-//-----------------
-// ErrCommitNotFound
-
-// ErrCommitNotFound indicates that the requested commit was not found.
-func ErrCommitNotFound() error {
- return errors.Wrap(errCommitNotFound{}, "")
-}
-
-func IsErrCommitNotFound(err error) bool {
- _, ok := errors.Cause(err).(errCommitNotFound)
- return ok
-}
-
-//-----------------
-// ErrUnexpectedValidators
-
-// ErrUnexpectedValidators indicates a validator set mismatch.
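-// It is returned by the DynamicVerifier when a header's ValidatorsHash does
-// not match the NextValidators hash of the best trusted FullCommit.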
-func ErrUnexpectedValidators(got, want []byte) error { - return errors.Wrap(errUnexpectedValidators{ - got: got, - want: want, - }, "") -} - -func IsErrUnexpectedValidators(err error) bool { - _, ok := errors.Cause(err).(errUnexpectedValidators) - return ok -} - -//----------------- -// ErrUnknownValidators - -// ErrUnknownValidators indicates that some validator set was missing or unknown. -func ErrUnknownValidators(chainID string, height int64) error { - return errors.Wrap(errUnknownValidators{chainID, height}, "") -} - -func IsErrUnknownValidators(err error) bool { - _, ok := errors.Cause(err).(errUnknownValidators) - return ok -} - -//----------------- -// ErrEmptyTree - -func ErrEmptyTree() error { - return errors.Wrap(errEmptyTree{}, "") -} - -func IsErrEmptyTree(err error) bool { - _, ok := errors.Cause(err).(errEmptyTree) - return ok -} diff --git a/lite/helpers.go b/lite/helpers.go deleted file mode 100644 index 29dd50b5b..000000000 --- a/lite/helpers.go +++ /dev/null @@ -1,159 +0,0 @@ -package lite - -import ( - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/secp256k1" - - "github.com/tendermint/tendermint/types" - tmtime "github.com/tendermint/tendermint/types/time" -) - -// PrivKeys is a helper type for testing. -// -// It lets us simulate signing with many keys. The main use case is to create -// a set, and call GenSignedHeader to get properly signed header for testing. -// -// You can set different weights of validators each time you call ToValidators, -// and can optionally extend the validator set later with Extend. -type privKeys []crypto.PrivKey - -// genPrivKeys produces an array of private keys to generate commits. -func genPrivKeys(n int) privKeys { - res := make(privKeys, n) - for i := range res { - res[i] = ed25519.GenPrivKey() - } - return res -} - -// Change replaces the key at index i. -func (pkz privKeys) Change(i int) privKeys { - res := make(privKeys, len(pkz)) - copy(res, pkz) - res[i] = ed25519.GenPrivKey() - return res -} - -// Extend adds n more keys (to remove, just take a slice). -func (pkz privKeys) Extend(n int) privKeys { - extra := genPrivKeys(n) - return append(pkz, extra...) -} - -// GenSecpPrivKeys produces an array of secp256k1 private keys to generate commits. -func genSecpPrivKeys(n int) privKeys { - res := make(privKeys, n) - for i := range res { - res[i] = secp256k1.GenPrivKey() - } - return res -} - -// ExtendSecp adds n more secp256k1 keys (to remove, just take a slice). -func (pkz privKeys) ExtendSecp(n int) privKeys { - extra := genSecpPrivKeys(n) - return append(pkz, extra...) -} - -// ToValidators produces a valset from the set of keys. -// The first key has weight `init` and it increases by `inc` every step -// so we can have all the same weight, or a simple linear distribution -// (should be enough for testing). -func (pkz privKeys) ToValidators(init, inc int64) *types.ValidatorSet { - res := make([]*types.Validator, len(pkz)) - for i, k := range pkz { - res[i] = types.NewValidator(k.PubKey(), init+int64(i)*inc) - } - return types.NewValidatorSet(res) -} - -// signHeader properly signs the header with all keys from first to last exclusive. -func (pkz privKeys) signHeader(header *types.Header, first, last int) *types.Commit { - commitSigs := make([]types.CommitSig, len(pkz)) - for i := 0; i < len(pkz); i++ { - commitSigs[i] = types.NewCommitSigAbsent() - } - - // We need this list to keep the ordering. 
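- // Commit signatures are positional: each vote must land at its
- // validator's index in commitSigs, so build a valset to look indices up.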
- vset := pkz.ToValidators(1, 0) - - blockID := types.BlockID{ - Hash: header.Hash(), - PartsHeader: types.PartSetHeader{Total: 1, Hash: crypto.CRandBytes(32)}, - } - - // Fill in the votes we want. - for i := first; i < last && i < len(pkz); i++ { - vote := makeVote(header, vset, pkz[i], blockID) - commitSigs[vote.ValidatorIndex] = vote.CommitSig() - } - - return types.NewCommit(header.Height, 1, blockID, commitSigs) -} - -func makeVote(header *types.Header, valset *types.ValidatorSet, key crypto.PrivKey, blockID types.BlockID) *types.Vote { - addr := key.PubKey().Address() - idx, _ := valset.GetByAddress(addr) - vote := &types.Vote{ - ValidatorAddress: addr, - ValidatorIndex: idx, - Height: header.Height, - Round: 1, - Timestamp: tmtime.Now(), - Type: types.PrecommitType, - BlockID: blockID, - } - // Sign it - signBytes := vote.SignBytes(header.ChainID) - // TODO Consider reworking makeVote API to return an error - sig, err := key.Sign(signBytes) - if err != nil { - panic(err) - } - vote.Signature = sig - - return vote -} - -func genHeader(chainID string, height int64, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { - - return &types.Header{ - ChainID: chainID, - Height: height, - Time: tmtime.Now(), - // LastBlockID - // LastCommitHash - ValidatorsHash: valset.Hash(), - NextValidatorsHash: nextValset.Hash(), - DataHash: txs.Hash(), - AppHash: appHash, - ConsensusHash: consHash, - LastResultsHash: resHash, - } -} - -// GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader. -func (pkz privKeys) GenSignedHeader(chainID string, height int64, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) types.SignedHeader { - - header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash) - check := types.SignedHeader{ - Header: header, - Commit: pkz.signHeader(header, first, last), - } - return check -} - -// GenFullCommit calls genHeader and signHeader and combines them into a FullCommit. -func (pkz privKeys) GenFullCommit(chainID string, height int64, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit { - - header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash) - commit := types.SignedHeader{ - Header: header, - Commit: pkz.signHeader(header, first, last), - } - return NewFullCommit(commit, valset, nextValset) -} diff --git a/lite/multiprovider.go b/lite/multiprovider.go deleted file mode 100644 index 364647a40..000000000 --- a/lite/multiprovider.go +++ /dev/null @@ -1,85 +0,0 @@ -package lite - -import ( - log "github.com/tendermint/tendermint/libs/log" - lerr "github.com/tendermint/tendermint/lite/errors" - "github.com/tendermint/tendermint/types" -) - -var _ PersistentProvider = (*multiProvider)(nil) - -// multiProvider allows you to place one or more caches in front of a source -// Provider. It runs through them in order until a match is found. -type multiProvider struct { - logger log.Logger - providers []PersistentProvider -} - -// NewMultiProvider returns a new provider which wraps multiple other providers. -func NewMultiProvider(providers ...PersistentProvider) PersistentProvider { - return &multiProvider{ - logger: log.NewNopLogger(), - providers: providers, - } -} - -// SetLogger sets logger on self and all subproviders. 
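-// Propagating the logger keeps output from every cache layer under one
-// logging context.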
-func (mc *multiProvider) SetLogger(logger log.Logger) { - mc.logger = logger - for _, p := range mc.providers { - p.SetLogger(logger) - } -} - -// SaveFullCommit saves on all providers, and aborts on the first error. -func (mc *multiProvider) SaveFullCommit(fc FullCommit) (err error) { - for _, p := range mc.providers { - err = p.SaveFullCommit(fc) - if err != nil { - return - } - } - return -} - -// LatestFullCommit loads the latest from all providers and provides -// the latest FullCommit that satisfies the conditions. -// Returns the first error encountered. -func (mc *multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc FullCommit, err error) { - for _, p := range mc.providers { - var commit FullCommit - commit, err = p.LatestFullCommit(chainID, minHeight, maxHeight) - if lerr.IsErrCommitNotFound(err) { - err = nil - continue - } else if err != nil { - return - } - if fc == (FullCommit{}) { - fc = commit - } else if commit.Height() > fc.Height() { - fc = commit - } - if fc.Height() == maxHeight { - return - } - } - if fc == (FullCommit{}) { - err = lerr.ErrCommitNotFound() - return - } - return -} - -// ValidatorSet returns validator set at height as provided by the first -// provider which has it, or an error otherwise. -func (mc *multiProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { - for _, p := range mc.providers { - valset, err = p.ValidatorSet(chainID, height) - if err == nil { - // TODO Log unexpected types of errors. - return valset, nil - } - } - return nil, lerr.ErrUnknownValidators(chainID, height) -} diff --git a/lite/provider.go b/lite/provider.go deleted file mode 100644 index ebab16264..000000000 --- a/lite/provider.go +++ /dev/null @@ -1,32 +0,0 @@ -package lite - -import ( - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" -) - -// Provider provides information for the lite client to sync validators. -// Examples: MemProvider, files.Provider, client.Provider, CacheProvider. -type Provider interface { - - // LatestFullCommit returns the latest commit with minHeight <= height <= - // maxHeight. - // If maxHeight is zero, returns the latest where minHeight <= height. - LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) - - // Get the valset that corresponds to chainID and height and return. - // Height must be >= 1. - ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) - - // Set a logger. - SetLogger(logger log.Logger) -} - -// A provider that can also persist new information. -// Examples: MemProvider, files.Provider, CacheProvider. -type PersistentProvider interface { - Provider - - // SaveFullCommit saves a FullCommit (without verification). - SaveFullCommit(fc FullCommit) error -} diff --git a/lite/provider_test.go b/lite/provider_test.go deleted file mode 100644 index b820418ff..000000000 --- a/lite/provider_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package lite - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - dbm "github.com/tendermint/tm-db" - - log "github.com/tendermint/tendermint/libs/log" - lerr "github.com/tendermint/tendermint/lite/errors" - "github.com/tendermint/tendermint/types" -) - -// missingProvider doesn't store anything, always a miss. -// Designed as a mock for testing. -type missingProvider struct{} - -// NewMissingProvider returns a provider which does not store anything and always misses. 
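-// It is mainly useful for exercising multiProvider's fall-through behavior
-// in tests.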
-func NewMissingProvider() PersistentProvider { - return missingProvider{} -} - -func (missingProvider) SaveFullCommit(FullCommit) error { return nil } -func (missingProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) { - return FullCommit{}, lerr.ErrCommitNotFound() -} -func (missingProvider) ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) { - return nil, errors.New("missing validator set") -} -func (missingProvider) SetLogger(_ log.Logger) {} - -func TestMemProvider(t *testing.T) { - p := NewDBProvider("mem", dbm.NewMemDB()) - checkProvider(t, p, "test-mem", "empty") -} - -func TestMultiProvider(t *testing.T) { - p := NewMultiProvider( - NewMissingProvider(), - NewDBProvider("mem", dbm.NewMemDB()), - NewMissingProvider(), - ) - checkProvider(t, p, "test-cache", "kjfhekfhkewhgit") -} - -func checkProvider(t *testing.T, p PersistentProvider, chainID, app string) { - assert, require := assert.New(t), require.New(t) - appHash := []byte(app) - keys := genPrivKeys(5) - count := 10 - - // Make a bunch of full commits. - fcz := make([]FullCommit, count) - for i := 0; i < count; i++ { - vals := keys.ToValidators(10, int64(count/2)) - h := int64(20 + 10*i) - fcz[i] = keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5) - } - - // Check that provider is initially empty. - fc, err := p.LatestFullCommit(chainID, 1, 1<<63-1) - require.NotNil(err) - assert.True(lerr.IsErrCommitNotFound(err)) - - // Save all full commits to the provider. - for _, fc := range fcz { - err = p.SaveFullCommit(fc) - require.Nil(err) - // Make sure we can get it back. - fc2, err := p.LatestFullCommit(chainID, fc.Height(), fc.Height()) - assert.Nil(err) - assert.Equal(fc.SignedHeader, fc2.SignedHeader) - assert.Equal(fc.Validators, fc2.Validators) - assert.Equal(fc.NextValidators, fc2.NextValidators) - } - - // Make sure we get the last hash if we overstep. - fc, err = p.LatestFullCommit(chainID, 1, 5000) - if assert.Nil(err) { - assert.Equal(fcz[count-1].Height(), fc.Height()) - assert.Equal(fcz[count-1], fc) - } - - // ... and middle ones as well. - fc, err = p.LatestFullCommit(chainID, 1, 47) - if assert.Nil(err) { - // we only step by 10, so 40 must be the one below this - assert.EqualValues(40, fc.Height()) - } - -} - -// This will make a get height, and if it is good, set the data as well. -func checkLatestFullCommit(t *testing.T, p PersistentProvider, chainID string, ask, expect int64) { - fc, err := p.LatestFullCommit(chainID, 1, ask) - require.Nil(t, err) - if assert.Equal(t, expect, fc.Height()) { - err = p.SaveFullCommit(fc) - require.Nil(t, err) - } -} - -func TestMultiLatestFullCommit(t *testing.T) { - require := require.New(t) - - // We will write data to the second level of the cache (p2), and see what - // gets cached/stored in. - p := NewDBProvider("mem1", dbm.NewMemDB()) - p2 := NewDBProvider("mem2", dbm.NewMemDB()) - cp := NewMultiProvider(p, p2) - - chainID := "cache-best-height" - appHash := []byte("01234567") - keys := genPrivKeys(5) - count := 10 - - // Set a bunch of full commits. - for i := 0; i < count; i++ { - vals := keys.ToValidators(10, int64(count/2)) - h := int64(10 * (i + 1)) - fc := keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5) - err := p2.SaveFullCommit(fc) - require.NoError(err) - } - - // Get a few heights from the cache and set them proper. 
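- // (Commits were saved only in p2, at heights 10, 20, ..., 100, so asking
- // the combined provider for <= 57 should surface the commit at 50.)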
- checkLatestFullCommit(t, cp, chainID, 57, 50) - checkLatestFullCommit(t, cp, chainID, 33, 30) - - // make sure they are set in p as well (but nothing else) - checkLatestFullCommit(t, p, chainID, 44, 30) - checkLatestFullCommit(t, p, chainID, 50, 50) - checkLatestFullCommit(t, p, chainID, 99, 50) - - // now, query the cache for a higher value - checkLatestFullCommit(t, p2, chainID, 99, 90) - checkLatestFullCommit(t, cp, chainID, 99, 90) -} diff --git a/lite/proxy/block.go b/lite/proxy/block.go deleted file mode 100644 index d154bf333..000000000 --- a/lite/proxy/block.go +++ /dev/null @@ -1,48 +0,0 @@ -package proxy - -import ( - "bytes" - "errors" - - "github.com/tendermint/tendermint/types" -) - -func ValidateBlockMeta(meta *types.BlockMeta, sh types.SignedHeader) error { - if meta == nil { - return errors.New("expecting a non-nil BlockMeta") - } - // TODO: check the BlockID?? - return ValidateHeader(&meta.Header, sh) -} - -func ValidateBlock(meta *types.Block, sh types.SignedHeader) error { - if meta == nil { - return errors.New("expecting a non-nil Block") - } - err := ValidateHeader(&meta.Header, sh) - if err != nil { - return err - } - if !bytes.Equal(meta.Data.Hash(), meta.Header.DataHash) { - return errors.New("data hash doesn't match header") - } - return nil -} - -func ValidateHeader(head *types.Header, sh types.SignedHeader) error { - if head == nil { - return errors.New("expecting a non-nil Header") - } - if sh.Header == nil { - return errors.New("unexpected empty SignedHeader") - } - // Make sure they are for the same height (obvious fail). - if head.Height != sh.Height { - return errors.New("header heights mismatched") - } - // Check if they are equal by using hashes. - if !bytes.Equal(head.Hash(), sh.Hash()) { - return errors.New("headers don't match") - } - return nil -} diff --git a/lite/proxy/errors.go b/lite/proxy/errors.go deleted file mode 100644 index 41923659f..000000000 --- a/lite/proxy/errors.go +++ /dev/null @@ -1,21 +0,0 @@ -package proxy - -import ( - "github.com/pkg/errors" -) - -type errNoData struct{} - -func (e errNoData) Error() string { - return "No data returned for query" -} - -// IsErrNoData checks whether an error is due to a query returning empty data -func IsErrNoData(err error) bool { - _, ok := errors.Cause(err).(errNoData) - return ok -} - -func ErrNoData() error { - return errors.Wrap(errNoData{}, "") -} diff --git a/lite/proxy/proof.go b/lite/proxy/proof.go deleted file mode 100644 index 452dee277..000000000 --- a/lite/proxy/proof.go +++ /dev/null @@ -1,14 +0,0 @@ -package proxy - -import ( - "github.com/tendermint/tendermint/crypto/merkle" -) - -func defaultProofRuntime() *merkle.ProofRuntime { - prt := merkle.NewProofRuntime() - prt.RegisterOpDecoder( - merkle.ProofOpSimpleValue, - merkle.SimpleValueOpDecoder, - ) - return prt -} diff --git a/lite/proxy/proxy.go b/lite/proxy/proxy.go deleted file mode 100644 index 5fb51f0b3..000000000 --- a/lite/proxy/proxy.go +++ /dev/null @@ -1,182 +0,0 @@ -package proxy - -import ( - "context" - "net/http" - - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/rpc/client" - rpcclient "github.com/tendermint/tendermint/rpc/client" - "github.com/tendermint/tendermint/rpc/core" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpcserver "github.com/tendermint/tendermint/rpc/lib/server" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" - 
"github.com/tendermint/tendermint/types" -) - -const ( - wsEndpoint = "/websocket" -) - -// StartProxy will start the websocket manager on the client, -// set up the rpc routes to proxy via the given client, -// and start up an http/rpc server on the location given by bind (eg. :1234) -// NOTE: This function blocks - you may want to call it in a go-routine. -func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger, maxOpenConnections int) error { - err := c.Start() - if err != nil { - return err - } - - cdc := amino.NewCodec() - ctypes.RegisterAmino(cdc) - r := RPCRoutes(c) - - // build the handler... - mux := http.NewServeMux() - rpcserver.RegisterRPCFuncs(mux, r, cdc, logger) - - unsubscribeFromAllEvents := func(remoteAddr string) { - if err := c.UnsubscribeAll(context.Background(), remoteAddr); err != nil { - logger.Error("Failed to unsubscribe from events", "err", err) - } - } - wm := rpcserver.NewWebsocketManager(r, cdc, rpcserver.OnDisconnect(unsubscribeFromAllEvents)) - wm.SetLogger(logger) - core.SetLogger(logger) - mux.HandleFunc(wsEndpoint, wm.WebsocketHandler) - - config := rpcserver.DefaultConfig() - config.MaxOpenConnections = maxOpenConnections - l, err := rpcserver.Listen(listenAddr, config) - if err != nil { - return err - } - return rpcserver.StartHTTPServer(l, mux, logger, config) -} - -// RPCRoutes just routes everything to the given client, as if it were -// a tendermint fullnode. -// -// if we want security, the client must implement it as a secure client -func RPCRoutes(c rpcclient.Client) map[string]*rpcserver.RPCFunc { - return map[string]*rpcserver.RPCFunc{ - // Subscribe/unsubscribe are reserved for websocket events. - "subscribe": rpcserver.NewWSRPCFunc(c.(Wrapper).SubscribeWS, "query"), - "unsubscribe": rpcserver.NewWSRPCFunc(c.(Wrapper).UnsubscribeWS, "query"), - "unsubscribe_all": rpcserver.NewWSRPCFunc(c.(Wrapper).UnsubscribeAllWS, ""), - - // info API - "status": rpcserver.NewRPCFunc(makeStatusFunc(c), ""), - "blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight"), - "genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), ""), - "block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height"), - "commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height"), - "tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove"), - "validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height"), - - // broadcast API - "broadcast_tx_commit": rpcserver.NewRPCFunc(makeBroadcastTxCommitFunc(c), "tx"), - "broadcast_tx_sync": rpcserver.NewRPCFunc(makeBroadcastTxSyncFunc(c), "tx"), - "broadcast_tx_async": rpcserver.NewRPCFunc(makeBroadcastTxAsyncFunc(c), "tx"), - - // abci API - "abci_query": rpcserver.NewRPCFunc(makeABCIQueryFunc(c), "path,data"), - "abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), ""), - } -} - -func makeStatusFunc(c client.StatusClient) func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { - return func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { - return c.Status() - } -} - -func makeBlockchainInfoFunc(c rpcclient.Client) func( - ctx *rpctypes.Context, - minHeight, - maxHeight int64, -) (*ctypes.ResultBlockchainInfo, error) { - return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - return c.BlockchainInfo(minHeight, maxHeight) - } -} - -func makeGenesisFunc(c rpcclient.Client) func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { - return func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { - return c.Genesis() - } -} - -func 
makeBlockFunc(c rpcclient.Client) func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) { - return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) { - return c.Block(height) - } -} - -func makeCommitFunc(c rpcclient.Client) func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) { - return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) { - return c.Commit(height) - } -} - -func makeTxFunc(c rpcclient.Client) func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { - return func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { - return c.Tx(hash, prove) - } -} - -func makeValidatorsFunc(c rpcclient.Client) func( - ctx *rpctypes.Context, - height *int64, -) (*ctypes.ResultValidators, error) { - return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultValidators, error) { - return c.Validators(height, 0, 0) - } -} - -func makeBroadcastTxCommitFunc(c rpcclient.Client) func( - ctx *rpctypes.Context, - tx types.Tx, -) (*ctypes.ResultBroadcastTxCommit, error) { - return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - return c.BroadcastTxCommit(tx) - } -} - -func makeBroadcastTxSyncFunc(c rpcclient.Client) func( - ctx *rpctypes.Context, - tx types.Tx, -) (*ctypes.ResultBroadcastTx, error) { - return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return c.BroadcastTxSync(tx) - } -} - -func makeBroadcastTxAsyncFunc(c rpcclient.Client) func( - ctx *rpctypes.Context, - tx types.Tx, -) (*ctypes.ResultBroadcastTx, error) { - return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return c.BroadcastTxAsync(tx) - } -} - -func makeABCIQueryFunc(c rpcclient.Client) func( - ctx *rpctypes.Context, - path string, - data bytes.HexBytes, -) (*ctypes.ResultABCIQuery, error) { - return func(ctx *rpctypes.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQuery(path, data) - } -} - -func makeABCIInfoFunc(c rpcclient.Client) func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { - return func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { - return c.ABCIInfo() - } -} diff --git a/lite/proxy/query.go b/lite/proxy/query.go deleted file mode 100644 index f95b4708b..000000000 --- a/lite/proxy/query.go +++ /dev/null @@ -1,148 +0,0 @@ -package proxy - -import ( - "fmt" - "strings" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/lite" - lerr "github.com/tendermint/tendermint/lite/errors" - rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/types" -) - -// GetWithProof will query the key on the given node, and verify it has -// a valid proof, as defined by the Verifier. -// -// If there is any error in checking, returns an error. 
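-// The returned height is the height at which the value was actually proven,
-// which matters when reqHeight is 0 (query at the latest available height).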
-func GetWithProof(prt *merkle.ProofRuntime, key []byte, reqHeight int64, node rpcclient.Client,
- cert lite.Verifier) (
- val bytes.HexBytes, height int64, proof *merkle.Proof, err error) {
-
- if reqHeight < 0 {
- err = errors.New("height cannot be negative")
- return
- }
-
- res, err := GetWithProofOptions(prt, "/key", key,
- rpcclient.ABCIQueryOptions{Height: reqHeight, Prove: true},
- node, cert)
- if err != nil {
- return
- }
-
- resp := res.Response
- val, height, proof = resp.Value, resp.Height, resp.Proof
- return val, height, proof, err
-}
-
-// GetWithProofOptions is useful if you want full access to the ABCIQueryOptions.
-// XXX Usage of path? It's not used, and sometimes it's /, sometimes /key, sometimes /store.
-func GetWithProofOptions(prt *merkle.ProofRuntime, path string, key []byte, opts rpcclient.ABCIQueryOptions,
- node rpcclient.Client, cert lite.Verifier) (
- *ctypes.ResultABCIQuery, error) {
- opts.Prove = true
- res, err := node.ABCIQueryWithOptions(path, key, opts)
- if err != nil {
- return nil, err
- }
- resp := res.Response
-
- // Validate the response, e.g. height.
- if resp.IsErr() {
- err = errors.Errorf("query error for key %X: %d", key, resp.Code)
- return nil, err
- }
-
- if len(resp.Key) == 0 || resp.Proof == nil {
- return nil, lerr.ErrEmptyTree()
- }
- if resp.Height == 0 {
- return nil, errors.New("height returned is zero")
- }
-
- // AppHash for height H is in header H+1
- signedHeader, err := GetCertifiedCommit(resp.Height+1, node, cert)
- if err != nil {
- return nil, err
- }
-
- // Validate the proof against the certified header to ensure data integrity.
- if resp.Value != nil {
- // Value exists
- // XXX How do we encode the key into a string...
- storeName, err := parseQueryStorePath(path)
- if err != nil {
- return nil, err
- }
-
- kp := merkle.KeyPath{}
- kp = kp.AppendKey([]byte(storeName), merkle.KeyEncodingURL)
- kp = kp.AppendKey(resp.Key, merkle.KeyEncodingURL)
- err = prt.VerifyValue(resp.Proof, signedHeader.AppHash, kp.String(), resp.Value)
- if err != nil {
- return nil, errors.Wrap(err, "couldn't verify value proof")
- }
-
- return &ctypes.ResultABCIQuery{Response: resp}, nil
- }
-
- // Value absent
- // Validate the proof against the certified header to ensure data integrity.
- // XXX How do we encode the key into a string...
- err = prt.VerifyAbsence(resp.Proof, signedHeader.AppHash, string(resp.Key))
- if err != nil {
- return nil, errors.Wrap(err, "couldn't verify absence proof")
- }
-
- return &ctypes.ResultABCIQuery{Response: resp}, nil
-}
-
-func parseQueryStorePath(path string) (storeName string, err error) {
- if !strings.HasPrefix(path, "/") {
- return "", fmt.Errorf("expected path to start with /")
- }
-
- paths := strings.SplitN(path[1:], "/", 3)
- switch {
- case len(paths) != 3:
- return "", fmt.Errorf("expected format like /store/<storeName>/key")
- case paths[0] != "store":
- return "", fmt.Errorf("expected format like /store/<storeName>/key")
- case paths[2] != "key":
- return "", fmt.Errorf("expected format like /store/<storeName>/key")
- }
-
- return paths[1], nil
-}
-
-// GetCertifiedCommit gets the signed header for a given height and certifies
-// it. Returns error if unable to get a proven header.
-func GetCertifiedCommit(h int64, client rpcclient.Client, cert lite.Verifier) (types.SignedHeader, error) {
-
- // FIXME: cannot use cert.GetByHeight for now, as it also requires
- // Validators and will fail on querying tendermint for non-current height.
- // When this is supported, we should use it instead...
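- // Wait for the node to reach height h first; requesting a commit at a
- // height the node hasn't reached yet would fail.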
- rpcclient.WaitForHeight(client, h, nil) - cresp, err := client.Commit(&h) - if err != nil { - return types.SignedHeader{}, err - } - - // Validate downloaded checkpoint with our request and trust store. - sh := cresp.SignedHeader - if sh.Height != h { - return types.SignedHeader{}, fmt.Errorf("height mismatch: want %v got %v", - h, sh.Height) - } - - if err = cert.Verify(sh); err != nil { - return types.SignedHeader{}, err - } - - return sh, nil -} diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go deleted file mode 100644 index 766a86040..000000000 --- a/lite/proxy/query_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package proxy - -import ( - "fmt" - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/lite" - certclient "github.com/tendermint/tendermint/lite/client" - nm "github.com/tendermint/tendermint/node" - "github.com/tendermint/tendermint/rpc/client" - rpclocal "github.com/tendermint/tendermint/rpc/client/local" - rpctest "github.com/tendermint/tendermint/rpc/test" - "github.com/tendermint/tendermint/types" -) - -var node *nm.Node -var chainID = "tendermint_test" // TODO use from config. -//nolint:unused -var waitForEventTimeout = 5 * time.Second - -// TODO fix tests!! - -func TestMain(m *testing.M) { - app := kvstore.NewApplication() - node = rpctest.StartTendermint(app) - - code := m.Run() - - rpctest.StopTendermint(node) - os.Exit(code) -} - -func kvstoreTx(k, v []byte) []byte { - return []byte(fmt.Sprintf("%s=%s", k, v)) -} - -// TODO: enable it after general proof format has been adapted -// in abci/examples/kvstore.go -//nolint:unused,deadcode -func _TestAppProofs(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - prt := defaultProofRuntime() - cl := rpclocal.New(node) - client.WaitForHeight(cl, 1, nil) - - // This sets up our trust on the node based on some past point. - source := certclient.NewProvider(chainID, cl) - seed, err := source.LatestFullCommit(chainID, 1, 1) - require.NoError(err, "%#v", err) - cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators) - - // Wait for tx confirmation. - done := make(chan int64) - go func() { - evtTyp := types.EventTx - _, err = client.WaitForOneEvent(cl, evtTyp, waitForEventTimeout) - require.Nil(err, "%#v", err) - close(done) - }() - - // Submit a transaction. - k := []byte("my-key") - v := []byte("my-value") - tx := kvstoreTx(k, v) - br, err := cl.BroadcastTxCommit(tx) - require.NoError(err, "%#v", err) - require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) - require.EqualValues(0, br.DeliverTx.Code) - brh := br.Height - - // Fetch latest after tx commit. - <-done - latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1) - require.NoError(err, "%#v", err) - rootHash := latest.SignedHeader.AppHash - if rootHash == nil { - // Fetch one block later, AppHash hasn't been committed yet. - // TODO find a way to avoid doing this. 
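- // (The AppHash in header H commits to the state after block H-1, so the
- // effect of this tx only shows up one header later.)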
- client.WaitForHeight(cl, latest.SignedHeader.Height+1, nil) - latest, err = source.LatestFullCommit(chainID, latest.SignedHeader.Height+1, 1<<63-1) - require.NoError(err, "%#v", err) - rootHash = latest.SignedHeader.AppHash - } - require.NotNil(rootHash) - - // verify a query before the tx block has no data (and valid non-exist proof) - bs, height, proof, err := GetWithProof(prt, k, brh-1, cl, cert) - require.NoError(err, "%#v", err) - require.NotNil(proof) - require.Equal(height, brh-1) - // require.NotNil(proof) - // TODO: Ensure that *some* keys will be there, ensuring that proof is nil, - // (currently there's a race condition) - // and ensure that proof proves absence of k. - require.Nil(bs) - - // but given that block it is good - bs, height, proof, err = GetWithProof(prt, k, brh, cl, cert) - require.NoError(err, "%#v", err) - require.NotNil(proof) - require.Equal(height, brh) - - assert.EqualValues(v, bs) - err = prt.VerifyValue(proof, rootHash, string(k), bs) // XXX key encoding - assert.NoError(err, "%#v", err) - - // Test non-existing key. - missing := []byte("my-missing-key") - bs, _, proof, err = GetWithProof(prt, missing, 0, cl, cert) - require.NoError(err) - require.Nil(bs) - require.NotNil(proof) - err = prt.VerifyAbsence(proof, rootHash, string(missing)) // XXX VerifyAbsence(), keyencoding - assert.NoError(err, "%#v", err) - err = prt.VerifyAbsence(proof, rootHash, string(k)) // XXX VerifyAbsence(), keyencoding - assert.Error(err, "%#v", err) -} - -func TestTxProofs(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - cl := rpclocal.New(node) - client.WaitForHeight(cl, 1, nil) - - tx := kvstoreTx([]byte("key-a"), []byte("value-a")) - br, err := cl.BroadcastTxCommit(tx) - require.NoError(err, "%#v", err) - require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) - require.EqualValues(0, br.DeliverTx.Code) - brh := br.Height - - source := certclient.NewProvider(chainID, cl) - seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) - require.NoError(err, "%#v", err) - cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators) - - // First let's make sure a bogus transaction hash returns a valid non-existence proof. - key := types.Tx([]byte("bogus")).Hash() - _, err = cl.Tx(key, true) - require.NotNil(err) - require.Contains(err.Error(), "not found") - - // Now let's check with the real tx root hash. 
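- // A proof for an existing tx should validate against the block's
- // DataHash, which the certified commit fetched below vouches for.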
- key = types.Tx(tx).Hash() - res, err := cl.Tx(key, true) - require.NoError(err, "%#v", err) - require.NotNil(res) - keyHash := merkle.SimpleHashFromByteSlices([][]byte{key}) - err = res.Proof.Validate(keyHash) - assert.NoError(err, "%#v", err) - - commit, err := GetCertifiedCommit(br.Height, cl, cert) - require.Nil(err, "%#v", err) - require.Equal(res.Proof.RootHash, commit.Header.DataHash) -} diff --git a/lite/proxy/validate_test.go b/lite/proxy/validate_test.go deleted file mode 100644 index cf9a0de6b..000000000 --- a/lite/proxy/validate_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package proxy_test - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/tendermint/tendermint/lite/proxy" - "github.com/tendermint/tendermint/types" -) - -var ( - deadBeefTxs = types.Txs{[]byte("DE"), []byte("AD"), []byte("BE"), []byte("EF")} - deadBeefHash = deadBeefTxs.Hash() - testTime1 = time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC) - testTime2 = time.Date(2017, 1, 2, 1, 1, 1, 1, time.UTC) -) - -var hdrHeight11 = types.Header{ - Height: 11, - Time: testTime1, - ValidatorsHash: []byte("Tendermint"), -} - -func TestValidateBlock(t *testing.T) { - tests := []struct { - block *types.Block - signedHeader types.SignedHeader - wantErr string - }{ - { - block: nil, wantErr: "non-nil Block", - }, - { - block: &types.Block{}, wantErr: "unexpected empty SignedHeader", - }, - - // Start Header.Height mismatch test - { - block: &types.Block{Header: types.Header{Height: 10}}, - signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, - wantErr: "header heights mismatched", - }, - - { - block: &types.Block{Header: types.Header{Height: 11}}, - signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, - }, - // End Header.Height mismatch test - - // Start Header.Hash mismatch test - { - block: &types.Block{Header: hdrHeight11}, - signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, - wantErr: "headers don't match", - }, - - { - block: &types.Block{Header: hdrHeight11}, - signedHeader: types.SignedHeader{Header: &hdrHeight11}, - }, - // End Header.Hash mismatch test - - // Start Header.Data hash mismatch test - { - block: &types.Block{ - Header: types.Header{Height: 11}, - Data: types.Data{Txs: []types.Tx{[]byte("0xDE"), []byte("AD")}}, - }, - signedHeader: types.SignedHeader{ - Header: &types.Header{Height: 11}, - Commit: types.NewCommit(11, 0, types.BlockID{Hash: []byte("0xDEADBEEF")}, nil), - }, - wantErr: "data hash doesn't match header", - }, - { - block: &types.Block{ - Header: types.Header{Height: 11, DataHash: deadBeefHash}, - Data: types.Data{Txs: deadBeefTxs}, - }, - signedHeader: types.SignedHeader{ - Header: &types.Header{Height: 11}, - Commit: types.NewCommit(11, 0, types.BlockID{Hash: []byte("DEADBEEF")}, nil), - }, - }, - // End Header.Data hash mismatch test - } - - for i, tt := range tests { - err := proxy.ValidateBlock(tt.block, tt.signedHeader) - if tt.wantErr != "" { - if err == nil { - assert.FailNowf(t, "Unexpectedly passed", "#%d", i) - } else { - assert.Contains(t, err.Error(), tt.wantErr, "#%d should contain the substring\n\n", i) - } - continue - } - - assert.Nil(t, err, "#%d: expecting a nil error", i) - } -} - -func TestValidateBlockMeta(t *testing.T) { - tests := []struct { - meta *types.BlockMeta - signedHeader types.SignedHeader - wantErr string - }{ - { - meta: nil, wantErr: "non-nil BlockMeta", - }, - { - meta: &types.BlockMeta{}, wantErr: "unexpected empty SignedHeader", - }, - - // Start Header.Height mismatch test - { 
- meta: &types.BlockMeta{Header: types.Header{Height: 10}}, - signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, - wantErr: "header heights mismatched", - }, - - { - meta: &types.BlockMeta{Header: types.Header{Height: 11}}, - signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, - }, - // End Header.Height mismatch test - - // Start Headers don't match test - { - meta: &types.BlockMeta{Header: hdrHeight11}, - signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, - wantErr: "headers don't match", - }, - - { - meta: &types.BlockMeta{Header: hdrHeight11}, - signedHeader: types.SignedHeader{Header: &hdrHeight11}, - }, - - { - meta: &types.BlockMeta{ - Header: types.Header{ - Height: 11, - ValidatorsHash: []byte("lite-test"), - // TODO: should be able to use empty time after Amino upgrade - Time: testTime1, - }, - }, - signedHeader: types.SignedHeader{ - Header: &types.Header{Height: 11, DataHash: deadBeefHash}, - }, - wantErr: "headers don't match", - }, - - { - meta: &types.BlockMeta{ - Header: types.Header{ - Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint"), - Time: testTime1, - }, - }, - signedHeader: types.SignedHeader{ - Header: &types.Header{ - Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint"), - Time: testTime2, - }, - Commit: types.NewCommit(11, 0, types.BlockID{Hash: []byte("DEADBEEF")}, nil), - }, - wantErr: "headers don't match", - }, - - { - meta: &types.BlockMeta{ - Header: types.Header{ - Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint"), - Time: testTime2, - }, - }, - signedHeader: types.SignedHeader{ - Header: &types.Header{ - Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint-x"), - Time: testTime2, - }, - Commit: types.NewCommit(11, 0, types.BlockID{Hash: []byte("DEADBEEF")}, nil), - }, - wantErr: "headers don't match", - }, - // End Headers don't match test - } - - for i, tt := range tests { - err := proxy.ValidateBlockMeta(tt.meta, tt.signedHeader) - if tt.wantErr != "" { - if err == nil { - assert.FailNowf(t, "Unexpectedly passed", "#%d: wanted error %q", i, tt.wantErr) - } else { - assert.Contains(t, err.Error(), tt.wantErr, "#%d should contain the substring\n\n", i) - } - continue - } - - assert.Nil(t, err, "#%d: expecting a nil error", i) - } -} diff --git a/lite/proxy/verifier.go b/lite/proxy/verifier.go deleted file mode 100644 index 5486a3ea9..000000000 --- a/lite/proxy/verifier.go +++ /dev/null @@ -1,49 +0,0 @@ -package proxy - -import ( - "github.com/pkg/errors" - - dbm "github.com/tendermint/tm-db" - - log "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/lite" - lclient "github.com/tendermint/tendermint/lite/client" -) - -func NewVerifier( - chainID, - rootDir string, - client lclient.SignStatusClient, - logger log.Logger, - cacheSize int, -) (*lite.DynamicVerifier, error) { - - logger = logger.With("module", "lite/proxy") - logger.Info("lite/proxy/NewVerifier()...", "chainID", chainID, "rootDir", rootDir, "client", client) - - memProvider := lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(cacheSize) - lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.GoLevelDBBackend, rootDir)) - trust := lite.NewMultiProvider( - memProvider, - lvlProvider, - ) - source := lclient.NewProvider(chainID, client) - cert := lite.NewDynamicVerifier(chainID, trust, source) - cert.SetLogger(logger) // Sets logger recursively. - - // TODO: Make this more secure, e.g. 
make it interactive in the console? - _, err := trust.LatestFullCommit(chainID, 1, 1<<63-1) - if err != nil { - logger.Info("lite/proxy/NewVerifier found no trusted full commit, initializing from source from height 1...") - fc, err := source.LatestFullCommit(chainID, 1, 1) - if err != nil { - return nil, errors.Wrap(err, "fetching source full commit @ height 1") - } - err = trust.SaveFullCommit(fc) - if err != nil { - return nil, errors.Wrap(err, "saving full commit to trusted") - } - } - - return cert, nil -} diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go deleted file mode 100644 index e823cc5f0..000000000 --- a/lite/proxy/wrapper.go +++ /dev/null @@ -1,255 +0,0 @@ -package proxy - -import ( - "context" - - "github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/lite" - rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" -) - -var _ rpcclient.Client = Wrapper{} - -// Wrapper wraps a rpcclient with a Verifier and double-checks any input that is -// provable before passing it along. Allows you to make any rpcclient fully secure. -type Wrapper struct { - rpcclient.Client - cert *lite.DynamicVerifier - prt *merkle.ProofRuntime -} - -// SecureClient uses a given Verifier to wrap an connection to an untrusted -// host and return a cryptographically secure rpc client. -// -// If it is wrapping an HTTP rpcclient, it will also wrap the websocket interface -func SecureClient(c rpcclient.Client, cert *lite.DynamicVerifier) Wrapper { - prt := defaultProofRuntime() - wrap := Wrapper{c, cert, prt} - // TODO: no longer possible as no more such interface exposed.... - // if we wrap http client, then we can swap out the event switch to filter - // if hc, ok := c.(*rpcclient.HTTP); ok { - // evt := hc.WSEvents.EventSwitch - // hc.WSEvents.EventSwitch = WrappedSwitch{evt, wrap} - // } - return wrap -} - -// ABCIQueryWithOptions exposes all options for the ABCI query and verifies the returned proof -func (w Wrapper) ABCIQueryWithOptions(path string, data bytes.HexBytes, - opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - - res, err := GetWithProofOptions(w.prt, path, data, opts, w.Client, w.cert) - return res, err -} - -// ABCIQuery uses default options for the ABCI query and verifies the returned proof -func (w Wrapper) ABCIQuery(path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { - return w.ABCIQueryWithOptions(path, data, rpcclient.DefaultABCIQueryOptions) -} - -// Tx queries for a given tx and verifies the proof if it was requested -func (w Wrapper) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { - res, err := w.Client.Tx(hash, prove) - if !prove || err != nil { - return res, err - } - h := res.Height - sh, err := GetCertifiedCommit(h, w.Client, w.cert) - if err != nil { - return res, err - } - err = res.Proof.Validate(sh.DataHash) - return res, err -} - -// BlockchainInfo requests a list of headers and verifies them all... -// Rather expensive. -// -// TODO: optimize this if used for anything needing performance -func (w Wrapper) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - r, err := w.Client.BlockchainInfo(minHeight, maxHeight) - if err != nil { - return nil, err - } - - // go and verify every blockmeta in the result.... 
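- // Each BlockMeta is checked against a separately fetched, lite-verified
- // Commit at the same height -- hence the cost warning above.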
- for _, meta := range r.BlockMetas { - // get a checkpoint to verify from - res, err := w.Commit(&meta.Header.Height) - if err != nil { - return nil, err - } - sh := res.SignedHeader - err = ValidateBlockMeta(meta, sh) - if err != nil { - return nil, err - } - } - - return r, nil -} - -// Block returns an entire block and verifies all signatures -func (w Wrapper) Block(height *int64) (*ctypes.ResultBlock, error) { - resBlock, err := w.Client.Block(height) - if err != nil { - return nil, err - } - // get a checkpoint to verify from - resCommit, err := w.Commit(height) - if err != nil { - return nil, err - } - sh := resCommit.SignedHeader - - err = ValidateBlock(resBlock.Block, sh) - if err != nil { - return nil, err - } - return resBlock, nil -} - -// Commit downloads the Commit and certifies it with the lite. -// -// This is the foundation for all other verification in this module -func (w Wrapper) Commit(height *int64) (*ctypes.ResultCommit, error) { - if height == nil { - resStatus, err := w.Client.Status() - if err != nil { - return nil, err - } - // NOTE: If resStatus.CatchingUp, there is a race - // condition where the validator set for the next height - // isn't available until some time after the blockstore - // has height h on the remote node. This isn't an issue - // once the node has caught up, and a syncing node likely - // won't have this issue esp with the implementation we - // have here, but we may have to address this at some - // point. - height = new(int64) - *height = resStatus.SyncInfo.LatestBlockHeight - } - rpcclient.WaitForHeight(w.Client, *height, nil) - res, err := w.Client.Commit(height) - // if we got it, then verify it - if err == nil { - sh := res.SignedHeader - err = w.cert.Verify(sh) - } - return res, err -} - -func (w Wrapper) RegisterOpDecoder(typ string, dec merkle.OpDecoder) { - w.prt.RegisterOpDecoder(typ, dec) -} - -// SubscribeWS subscribes for events using the given query and remote address as -// a subscriber, but does not verify responses (UNSAFE)! -func (w Wrapper) SubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { - out, err := w.Client.Subscribe(context.Background(), ctx.RemoteAddr(), query) - if err != nil { - return nil, err - } - - go func() { - for { - select { - case resultEvent := <-out: - // XXX(melekes) We should have a switch here that performs a validation - // depending on the event's type. - ctx.WSConn.TryWriteRPCResponse( - rpctypes.NewRPCSuccessResponse( - ctx.WSConn.Codec(), - ctx.JSONReq.ID, - resultEvent, - )) - case <-w.Client.Quit(): - return - } - } - }() - - return &ctypes.ResultSubscribe{}, nil -} - -// UnsubscribeWS calls original client's Unsubscribe using remote address as a -// subscriber. -func (w Wrapper) UnsubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { - err := w.Client.Unsubscribe(context.Background(), ctx.RemoteAddr(), query) - if err != nil { - return nil, err - } - return &ctypes.ResultUnsubscribe{}, nil -} - -// UnsubscribeAllWS calls original client's UnsubscribeAll using remote address -// as a subscriber. -func (w Wrapper) UnsubscribeAllWS(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { - err := w.Client.UnsubscribeAll(context.Background(), ctx.RemoteAddr()) - if err != nil { - return nil, err - } - return &ctypes.ResultUnsubscribe{}, nil -} - -// // WrappedSwitch creates a websocket connection that auto-verifies any info -// // coming through before passing it along. 
-// //
-// // Since the verification takes 1-2 rpc calls, this is obviously only for
-// // relatively low-throughput situations that can tolerate a bit extra latency
-// type WrappedSwitch struct {
-// 	types.EventSwitch
-// 	client rpcclient.Client
-// }
-
-// // FireEvent verifies any block or header returned from the eventswitch
-// func (s WrappedSwitch) FireEvent(event string, data events.EventData) {
-// 	tm, ok := data.(types.TMEventData)
-// 	if !ok {
-// 		fmt.Printf("bad type %#v\n", data)
-// 		return
-// 	}
-
-// 	// check to validate it if possible, and drop if not valid
-// 	switch t := tm.(type) {
-// 	case types.EventDataNewBlockHeader:
-// 		err := verifyHeader(s.client, t.Header)
-// 		if err != nil {
-// 			fmt.Printf("Invalid header: %#v\n", err)
-// 			return
-// 		}
-// 	case types.EventDataNewBlock:
-// 		err := verifyBlock(s.client, t.Block)
-// 		if err != nil {
-// 			fmt.Printf("Invalid block: %#v\n", err)
-// 			return
-// 		}
-// 		// TODO: can we verify tx as well? anything else
-// 	}
-
-// 	// looks good, we fire it
-// 	s.EventSwitch.FireEvent(event, data)
-// }
-
-// func verifyHeader(c rpcclient.Client, head *types.Header) error {
-// 	// get a checkpoint to verify from
-// 	commit, err := c.Commit(&head.Height)
-// 	if err != nil {
-// 		return err
-// 	}
-// 	check := certclient.CommitFromResult(commit)
-// 	return ValidateHeader(head, check)
-// }
-//
-// func verifyBlock(c rpcclient.Client, block *types.Block) error {
-// 	// get a checkpoint to verify from
-// 	commit, err := c.Commit(&block.Height)
-// 	if err != nil {
-// 		return err
-// 	}
-// 	check := certclient.CommitFromResult(commit)
-// 	return ValidateBlock(block, check)
-// }
diff --git a/lite/types.go b/lite/types.go
deleted file mode 100644
index 643f5ad48..000000000
--- a/lite/types.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package lite
-
-import (
-	"github.com/tendermint/tendermint/types"
-)
-
-// Verifier checks the votes to make sure the block really is signed properly.
-// Verifier must know the current or recent set of validators by some other
-// means.
-type Verifier interface {
-	Verify(sheader types.SignedHeader) error
-	ChainID() string
-}
diff --git a/lite2/client.go b/lite2/client.go
deleted file mode 100644
index 6fcb7d173..000000000
--- a/lite2/client.go
+++ /dev/null
@@ -1,1113 +0,0 @@
-package lite
-
-import (
-	"bytes"
-	"fmt"
-	"math/rand"
-	"sync"
-	"time"
-
-	"github.com/pkg/errors"
-
-	"github.com/tendermint/tendermint/libs/log"
-	tmmath "github.com/tendermint/tendermint/libs/math"
-	"github.com/tendermint/tendermint/lite2/provider"
-	"github.com/tendermint/tendermint/lite2/store"
-	"github.com/tendermint/tendermint/types"
-)
-
-type mode byte
-
-const (
-	sequential mode = iota + 1
-	skipping
-
-	defaultPruningSize      = 1000
-	defaultMaxRetryAttempts = 10
-	// For bisection, when using the cache of headers from the previous batch,
-	// they will always be at a height greater than 1/2 (normal bisection) so to
-	// find something in between the range, 9/16 is used.
-	bisectionNumerator   = 9
-	bisectionDenominator = 16
-
-	// 10s should cover most of the clients.
-	// References:
-	// - http://vancouver-webpages.com/time/web.html
-	// - https://blog.codinghorror.com/keeping-time-on-the-pc/
-	defaultMaxClockDrift = 10 * time.Second
-)
-
-// Option sets a parameter for the light client.
-type Option func(*Client)
-
-// SequentialVerification option configures the light client to sequentially
-// check the headers (every header, in ascending height order). Note this is
-// much slower than SkippingVerification, albeit more secure.
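
The Verifier interface above is deliberately tiny, which makes it easy to stub out in tests. A minimal sketch of a custom implementation (illustrative only, not part of this diff; the staticVerifier name is hypothetical):

package lite_test

import (
	"errors"

	"github.com/tendermint/tendermint/lite"
	"github.com/tendermint/tendermint/types"
)

// staticVerifier trusts a single chain ID and only checks that a commit is
// present. A real Verifier (e.g. DynamicVerifier) checks the votes against a
// known validator set.
type staticVerifier struct{ chainID string }

var _ lite.Verifier = (*staticVerifier)(nil)

func (v *staticVerifier) ChainID() string { return v.chainID }

func (v *staticVerifier) Verify(sheader types.SignedHeader) error {
	if sheader.Header == nil || sheader.ChainID != v.chainID {
		return errors.New("missing header or wrong chain")
	}
	if sheader.Commit == nil {
		return errors.New("missing commit")
	}
	return nil
}
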
-func SequentialVerification() Option { - return func(c *Client) { - c.verificationMode = sequential - } -} - -// SkippingVerification option configures the light client to skip headers as -// long as {trustLevel} of the old validator set signed the new header. The -// bisection algorithm from the specification is used for finding the minimal -// "trust path". -// -// trustLevel - fraction of the old validator set (in terms of voting power), -// which must sign the new header in order for us to trust it. NOTE this only -// applies to non-adjacent headers. For adjacent headers, sequential -// verification is used. -func SkippingVerification(trustLevel tmmath.Fraction) Option { - return func(c *Client) { - c.verificationMode = skipping - c.trustLevel = trustLevel - } -} - -// PruningSize option sets the maximum amount of headers & validator set pairs -// that the light client stores. When Prune() is run, all headers (along with -// the associated validator sets) that are earlier than the h amount of headers -// will be removed from the store. Default: 1000. A pruning size of 0 will not -// prune the lite client at all. -func PruningSize(h uint16) Option { - return func(c *Client) { - c.pruningSize = h - } -} - -// ConfirmationFunction option can be used to prompt to confirm an action. For -// example, remove newer headers if the light client is being reset with an -// older header. No confirmation is required by default! -func ConfirmationFunction(fn func(action string) bool) Option { - return func(c *Client) { - c.confirmationFn = fn - } -} - -// Logger option can be used to set a logger for the client. -func Logger(l log.Logger) Option { - return func(c *Client) { - c.logger = l - } -} - -// MaxRetryAttempts option can be used to set max attempts before replacing -// primary with a witness. -func MaxRetryAttempts(max uint16) Option { - return func(c *Client) { - c.maxRetryAttempts = max - } -} - -// MaxClockDrift defines how much new (untrusted) header's Time can drift into -// the future. Default: 10s. -func MaxClockDrift(d time.Duration) Option { - return func(c *Client) { - c.maxClockDrift = d - } -} - -// Client represents a light client, connected to a single chain, which gets -// headers from a primary provider, verifies them either sequentially or by -// skipping some and stores them in a trusted store (usually, a local FS). -// -// Default verification: SkippingVerification(DefaultTrustLevel) -type Client struct { - chainID string - trustingPeriod time.Duration // see TrustOptions.Period - verificationMode mode - trustLevel tmmath.Fraction - maxRetryAttempts uint16 // see MaxRetryAttempts option - maxClockDrift time.Duration - - // Mutex for locking during changes of the lite clients providers - providerMutex sync.Mutex - // Primary provider of new headers. - primary provider.Provider - // See Witnesses option - witnesses []provider.Provider - - // Where trusted headers are stored. - trustedStore store.Store - // Highest trusted header from the store (height=H). - latestTrustedHeader *types.SignedHeader - // Highest validator set from the store (height=H). - latestTrustedVals *types.ValidatorSet - - // See RemoveNoLongerTrustedHeadersPeriod option - pruningSize uint16 - // See ConfirmationFunction option - confirmationFn func(action string) bool - - quit chan struct{} - - logger log.Logger -} - -// NewClient returns a new light client. It returns an error if it fails to -// obtain the header & vals from the primary or they are invalid (e.g. 
trust -// hash does not match with the one from the header). -// -// Witnesses are providers, which will be used for cross-checking the primary -// provider. At least one witness must be given. A witness can become a primary -// iff the current primary is unavailable. -// -// See all Option(s) for the additional configuration. -func NewClient( - chainID string, - trustOptions TrustOptions, - primary provider.Provider, - witnesses []provider.Provider, - trustedStore store.Store, - options ...Option) (*Client, error) { - - if err := trustOptions.ValidateBasic(); err != nil { - return nil, fmt.Errorf("invalid TrustOptions: %w", err) - } - - c, err := NewClientFromTrustedStore(chainID, trustOptions.Period, primary, witnesses, trustedStore, options...) - if err != nil { - return nil, err - } - - if c.latestTrustedHeader != nil { - c.logger.Info("Checking trusted header using options") - if err := c.checkTrustedHeaderUsingOptions(trustOptions); err != nil { - return nil, err - } - } - - if c.latestTrustedHeader == nil || c.latestTrustedHeader.Height < trustOptions.Height { - c.logger.Info("Downloading trusted header using options") - if err := c.initializeWithTrustOptions(trustOptions); err != nil { - return nil, err - } - } - - return c, err -} - -// NewClientFromTrustedStore initializes existing client from the trusted store. -// -// See NewClient -func NewClientFromTrustedStore( - chainID string, - trustingPeriod time.Duration, - primary provider.Provider, - witnesses []provider.Provider, - trustedStore store.Store, - options ...Option) (*Client, error) { - - c := &Client{ - chainID: chainID, - trustingPeriod: trustingPeriod, - verificationMode: skipping, - trustLevel: DefaultTrustLevel, - maxRetryAttempts: defaultMaxRetryAttempts, - maxClockDrift: defaultMaxClockDrift, - primary: primary, - witnesses: witnesses, - trustedStore: trustedStore, - pruningSize: defaultPruningSize, - confirmationFn: func(action string) bool { return true }, - quit: make(chan struct{}), - logger: log.NewNopLogger(), - } - - for _, o := range options { - o(c) - } - - // Validate the number of witnesses. - if len(c.witnesses) < 1 { - return nil, errNoWitnesses{} - } - - // Verify witnesses are all on the same chain. - for i, w := range witnesses { - if w.ChainID() != chainID { - return nil, fmt.Errorf("witness #%d: %v is on another chain %s, expected %s", - i, w, w.ChainID(), chainID) - } - } - - // Validate trust level. - if err := ValidateTrustLevel(c.trustLevel); err != nil { - return nil, err - } - - if err := c.restoreTrustedHeaderAndVals(); err != nil { - return nil, err - } - - return c, nil -} - -// restoreTrustedHeaderAndVals loads trustedHeader and trustedVals from -// trustedStore. 
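
For orientation, a construction sketch for the client above. The addresses, chain ID, and trust root are placeholders, and the db-backed store constructor (dbs.New) is assumed from lite2/store/db:

package main

import (
	"time"

	dbm "github.com/tendermint/tm-db"

	lite "github.com/tendermint/tendermint/lite2"
	"github.com/tendermint/tendermint/lite2/provider"
	httpp "github.com/tendermint/tendermint/lite2/provider/http"
	dbs "github.com/tendermint/tendermint/lite2/store/db"
)

func main() {
	primary, err := httpp.New("test-chain", "tcp://localhost:26657")
	if err != nil {
		panic(err)
	}
	witness, err := httpp.New("test-chain", "tcp://witness-host:26657")
	if err != nil {
		panic(err)
	}

	// Height and Hash form the subjective trust root, obtained out of band
	// (e.g. from a block explorer); the values below are placeholders.
	c, err := lite.NewClient(
		"test-chain",
		lite.TrustOptions{
			Period: 504 * time.Hour, // must be shorter than the unbonding period
			Height: 100,
			Hash:   []byte("placeholder: 32-byte header hash"),
		},
		primary,
		[]provider.Provider{witness},
		dbs.New(dbm.NewMemDB(), "test-chain"),
		lite.SkippingVerification(lite.DefaultTrustLevel),
	)
	if err != nil {
		panic(err)
	}

	// Advance to the latest header the primary knows about.
	if _, err := c.Update(time.Now()); err != nil {
		panic(err)
	}
}
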
-func (c *Client) restoreTrustedHeaderAndVals() error { - lastHeight, err := c.trustedStore.LastSignedHeaderHeight() - if err != nil { - return fmt.Errorf("can't get last trusted header height: %w", err) - } - - if lastHeight > 0 { - trustedHeader, err := c.trustedStore.SignedHeader(lastHeight) - if err != nil { - return fmt.Errorf("can't get last trusted header: %w", err) - } - - trustedVals, err := c.trustedStore.ValidatorSet(lastHeight) - if err != nil { - return fmt.Errorf("can't get last trusted validators: %w", err) - } - - c.latestTrustedHeader = trustedHeader - c.latestTrustedVals = trustedVals - - c.logger.Info("Restored trusted header and vals", "height", lastHeight) - } - - return nil -} - -// if options.Height: -// -// 1) ahead of trustedHeader.Height => fetch header (same height as -// trustedHeader) from primary provider and check it's hash matches the -// trustedHeader's hash (if not, remove trustedHeader and all the headers -// before) -// -// 2) equals trustedHeader.Height => check options.Hash matches the -// trustedHeader's hash (if not, remove trustedHeader and all the headers -// before) -// -// 3) behind trustedHeader.Height => remove all the headers between -// options.Height and trustedHeader.Height, update trustedHeader, then -// check options.Hash matches the trustedHeader's hash (if not, remove -// trustedHeader and all the headers before) -// -// The intuition here is the user is always right. I.e. if she decides to reset -// the light client with an older header, there must be a reason for it. -func (c *Client) checkTrustedHeaderUsingOptions(options TrustOptions) error { - var primaryHash []byte - switch { - case options.Height > c.latestTrustedHeader.Height: - h, err := c.signedHeaderFromPrimary(c.latestTrustedHeader.Height) - if err != nil { - return err - } - primaryHash = h.Hash() - case options.Height == c.latestTrustedHeader.Height: - primaryHash = options.Hash - case options.Height < c.latestTrustedHeader.Height: - c.logger.Info("Client initialized with old header (trusted is more recent)", - "old", options.Height, - "trustedHeight", c.latestTrustedHeader.Height, - "trustedHash", hash2str(c.latestTrustedHeader.Hash())) - - action := fmt.Sprintf( - "Rollback to %d (%X)? Note this will remove newer headers up to %d (%X)", - options.Height, options.Hash, - c.latestTrustedHeader.Height, c.latestTrustedHeader.Hash()) - if c.confirmationFn(action) { - // remove all the headers (options.Height, trustedHeader.Height] - err := c.cleanupAfter(options.Height) - if err != nil { - return fmt.Errorf("cleanupAfter(%d): %w", options.Height, err) - } - - c.logger.Info("Rolled back to older header (newer headers were removed)", - "old", options.Height) - } else { - return nil - } - - primaryHash = options.Hash - } - - if !bytes.Equal(primaryHash, c.latestTrustedHeader.Hash()) { - c.logger.Info("Prev. trusted header's hash (h1) doesn't match hash from primary provider (h2)", - "h1", hash2str(c.latestTrustedHeader.Hash()), "h2", hash2str(primaryHash)) - - action := fmt.Sprintf( - "Prev. trusted header's hash %X doesn't match hash %X from primary provider. Remove all the stored headers?", - c.latestTrustedHeader.Hash(), primaryHash) - if c.confirmationFn(action) { - err := c.Cleanup() - if err != nil { - return fmt.Errorf("failed to cleanup: %w", err) - } - } else { - return errors.New("refused to remove the stored headers despite hashes mismatch") - } - } - - return nil -} - -// initializeWithTrustOptions fetches the weakly-trusted header and vals from -// primary provider. 
The header is cross-checked with witnesses for additional -// security. -func (c *Client) initializeWithTrustOptions(options TrustOptions) error { - // 1) Fetch and verify the header. - h, err := c.signedHeaderFromPrimary(options.Height) - if err != nil { - return err - } - - // NOTE: - Verify func will check if it's expired or not. - // - h.Time is not being checked against time.Now() because we don't - // want to add yet another argument to NewClient* functions. - if err := h.ValidateBasic(c.chainID); err != nil { - return err - } - - if !bytes.Equal(h.Hash(), options.Hash) { - return fmt.Errorf("expected header's hash %X, but got %X", options.Hash, h.Hash()) - } - - err = c.compareNewHeaderWithWitnesses(h) - if err != nil { - return err - } - - // 2) Fetch and verify the vals. - vals, err := c.validatorSetFromPrimary(options.Height) - if err != nil { - return err - } - - if !bytes.Equal(h.ValidatorsHash, vals.Hash()) { - return fmt.Errorf("expected header's validators (%X) to match those that were supplied (%X)", - h.ValidatorsHash, - vals.Hash(), - ) - } - - // Ensure that +2/3 of validators signed correctly. - err = vals.VerifyCommit(c.chainID, h.Commit.BlockID, h.Height, h.Commit) - if err != nil { - return fmt.Errorf("invalid commit: %w", err) - } - - // 3) Persist both of them and continue. - return c.updateTrustedHeaderAndVals(h, vals) -} - -// TrustedHeader returns a trusted header at the given height (0 - the latest). -// -// Headers along with validator sets, which can't be trusted anymore, are -// removed once a day (can be changed with RemoveNoLongerTrustedHeadersPeriod -// option). -// . -// height must be >= 0. -// -// It returns an error if: -// - there are some issues with the trusted store, although that should not -// happen normally; -// - negative height is passed; -// - header has not been verified yet and is therefore not in the store -// -// Safe for concurrent use by multiple goroutines. -func (c *Client) TrustedHeader(height int64) (*types.SignedHeader, error) { - height, err := c.compareWithLatestHeight(height) - if err != nil { - return nil, err - } - return c.trustedStore.SignedHeader(height) -} - -// TrustedValidatorSet returns a trusted validator set at the given height (0 - -// latest). The second return parameter is the height used (useful if 0 was -// passed; otherwise can be ignored). -// -// height must be >= 0. -// -// Headers along with validator sets are -// removed once a day (can be changed with RemoveNoLongerTrustedHeadersPeriod -// option). -// -// Function returns an error if: -// - there are some issues with the trusted store, although that should not -// happen normally; -// - negative height is passed; -// - header signed by that validator set has not been verified yet -// -// Safe for concurrent use by multiple goroutines. 
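
A short usage sketch for the two accessors above (helper name hypothetical); in both calls height 0 resolves to the latest trusted entry:

import (
	"fmt"

	lite "github.com/tendermint/tendermint/lite2"
)

func printLatestTrusted(c *lite.Client) error {
	h, err := c.TrustedHeader(0) // 0 = latest trusted header
	if err != nil {
		return err
	}
	vals, height, err := c.TrustedValidatorSet(0) // second value = resolved height
	if err != nil {
		return err
	}
	fmt.Printf("trusted height=%d header=%X vals=%X\n", height, h.Hash(), vals.Hash())
	return nil
}
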
-func (c *Client) TrustedValidatorSet(height int64) (valSet *types.ValidatorSet, heightUsed int64, err error) { - heightUsed, err = c.compareWithLatestHeight(height) - if err != nil { - return nil, heightUsed, err - } - valSet, err = c.trustedStore.ValidatorSet(heightUsed) - if err != nil { - return nil, heightUsed, err - } - return valSet, heightUsed, err -} - -func (c *Client) compareWithLatestHeight(height int64) (int64, error) { - latestHeight, err := c.LastTrustedHeight() - if err != nil { - return 0, fmt.Errorf("can't get last trusted height: %w", err) - } - if latestHeight == -1 { - return 0, errors.New("no headers exist") - } - - switch { - case height > latestHeight: - return 0, fmt.Errorf("unverified header/valset requested (latest: %d)", latestHeight) - case height == 0: - return latestHeight, nil - case height < 0: - return 0, errors.New("negative height") - } - - return height, nil -} - -// VerifyHeaderAtHeight fetches header and validators at the given height -// and calls VerifyHeader. It returns header immediately if such exists in -// trustedStore (no verification is needed). -// -// height must be > 0. -// -// It returns provider.ErrSignedHeaderNotFound if header is not found by -// primary. -func (c *Client) VerifyHeaderAtHeight(height int64, now time.Time) (*types.SignedHeader, error) { - if height <= 0 { - return nil, errors.New("negative or zero height") - } - - // Check if header already verified. - h, err := c.TrustedHeader(height) - if err == nil { - c.logger.Info("Header has already been verified", "height", height, "hash", hash2str(h.Hash())) - // Return already trusted header - return h, nil - } - - // Request the header and the vals. - newHeader, newVals, err := c.fetchHeaderAndValsAtHeight(height) - if err != nil { - return nil, err - } - - return newHeader, c.verifyHeader(newHeader, newVals, now) -} - -// VerifyHeader verifies new header against the trusted state. It returns -// immediately if newHeader exists in trustedStore (no verification is -// needed). Else it performs one of the two types of verification: -// -// SequentialVerification: verifies that 2/3 of the trusted validator set has -// signed the new header. If the headers are not adjacent, **all** intermediate -// headers will be requested. Intermediate headers are not saved to database. -// -// SkippingVerification(trustLevel): verifies that {trustLevel} of the trusted -// validator set has signed the new header. If it's not the case and the -// headers are not adjacent, bisection is performed and necessary (not all) -// intermediate headers will be requested. See the specification for details. -// Intermediate headers are not saved to database. -// https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md -// -// If the header, which is older than the currently trusted header, is -// requested and the light client does not have it, VerifyHeader will perform: -// a) bisection verification if nearest trusted header is found & not expired -// b) backwards verification in all other cases -// -// It returns ErrOldHeaderExpired if the latest trusted header expired. -// -// If the primary provides an invalid header (ErrInvalidHeader), it is rejected -// and replaced by another provider until all are exhausted. -// -// If, at any moment, SignedHeader or ValidatorSet are not found by the primary -// provider, provider.ErrSignedHeaderNotFound / -// provider.ErrValidatorSetNotFound error is returned. 
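
Callers typically branch on the error types from lite2/errors.go and lite2/provider/errors.go (both deleted further down in this diff). A hedged sketch (helper name hypothetical):

import (
	"errors"
	"fmt"
	"time"

	lite "github.com/tendermint/tendermint/lite2"
	"github.com/tendermint/tendermint/lite2/provider"
	"github.com/tendermint/tendermint/types"
)

func fetchVerified(c *lite.Client, height int64) (*types.SignedHeader, error) {
	h, err := c.VerifyHeaderAtHeight(height, time.Now())
	if err == nil {
		return h, nil
	}

	var expired lite.ErrOldHeaderExpired
	switch {
	case errors.Is(err, provider.ErrSignedHeaderNotFound):
		return nil, fmt.Errorf("primary has no header %d yet: %w", height, err)
	case errors.As(err, &expired):
		return nil, fmt.Errorf("trusted state expired, reset the client: %w", err)
	default:
		return nil, err
	}
}
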
-func (c *Client) VerifyHeader(newHeader *types.SignedHeader, newVals *types.ValidatorSet, now time.Time) error { - if newHeader.Height <= 0 { - return errors.New("negative or zero height") - } - - // Check if newHeader already verified. - h, err := c.TrustedHeader(newHeader.Height) - if err == nil { - // Make sure it's the same header. - if !bytes.Equal(h.Hash(), newHeader.Hash()) { - return fmt.Errorf("existing trusted header %X does not match newHeader %X", h.Hash(), newHeader.Hash()) - } - c.logger.Info("Header has already been verified", - "height", newHeader.Height, "hash", hash2str(newHeader.Hash())) - return nil - } - - return c.verifyHeader(newHeader, newVals, now) -} - -func (c *Client) verifyHeader(newHeader *types.SignedHeader, newVals *types.ValidatorSet, now time.Time) error { - c.logger.Info("VerifyHeader", "height", newHeader.Height, "hash", hash2str(newHeader.Hash()), - "vals", hash2str(newVals.Hash())) - - var err error - - // 1) If going forward, perform either bisection or sequential verification. - if newHeader.Height >= c.latestTrustedHeader.Height { - switch c.verificationMode { - case sequential: - err = c.sequence(c.latestTrustedHeader, newHeader, newVals, now) - case skipping: - err = c.bisection(c.latestTrustedHeader, c.latestTrustedVals, newHeader, newVals, now) - default: - panic(fmt.Sprintf("Unknown verification mode: %b", c.verificationMode)) - } - } else { - // 2) If verifying before the first trusted header, perform backwards - // verification. - var ( - closestHeader *types.SignedHeader - firstHeaderHeight int64 - ) - firstHeaderHeight, err = c.FirstTrustedHeight() - if err != nil { - return fmt.Errorf("can't get first header height: %w", err) - } - if newHeader.Height < firstHeaderHeight { - closestHeader, err = c.TrustedHeader(firstHeaderHeight) - if err != nil { - return fmt.Errorf("can't get first signed header: %w", err) - } - if HeaderExpired(closestHeader, c.trustingPeriod, now) { - closestHeader = c.latestTrustedHeader - } - err = c.backwards(closestHeader, newHeader, now) - } else { - // 3) OR if between trusted headers where the nearest has not expired, - // perform bisection verification, else backwards. 
- closestHeader, err = c.trustedStore.SignedHeaderBefore(newHeader.Height) - if err != nil { - return fmt.Errorf("can't get signed header before height %d: %w", newHeader.Height, err) - } - var closestValidatorSet *types.ValidatorSet - if c.verificationMode == sequential || HeaderExpired(closestHeader, c.trustingPeriod, now) { - err = c.backwards(c.latestTrustedHeader, newHeader, now) - } else { - closestValidatorSet, _, err = c.TrustedValidatorSet(closestHeader.Height) - if err != nil { - return fmt.Errorf("can't get validator set at height %d: %w", closestHeader.Height, err) - } - err = c.bisection(closestHeader, closestValidatorSet, newHeader, newVals, now) - } - } - } - if err != nil { - c.logger.Error("Can't verify", "err", err) - return err - } - // 4) Compare header with other witnesses - if err := c.compareNewHeaderWithWitnesses(newHeader); err != nil { - c.logger.Error("Error when comparing new header with witnesses", "err", err) - return err - } - - // 5) Once verified, save and return - return c.updateTrustedHeaderAndVals(newHeader, newVals) -} - -// see VerifyHeader -func (c *Client) sequence( - initiallyTrustedHeader *types.SignedHeader, - newHeader *types.SignedHeader, - newVals *types.ValidatorSet, - now time.Time) error { - - var ( - trustedHeader = initiallyTrustedHeader - - interimHeader *types.SignedHeader - interimVals *types.ValidatorSet - - err error - ) - - for height := initiallyTrustedHeader.Height + 1; height <= newHeader.Height; height++ { - // 1) Fetch interim headers and vals if needed. - if height == newHeader.Height { // last header - interimHeader, interimVals = newHeader, newVals - } else { // intermediate headers - interimHeader, interimVals, err = c.fetchHeaderAndValsAtHeight(height) - if err != nil { - return err - } - } - - // 2) Verify them - c.logger.Debug("Verify newHeader against trustedHeader", - "trustedHeight", trustedHeader.Height, - "trustedHash", hash2str(trustedHeader.Hash()), - "newHeight", interimHeader.Height, - "newHash", hash2str(interimHeader.Hash())) - - err = VerifyAdjacent(c.chainID, trustedHeader, interimHeader, interimVals, - c.trustingPeriod, now, c.maxClockDrift) - if err != nil { - err = fmt.Errorf("verify adjacent from #%d to #%d failed: %w", - trustedHeader.Height, interimHeader.Height, err) - - switch errors.Unwrap(err).(type) { - case ErrInvalidHeader: - c.logger.Error("primary sent invalid header -> replacing", "err", err) - replaceErr := c.replacePrimaryProvider() - if replaceErr != nil { - c.logger.Error("Can't replace primary", "err", replaceErr) - return err // return original error - } - // attempt to verify header again - height-- - continue - default: - return err - } - } - - // 3) Update trustedHeader - trustedHeader = interimHeader - } - - return nil -} - -// see VerifyHeader -// Bisection finds the middle header between a trusted and new header, reiterating the action until it -// verifies a header. A cache of headers requested by the primary is kept such that when a -// verification is made, and the light client tries again to verify the new header in the middle, -// the light client does not need to ask for all the same headers again. 
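
The 9/16 pivot is plain integer arithmetic; a worked example of the constants above (helper name hypothetical):

// pivotHeight mirrors the computation inside bisection: with trustedHeight=1
// and newHeight=100 the first pivot is (100+1)*9/16 = 56, just past the
// midpoint, so headers cached from a previous batch (always above 1/2) can
// be reused.
func pivotHeight(trustedHeight, newHeight int64) int64 {
	const bisectionNumerator, bisectionDenominator = 9, 16
	return (newHeight + trustedHeight) * bisectionNumerator / bisectionDenominator
}
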
-func (c *Client) bisection( - initiallyTrustedHeader *types.SignedHeader, - initiallyTrustedVals *types.ValidatorSet, - newHeader *types.SignedHeader, - newVals *types.ValidatorSet, - now time.Time) error { - - type headerSet struct { - sh *types.SignedHeader - valSet *types.ValidatorSet - } - - var ( - headerCache = []headerSet{{newHeader, newVals}} - depth = 0 - - trustedHeader = initiallyTrustedHeader - trustedVals = initiallyTrustedVals - ) - - for { - c.logger.Debug("Verify newHeader against trustedHeader", - "trustedHeight", trustedHeader.Height, - "trustedHash", hash2str(trustedHeader.Hash()), - "newHeight", headerCache[depth].sh.Height, - "newHash", hash2str(headerCache[depth].sh.Hash())) - - err := Verify(c.chainID, trustedHeader, trustedVals, headerCache[depth].sh, headerCache[depth].valSet, - c.trustingPeriod, now, c.maxClockDrift, c.trustLevel) - switch err.(type) { - case nil: - // Have we verified the last header - if depth == 0 { - return nil - } - // If not, update the lower bound to the previous upper bound - trustedHeader, trustedVals = headerCache[depth].sh, headerCache[depth].valSet - // Remove the untrusted header at the lower bound in the header cache - it's no longer useful - headerCache = headerCache[:depth] - // Reset the cache depth so that we start from the upper bound again - depth = 0 - - case ErrNewValSetCantBeTrusted: - // do add another header to the end of the cache - if depth == len(headerCache)-1 { - pivotHeight := (headerCache[depth].sh.Height + trustedHeader. - Height) * bisectionNumerator / bisectionDenominator - interimHeader, interimVals, err := c.fetchHeaderAndValsAtHeight(pivotHeight) - if err != nil { - return err - } - headerCache = append(headerCache, headerSet{interimHeader, interimVals}) - } - depth++ - - case ErrInvalidHeader: - c.logger.Error("primary sent invalid header -> replacing", "err", err) - replaceErr := c.replacePrimaryProvider() - if replaceErr != nil { - c.logger.Error("Can't replace primary", "err", replaceErr) - // return original error - return fmt.Errorf("verify non adjacent from #%d to #%d failed: %w", - trustedHeader.Height, headerCache[depth].sh.Height, err) - } - // attempt to verify the header again - continue - - default: - return fmt.Errorf("verify non adjacent from #%d to #%d failed: %w", - trustedHeader.Height, headerCache[depth].sh.Height, err) - } - } -} - -// LastTrustedHeight returns a last trusted height. -1 and nil are returned if -// there are no trusted headers. -// -// Safe for concurrent use by multiple goroutines. -func (c *Client) LastTrustedHeight() (int64, error) { - return c.trustedStore.LastSignedHeaderHeight() -} - -// FirstTrustedHeight returns a first trusted height. -1 and nil are returned if -// there are no trusted headers. -// -// Safe for concurrent use by multiple goroutines. -func (c *Client) FirstTrustedHeight() (int64, error) { - return c.trustedStore.FirstSignedHeaderHeight() -} - -// ChainID returns the chain ID the light client was configured with. -// -// Safe for concurrent use by multiple goroutines. -func (c *Client) ChainID() string { - return c.chainID -} - -// Primary returns the primary provider. -// -// NOTE: provider may be not safe for concurrent access. -func (c *Client) Primary() provider.Provider { - c.providerMutex.Lock() - defer c.providerMutex.Unlock() - return c.primary -} - -// Witnesses returns the witness providers. -// -// NOTE: providers may be not safe for concurrent access. 
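
The type switch inside bisection above is the heart of the algorithm. A compressed sketch of the three outcomes (helper name hypothetical; Verify is the package-level function called by bisection):

import (
	"time"

	tmmath "github.com/tendermint/tendermint/libs/math"
	lite "github.com/tendermint/tendermint/lite2"
	"github.com/tendermint/tendermint/types"
)

// classifyVerify maps a Verify result onto the action bisection takes.
func classifyVerify(chainID string,
	trusted *types.SignedHeader, trustedVals *types.ValidatorSet,
	untrusted *types.SignedHeader, untrustedVals *types.ValidatorSet,
	trustingPeriod, maxClockDrift time.Duration, trustLevel tmmath.Fraction) string {

	err := lite.Verify(chainID, trusted, trustedVals, untrusted, untrustedVals,
		trustingPeriod, time.Now(), maxClockDrift, trustLevel)
	switch err.(type) {
	case nil:
		return "verified: move the trusted bound up to this header"
	case lite.ErrNewValSetCantBeTrusted:
		return "bisect: fetch a pivot header between the two bounds"
	case lite.ErrInvalidHeader:
		return "reject: replace the primary provider and retry"
	default:
		return "fail: " + err.Error()
	}
}
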
-func (c *Client) Witnesses() []provider.Provider { - c.providerMutex.Lock() - defer c.providerMutex.Unlock() - return c.witnesses -} - -// Cleanup removes all the data (headers and validator sets) stored. Note: the -// client must be stopped at this point. -func (c *Client) Cleanup() error { - c.logger.Info("Removing all the data") - c.latestTrustedHeader = nil - c.latestTrustedVals = nil - return c.trustedStore.Prune(0) -} - -// cleanupAfter deletes all headers & validator sets after +height+. It also -// resets latestTrustedHeader to the latest header. -func (c *Client) cleanupAfter(height int64) error { - prevHeight := c.latestTrustedHeader.Height - - for { - h, err := c.trustedStore.SignedHeaderBefore(prevHeight) - if err == store.ErrSignedHeaderNotFound || (h != nil && h.Height <= height) { - break - } else if err != nil { - return fmt.Errorf("failed to get header before %d: %w", prevHeight, err) - } - - err = c.trustedStore.DeleteSignedHeaderAndValidatorSet(h.Height) - if err != nil { - c.logger.Error("can't remove a trusted header & validator set", "err", err, - "height", h.Height) - } - - prevHeight = h.Height - } - - c.latestTrustedHeader = nil - c.latestTrustedVals = nil - err := c.restoreTrustedHeaderAndVals() - if err != nil { - return err - } - - return nil -} - -func (c *Client) updateTrustedHeaderAndVals(h *types.SignedHeader, vals *types.ValidatorSet) error { - if !bytes.Equal(h.ValidatorsHash, vals.Hash()) { - return fmt.Errorf("expected validator's hash %X, but got %X", h.ValidatorsHash, vals.Hash()) - } - - if err := c.trustedStore.SaveSignedHeaderAndValidatorSet(h, vals); err != nil { - return fmt.Errorf("failed to save trusted header: %w", err) - } - - if c.pruningSize > 0 { - if err := c.trustedStore.Prune(c.pruningSize); err != nil { - return fmt.Errorf("prune: %w", err) - } - } - - if c.latestTrustedHeader == nil || h.Height > c.latestTrustedHeader.Height { - c.latestTrustedHeader = h - c.latestTrustedVals = vals - } - - return nil -} - -// fetch header and validators for the given height (0 - latest) from primary -// provider. -func (c *Client) fetchHeaderAndValsAtHeight(height int64) (*types.SignedHeader, *types.ValidatorSet, error) { - h, err := c.signedHeaderFromPrimary(height) - if err != nil { - return nil, nil, fmt.Errorf("failed to obtain the header #%d: %w", height, err) - } - vals, err := c.validatorSetFromPrimary(height) - if err != nil { - return nil, nil, fmt.Errorf("failed to obtain the vals #%d: %w", height, err) - } - return h, vals, nil -} - -// backwards verification (see VerifyHeaderBackwards func in the spec) verifies -// headers before a trusted header. If a sent header is invalid the primary is -// replaced with another provider and the operation is repeated. 
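
Backwards verification leans on the hash chain rather than signatures: header H+1 commits to header H through LastBlockID. The essential link check is approximately the following (helper name hypothetical; the real VerifyBackwards performs additional basic validation):

import (
	"bytes"

	"github.com/tendermint/tendermint/types"
)

// linksBackwards reports whether `older` is the block that `newer` builds on,
// i.e. the per-height link that backwards verification walks down the chain.
func linksBackwards(older, newer *types.SignedHeader) bool {
	return bytes.Equal(newer.LastBlockID.Hash, older.Hash())
}
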
-func (c *Client) backwards( - initiallyTrustedHeader *types.SignedHeader, - newHeader *types.SignedHeader, - now time.Time) error { - - if HeaderExpired(initiallyTrustedHeader, c.trustingPeriod, now) { - c.logger.Error("Header Expired") - return ErrOldHeaderExpired{initiallyTrustedHeader.Time.Add(c.trustingPeriod), now} - } - - var ( - trustedHeader = initiallyTrustedHeader - interimHeader *types.SignedHeader - err error - ) - - for trustedHeader.Height > newHeader.Height { - interimHeader, err = c.signedHeaderFromPrimary(trustedHeader.Height - 1) - if err != nil { - return fmt.Errorf("failed to obtain the header at height #%d: %w", trustedHeader.Height-1, err) - } - c.logger.Debug("Verify newHeader against trustedHeader", - "trustedHeight", trustedHeader.Height, - "trustedHash", hash2str(trustedHeader.Hash()), - "newHeight", interimHeader.Height, - "newHash", hash2str(interimHeader.Hash())) - if err := VerifyBackwards(c.chainID, interimHeader, trustedHeader); err != nil { - c.logger.Error("primary sent invalid header -> replacing", "err", err) - if replaceErr := c.replacePrimaryProvider(); replaceErr != nil { - c.logger.Error("Can't replace primary", "err", replaceErr) - // return original error - return fmt.Errorf("verify backwards from %d to %d failed: %w", - trustedHeader.Height, interimHeader.Height, err) - } - } - - trustedHeader = interimHeader - } - - // Initially trusted header might have expired at this point. - if HeaderExpired(initiallyTrustedHeader, c.trustingPeriod, now) { - return ErrOldHeaderExpired{initiallyTrustedHeader.Time.Add(c.trustingPeriod), now} - } - - return nil -} - -// compare header with all witnesses provided. -func (c *Client) compareNewHeaderWithWitnesses(h *types.SignedHeader) error { - c.providerMutex.Lock() - defer c.providerMutex.Unlock() - - // 1. Make sure AT LEAST ONE witness returns the same header. - headerMatched := false - witnessesToRemove := make([]int, 0) - for attempt := uint16(1); attempt <= c.maxRetryAttempts; attempt++ { - if len(c.witnesses) == 0 { - return errNoWitnesses{} - } - - for i, witness := range c.witnesses { - altH, err := witness.SignedHeader(h.Height) - if err != nil { - c.logger.Error("Failed to get a header from witness", "height", h.Height, "witness", witness) - continue - } - - if err = altH.ValidateBasic(c.chainID); err != nil { - c.logger.Error("Witness sent us incorrect header", "err", err, "witness", witness) - witnessesToRemove = append(witnessesToRemove, i) - continue - } - - if !bytes.Equal(h.Hash(), altH.Hash()) { - if err = c.latestTrustedVals.VerifyCommitTrusting(c.chainID, altH.Commit.BlockID, - altH.Height, altH.Commit, c.trustLevel); err != nil { - c.logger.Error("Witness sent us incorrect header", "err", err, "witness", witness) - witnessesToRemove = append(witnessesToRemove, i) - continue - } - - // TODO: send the diverged headers to primary && all witnesses - - return fmt.Errorf( - "header hash %X does not match one %X from the witness %v", - h.Hash(), altH.Hash(), witness) - } - - headerMatched = true - } - - for _, idx := range witnessesToRemove { - c.removeWitness(idx) - } - witnessesToRemove = make([]int, 0) - - if headerMatched { - return nil - } - - // 2. Otherwise, sleep - time.Sleep(backoffTimeout(attempt)) - } - - return errors.New("awaiting response from all witnesses exceeded dropout time") -} - -// NOTE: requires a providerMutex locked. 
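
A stripped-down version of the witness cross-check above, with the retry loop, witness removal, and the VerifyCommitTrusting fallback omitted (helper name hypothetical):

import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/lite2/provider"
	"github.com/tendermint/tendermint/types"
)

// crossCheck fetches the same height from a single witness and compares
// hashes; any mismatch is grounds for suspecting the primary.
func crossCheck(h *types.SignedHeader, witness provider.Provider) error {
	altH, err := witness.SignedHeader(h.Height)
	if err != nil {
		return fmt.Errorf("witness %v: %w", witness, err)
	}
	if !bytes.Equal(h.Hash(), altH.Hash()) {
		return fmt.Errorf("header hash %X does not match %X from witness %v",
			h.Hash(), altH.Hash(), witness)
	}
	return nil
}
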
-func (c *Client) removeWitness(idx int) { - switch len(c.witnesses) { - case 0: - panic(fmt.Sprintf("wanted to remove %d element from empty witnesses slice", idx)) - case 1: - c.witnesses = make([]provider.Provider, 0) - default: - c.witnesses[idx] = c.witnesses[len(c.witnesses)-1] - c.witnesses = c.witnesses[:len(c.witnesses)-1] - } -} - -// Update attempts to advance the state by downloading the latest header and -// comparing it with the existing one. It returns a new header on a successful -// update. Otherwise, it returns nil (plus an error, if any). -func (c *Client) Update(now time.Time) (*types.SignedHeader, error) { - lastTrustedHeight, err := c.LastTrustedHeight() - if err != nil { - return nil, fmt.Errorf("can't get last trusted height: %w", err) - } - - if lastTrustedHeight == -1 { - // no headers yet => wait - return nil, nil - } - - latestHeader, latestVals, err := c.fetchHeaderAndValsAtHeight(0) - if err != nil { - return nil, err - } - - if latestHeader.Height > lastTrustedHeight { - err = c.VerifyHeader(latestHeader, latestVals, now) - if err != nil { - return nil, err - } - c.logger.Info("Advanced to new state", "height", latestHeader.Height, "hash", hash2str(latestHeader.Hash())) - return latestHeader, nil - } - - return nil, nil -} - -// replaceProvider takes the first alternative provider and promotes it as the -// primary provider. -func (c *Client) replacePrimaryProvider() error { - c.providerMutex.Lock() - defer c.providerMutex.Unlock() - - if len(c.witnesses) <= 1 { - return errNoWitnesses{} - } - c.primary = c.witnesses[0] - c.witnesses = c.witnesses[1:] - c.logger.Info("New primary", "p", c.primary) - - return nil -} - -// signedHeaderFromPrimary retrieves the SignedHeader from the primary provider -// at the specified height. Handles dropout by the primary provider by swapping -// with an alternative provider. -func (c *Client) signedHeaderFromPrimary(height int64) (*types.SignedHeader, error) { - for attempt := uint16(1); attempt <= c.maxRetryAttempts; attempt++ { - c.providerMutex.Lock() - h, err := c.primary.SignedHeader(height) - c.providerMutex.Unlock() - if err == nil { - // sanity check - if height > 0 && h.Height != height { - return nil, fmt.Errorf("expected %d height, got %d", height, h.Height) - } - return h, nil - } - if err == provider.ErrSignedHeaderNotFound { - return nil, err - } - c.logger.Error("Failed to get signed header from primary", "attempt", attempt, "err", err) - time.Sleep(backoffTimeout(attempt)) - } - - c.logger.Info("Primary is unavailable. Replacing with the first witness") - err := c.replacePrimaryProvider() - if err != nil { - return nil, err - } - - return c.signedHeaderFromPrimary(height) -} - -// validatorSetFromPrimary retrieves the ValidatorSet from the primary provider -// at the specified height. Handles dropout by the primary provider after 5 -// attempts by replacing it with an alternative provider. -func (c *Client) validatorSetFromPrimary(height int64) (*types.ValidatorSet, error) { - for attempt := uint16(1); attempt <= c.maxRetryAttempts; attempt++ { - c.providerMutex.Lock() - vals, err := c.primary.ValidatorSet(height) - c.providerMutex.Unlock() - if err == nil || err == provider.ErrValidatorSetNotFound { - return vals, err - } - c.logger.Error("Failed to get validator set from primary", "attempt", attempt, "err", err) - time.Sleep(backoffTimeout(attempt)) - } - - c.logger.Info("Primary is unavailable. 
Replacing with the first witness") - err := c.replacePrimaryProvider() - if err != nil { - return nil, err - } - - return c.validatorSetFromPrimary(height) -} - -// exponential backoff (with jitter) -// 0.5s -> 2s -> 4.5s -> 8s -> 12.5 with 1s variation -func backoffTimeout(attempt uint16) time.Duration { - return time.Duration(500*attempt*attempt)*time.Millisecond + time.Duration(rand.Intn(1000))*time.Millisecond -} - -func hash2str(hash []byte) string { - return fmt.Sprintf("%X", hash) -} diff --git a/lite2/errors.go b/lite2/errors.go deleted file mode 100644 index 7bc70f698..000000000 --- a/lite2/errors.go +++ /dev/null @@ -1,48 +0,0 @@ -package lite - -import ( - "fmt" - "time" - - "github.com/tendermint/tendermint/types" -) - -// ErrOldHeaderExpired means the old (trusted) header has expired according to -// the given trustingPeriod and current time. If so, the light client must be -// reset subjectively. -type ErrOldHeaderExpired struct { - At time.Time - Now time.Time -} - -func (e ErrOldHeaderExpired) Error() string { - return fmt.Sprintf("old header has expired at %v (now: %v)", e.At, e.Now) -} - -// ErrNewValSetCantBeTrusted means the new validator set cannot be trusted -// because < 1/3rd (+trustLevel+) of the old validator set has signed. -type ErrNewValSetCantBeTrusted struct { - Reason types.ErrNotEnoughVotingPowerSigned -} - -func (e ErrNewValSetCantBeTrusted) Error() string { - return fmt.Sprintf("cant trust new val set: %v", e.Reason) -} - -// ErrInvalidHeader means the header either failed the basic validation or -// commit is not signed by 2/3+. -type ErrInvalidHeader struct { - Reason error -} - -func (e ErrInvalidHeader) Error() string { - return fmt.Sprintf("invalid header: %v", e.Reason) -} - -// errNoWitnesses means that there are not enough witnesses connected to -// continue running the light client. -type errNoWitnesses struct{} - -func (e errNoWitnesses) Error() string { - return fmt.Sprint("no witnesses connected. please reset light client") -} diff --git a/lite2/provider/errors.go b/lite2/provider/errors.go deleted file mode 100644 index 05a242acd..000000000 --- a/lite2/provider/errors.go +++ /dev/null @@ -1,12 +0,0 @@ -package provider - -import "errors" - -var ( - // ErrSignedHeaderNotFound is returned when a provider can't find the - // requested header. - ErrSignedHeaderNotFound = errors.New("signed header not found") - // ErrValidatorSetNotFound is returned when a provider can't find the - // requested validator set. - ErrValidatorSetNotFound = errors.New("validator set not found") -) diff --git a/lite2/provider/http/http.go b/lite2/provider/http/http.go deleted file mode 100644 index a7d8534b4..000000000 --- a/lite2/provider/http/http.go +++ /dev/null @@ -1,139 +0,0 @@ -package http - -import ( - "errors" - "fmt" - "strings" - - "github.com/tendermint/tendermint/lite2/provider" - rpcclient "github.com/tendermint/tendermint/rpc/client" - rpchttp "github.com/tendermint/tendermint/rpc/client/http" - "github.com/tendermint/tendermint/types" -) - -// SignStatusClient combines a SignClient and StatusClient. -type SignStatusClient interface { - rpcclient.SignClient - rpcclient.StatusClient - // Remote returns the remote network address in a string form. - Remote() string -} - -// http provider uses an RPC client (or SignStatusClient more generally) to -// obtain the necessary information. 
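
The backoff schedule in backoffTimeout above is quadratic plus up to 1s of jitter. A quick check of the deterministic part (note that the uint16 multiplication would overflow past attempt 11, which is only safe because attempts are capped by maxRetryAttempts, default 10):

package main

import (
	"fmt"
	"time"
)

func main() {
	for attempt := uint16(1); attempt <= 5; attempt++ {
		base := time.Duration(500*attempt*attempt) * time.Millisecond
		fmt.Println(attempt, base) // 1 500ms, 2 2s, 3 4.5s, 4 8s, 5 12.5s
	}
}
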
-type http struct {
-	SignStatusClient // embed so interface can be converted to SignStatusClient for tests
-	chainID          string
-}
-
-// New creates an HTTP provider, which uses the rpchttp.HTTP client under the
-// hood. If no scheme is provided in the remote URL, http will be used by default.
-func New(chainID, remote string) (provider.Provider, error) {
-	// ensure URL scheme is set (default HTTP) when not provided
-	if !strings.Contains(remote, "://") {
-		remote = "http://" + remote
-	}
-
-	httpClient, err := rpchttp.New(remote, "/websocket")
-	if err != nil {
-		return nil, err
-	}
-
-	return NewWithClient(chainID, httpClient), nil
-}
-
-// NewWithClient allows you to provide custom SignStatusClient.
-func NewWithClient(chainID string, client SignStatusClient) provider.Provider {
-	return &http{
-		SignStatusClient: client,
-		chainID:          chainID,
-	}
-}
-
-// ChainID returns a chainID this provider was configured with.
-func (p *http) ChainID() string {
-	return p.chainID
-}
-
-func (p *http) String() string {
-	return fmt.Sprintf("http{%s}", p.Remote())
-}
-
-// SignedHeader fetches a SignedHeader at the given height and checks the
-// chainID matches.
-func (p *http) SignedHeader(height int64) (*types.SignedHeader, error) {
-	h, err := validateHeight(height)
-	if err != nil {
-		return nil, err
-	}
-
-	commit, err := p.SignStatusClient.Commit(h)
-	if err != nil {
-		// TODO: standardise errors on the RPC side
-		if strings.Contains(err.Error(), "height must be less than or equal") {
-			return nil, provider.ErrSignedHeaderNotFound
-		}
-		return nil, err
-	}
-
-	if commit.Header == nil {
-		return nil, errors.New("header is nil")
-	}
-
-	// Verify we're still on the same chain.
-	if p.chainID != commit.Header.ChainID {
-		return nil, fmt.Errorf("expected chainID %s, got %s", p.chainID, commit.Header.ChainID)
-	}
-
-	return &commit.SignedHeader, nil
-}
-
-// ValidatorSet fetches a ValidatorSet at the given height. Multiple HTTP
-// requests might be required if the validator set size is over 100.
-func (p *http) ValidatorSet(height int64) (*types.ValidatorSet, error) {
-	h, err := validateHeight(height)
-	if err != nil {
-		return nil, err
-	}
-
-	const maxPerPage = 100
-	res, err := p.SignStatusClient.Validators(h, 0, maxPerPage)
-	if err != nil {
-		// TODO: standardise errors on the RPC side
-		if strings.Contains(err.Error(), "height must be less than or equal") {
-			return nil, provider.ErrValidatorSetNotFound
-		}
-		return nil, err
-	}
-
-	var (
-		vals = res.Validators
-		page = 1
-	)
-
-	// Check if there are more validators.
-	for len(res.Validators) == maxPerPage {
-		res, err = p.SignStatusClient.Validators(h, page, maxPerPage)
-		if err != nil {
-			return nil, err
-		}
-		if len(res.Validators) > 0 {
-			vals = append(vals, res.Validators...)
-		}
-		page++
-	}
-
-	return types.NewValidatorSet(vals), nil
-}
-
-func validateHeight(height int64) (*int64, error) {
-	if height < 0 {
-		return nil, fmt.Errorf("expected height >= 0, got height %d", height)
-	}
-
-	h := &height
-	if height == 0 {
-		h = nil
-	}
-	return h, nil
-}
diff --git a/lite2/provider/mock/deadmock.go b/lite2/provider/mock/deadmock.go
deleted file mode 100644
index 77c474411..000000000
--- a/lite2/provider/mock/deadmock.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package mock
-
-import (
-	"errors"
-
-	"github.com/tendermint/tendermint/lite2/provider"
-	"github.com/tendermint/tendermint/types"
-)
-
-type deadMock struct {
-	chainID string
-}
-
-// NewDeadMock creates a mock provider that always errors.
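
Minimal usage of the http provider above (address is a placeholder); paging through validator sets larger than 100 happens transparently inside ValidatorSet:

package main

import (
	"fmt"

	httpp "github.com/tendermint/tendermint/lite2/provider/http"
)

func main() {
	// No scheme given, so New defaults to http://.
	p, err := httpp.New("test-chain", "localhost:26657")
	if err != nil {
		panic(err)
	}

	h, err := p.SignedHeader(0) // 0 = latest
	if err != nil {
		panic(err)
	}
	vals, err := p.ValidatorSet(h.Height)
	if err != nil {
		panic(err)
	}
	fmt.Printf("height=%d vals=%X\n", h.Height, vals.Hash())
}
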
-func NewDeadMock(chainID string) provider.Provider { - return &deadMock{chainID: chainID} -} - -func (p *deadMock) ChainID() string { - return p.chainID -} - -func (p *deadMock) String() string { - return "deadMock" -} - -func (p *deadMock) SignedHeader(height int64) (*types.SignedHeader, error) { - return nil, errors.New("no response from provider") -} - -func (p *deadMock) ValidatorSet(height int64) (*types.ValidatorSet, error) { - return nil, errors.New("no response from provider") -} diff --git a/lite2/provider/mock/mock.go b/lite2/provider/mock/mock.go deleted file mode 100644 index 7ff7bc9a1..000000000 --- a/lite2/provider/mock/mock.go +++ /dev/null @@ -1,64 +0,0 @@ -package mock - -import ( - "fmt" - "strings" - - "github.com/tendermint/tendermint/lite2/provider" - "github.com/tendermint/tendermint/types" -) - -type mock struct { - chainID string - headers map[int64]*types.SignedHeader - vals map[int64]*types.ValidatorSet -} - -// New creates a mock provider with the given set of headers and validator -// sets. -func New(chainID string, headers map[int64]*types.SignedHeader, vals map[int64]*types.ValidatorSet) provider.Provider { - return &mock{ - chainID: chainID, - headers: headers, - vals: vals, - } -} - -// ChainID returns the blockchain ID. -func (p *mock) ChainID() string { - return p.chainID -} - -func (p *mock) String() string { - var headers strings.Builder - for _, h := range p.headers { - fmt.Fprintf(&headers, " %d:%X", h.Height, h.Hash()) - } - - var vals strings.Builder - for _, v := range p.vals { - fmt.Fprintf(&vals, " %X", v.Hash()) - } - - return fmt.Sprintf("mock{headers: %s, vals: %v}", headers.String(), vals.String()) -} - -func (p *mock) SignedHeader(height int64) (*types.SignedHeader, error) { - if height == 0 && len(p.headers) > 0 { - return p.headers[int64(len(p.headers))], nil - } - if _, ok := p.headers[height]; ok { - return p.headers[height], nil - } - return nil, provider.ErrSignedHeaderNotFound -} - -func (p *mock) ValidatorSet(height int64) (*types.ValidatorSet, error) { - if height == 0 && len(p.vals) > 0 { - return p.vals[int64(len(p.vals))], nil - } - if _, ok := p.vals[height]; ok { - return p.vals[height], nil - } - return nil, provider.ErrValidatorSetNotFound -} diff --git a/lite2/provider/provider.go b/lite2/provider/provider.go deleted file mode 100644 index 773e17e32..000000000 --- a/lite2/provider/provider.go +++ /dev/null @@ -1,35 +0,0 @@ -package provider - -import ( - "github.com/tendermint/tendermint/types" -) - -// Provider provides information for the lite client to sync (verification -// happens in the client). -type Provider interface { - // ChainID returns the blockchain ID. - ChainID() string - - // SignedHeader returns the SignedHeader that corresponds to the given - // height. - // - // 0 - the latest. - // height must be >= 0. - // - // If the provider fails to fetch the SignedHeader due to the IO or other - // issues, an error will be returned. - // If there's no SignedHeader for the given height, ErrSignedHeaderNotFound - // error is returned. - SignedHeader(height int64) (*types.SignedHeader, error) - - // ValidatorSet returns the ValidatorSet that corresponds to height. - // - // 0 - the latest. - // height must be >= 0. - // - // If the provider fails to fetch the ValidatorSet due to the IO or other - // issues, an error will be returned. - // If there's no ValidatorSet for the given height, ErrValidatorSetNotFound - // error is returned. 
-	ValidatorSet(height int64) (*types.ValidatorSet, error)
-}
diff --git a/lite2/rpc/client.go b/lite2/rpc/client.go
deleted file mode 100644
index 9155220c9..000000000
--- a/lite2/rpc/client.go
+++ /dev/null
@@ -1,482 +0,0 @@
-package rpc
-
-import (
-	"bytes"
-	"context"
-	"errors"
-	"fmt"
-	"strings"
-	"time"
-
-	"github.com/tendermint/tendermint/crypto/merkle"
-	tmbytes "github.com/tendermint/tendermint/libs/bytes"
-	service "github.com/tendermint/tendermint/libs/service"
-	lite "github.com/tendermint/tendermint/lite2"
-	rpcclient "github.com/tendermint/tendermint/rpc/client"
-	ctypes "github.com/tendermint/tendermint/rpc/core/types"
-	rpctypes "github.com/tendermint/tendermint/rpc/lib/types"
-	"github.com/tendermint/tendermint/types"
-)
-
-var errNegOrZeroHeight = errors.New("negative or zero height")
-
-// Client is an RPC client, which uses lite#Client to verify data (if it can be
-// proved!).
-type Client struct {
-	service.BaseService
-
-	next rpcclient.Client
-	lc   *lite.Client
-	prt  *merkle.ProofRuntime
-}
-
-var _ rpcclient.Client = (*Client)(nil)
-
-// NewClient returns a new client.
-func NewClient(next rpcclient.Client, lc *lite.Client) *Client {
-	c := &Client{
-		next: next,
-		lc:   lc,
-		prt:  defaultProofRuntime(),
-	}
-	c.BaseService = *service.NewBaseService(nil, "Client", c)
-	return c
-}
-
-func (c *Client) OnStart() error {
-	if !c.next.IsRunning() {
-		return c.next.Start()
-	}
-	return nil
-}
-
-func (c *Client) OnStop() {
-	if c.next.IsRunning() {
-		c.next.Stop()
-	}
-}
-
-func (c *Client) Status() (*ctypes.ResultStatus, error) {
-	return c.next.Status()
-}
-
-func (c *Client) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
-	return c.next.ABCIInfo()
-}
-
-func (c *Client) ABCIQuery(path string, data tmbytes.HexBytes) (*ctypes.ResultABCIQuery, error) {
-	return c.ABCIQueryWithOptions(path, data, rpcclient.DefaultABCIQueryOptions)
-}
-
-// ABCIQueryWithOptions is useful if you want full access to the ABCIQueryOptions.
-// XXX Usage of path? It's not used, and sometimes it's /, sometimes /key, sometimes /store.
-func (c *Client) ABCIQueryWithOptions(path string, data tmbytes.HexBytes,
-	opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
-
-	res, err := c.next.ABCIQueryWithOptions(path, data, opts)
-	if err != nil {
-		return nil, err
-	}
-	resp := res.Response
-
-	// Validate the response.
-	if resp.IsErr() {
-		return nil, fmt.Errorf("err response code: %v", resp.Code)
-	}
-	if len(resp.Key) == 0 || resp.Proof == nil {
-		return nil, errors.New("empty tree")
-	}
-	if resp.Height <= 0 {
-		return nil, errNegOrZeroHeight
-	}
-
-	// Update the light client if we're behind.
-	// NOTE: AppHash for height H is in header H+1.
-	h, err := c.updateLiteClientIfNeededTo(resp.Height + 1)
-	if err != nil {
-		return nil, err
-	}
-
-	// Validate the value proof against the trusted header.
-	if resp.Value != nil {
-		// Value exists
-		// XXX How do we encode the key into a string...
-		storeName, err := parseQueryStorePath(path)
-		if err != nil {
-			return nil, err
-		}
-		kp := merkle.KeyPath{}
-		kp = kp.AppendKey([]byte(storeName), merkle.KeyEncodingURL)
-		kp = kp.AppendKey(resp.Key, merkle.KeyEncodingURL)
-		err = c.prt.VerifyValue(resp.Proof, h.AppHash, kp.String(), resp.Value)
-		if err != nil {
-			return nil, fmt.Errorf("verify value proof: %w", err)
-		}
-		return &ctypes.ResultABCIQuery{Response: resp}, nil
-	}
-
-	// OR validate the absence proof against the trusted header.
-	// XXX How do we encode the key into a string...
- err = c.prt.VerifyAbsence(resp.Proof, h.AppHash, string(resp.Key)) - if err != nil { - return nil, fmt.Errorf("verify absence proof: %w", err) - } - return &ctypes.ResultABCIQuery{Response: resp}, nil -} - -func (c *Client) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - return c.next.BroadcastTxCommit(tx) -} - -func (c *Client) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return c.next.BroadcastTxAsync(tx) -} - -func (c *Client) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return c.next.BroadcastTxSync(tx) -} - -func (c *Client) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { - return c.next.UnconfirmedTxs(limit) -} - -func (c *Client) NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) { - return c.next.NumUnconfirmedTxs() -} - -func (c *Client) NetInfo() (*ctypes.ResultNetInfo, error) { - return c.next.NetInfo() -} - -func (c *Client) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { - return c.next.DumpConsensusState() -} - -func (c *Client) ConsensusState() (*ctypes.ResultConsensusState, error) { - return c.next.ConsensusState() -} - -func (c *Client) ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error) { - res, err := c.next.ConsensusParams(height) - if err != nil { - return nil, err - } - - // Validate res. - if err := res.ConsensusParams.Validate(); err != nil { - return nil, err - } - if res.BlockHeight <= 0 { - return nil, errNegOrZeroHeight - } - - // Update the light client if we're behind. - h, err := c.updateLiteClientIfNeededTo(res.BlockHeight) - if err != nil { - return nil, err - } - - // Verify hash. - if cH, tH := res.ConsensusParams.Hash(), h.ConsensusHash; !bytes.Equal(cH, tH) { - return nil, fmt.Errorf("params hash %X does not match trusted hash %X", - cH, tH) - } - - return res, nil -} - -func (c *Client) Health() (*ctypes.ResultHealth, error) { - return c.next.Health() -} - -// BlockchainInfo calls rpcclient#BlockchainInfo and then verifies every header -// returned. -func (c *Client) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - res, err := c.next.BlockchainInfo(minHeight, maxHeight) - if err != nil { - return nil, err - } - - // Validate res. - for i, meta := range res.BlockMetas { - if meta == nil { - return nil, fmt.Errorf("nil block meta %d", i) - } - if err := meta.ValidateBasic(); err != nil { - return nil, fmt.Errorf("invalid block meta %d: %w", i, err) - } - } - - // Update the light client if we're behind. - if len(res.BlockMetas) > 0 { - lastHeight := res.BlockMetas[len(res.BlockMetas)-1].Header.Height - if _, err := c.updateLiteClientIfNeededTo(lastHeight); err != nil { - return nil, err - } - } - - // Verify each of the BlockMetas. - for _, meta := range res.BlockMetas { - h, err := c.lc.TrustedHeader(meta.Header.Height) - if err != nil { - return nil, fmt.Errorf("trusted header %d: %w", meta.Header.Height, err) - } - if bmH, tH := meta.Header.Hash(), h.Hash(); !bytes.Equal(bmH, tH) { - return nil, fmt.Errorf("block meta header %X does not match with trusted header %X", - bmH, tH) - } - } - - return res, nil -} - -func (c *Client) Genesis() (*ctypes.ResultGenesis, error) { - return c.next.Genesis() -} - -// Block calls rpcclient#Block and then verifies the result. -func (c *Client) Block(height *int64) (*ctypes.ResultBlock, error) { - res, err := c.next.Block(height) - if err != nil { - return nil, err - } - - // Validate res. 
- if err := res.BlockID.ValidateBasic(); err != nil { - return nil, err - } - if err := res.Block.ValidateBasic(); err != nil { - return nil, err - } - if bmH, bH := res.BlockID.Hash, res.Block.Hash(); !bytes.Equal(bmH, bH) { - return nil, fmt.Errorf("blockID %X does not match with block %X", - bmH, bH) - } - - // Update the light client if we're behind. - h, err := c.updateLiteClientIfNeededTo(res.Block.Height) - if err != nil { - return nil, err - } - - // Verify block. - if bH, tH := res.Block.Hash(), h.Hash(); !bytes.Equal(bH, tH) { - return nil, fmt.Errorf("block header %X does not match with trusted header %X", - bH, tH) - } - - return res, nil -} - -func (c *Client) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { - res, err := c.next.BlockResults(height) - if err != nil { - return nil, err - } - - // Validate res. - if res.Height <= 0 { - return nil, errNegOrZeroHeight - } - - // Update the light client if we're behind. - h, err := c.updateLiteClientIfNeededTo(res.Height + 1) - if err != nil { - return nil, err - } - - // Verify block results. - results := types.NewResults(res.TxsResults) - if rH, tH := results.Hash(), h.LastResultsHash; !bytes.Equal(rH, tH) { - return nil, fmt.Errorf("last results %X does not match with trusted last results %X", - rH, tH) - } - - return res, nil -} - -func (c *Client) Commit(height *int64) (*ctypes.ResultCommit, error) { - res, err := c.next.Commit(height) - if err != nil { - return nil, err - } - - // Validate res. - if err := res.SignedHeader.ValidateBasic(c.lc.ChainID()); err != nil { - return nil, err - } - if res.Height <= 0 { - return nil, errNegOrZeroHeight - } - - // Update the light client if we're behind. - h, err := c.updateLiteClientIfNeededTo(res.Height) - if err != nil { - return nil, err - } - - // Verify commit. - if rH, tH := res.Hash(), h.Hash(); !bytes.Equal(rH, tH) { - return nil, fmt.Errorf("header %X does not match with trusted header %X", - rH, tH) - } - - return res, nil -} - -// Tx calls rpcclient#Tx method and then verifies the proof if such was -// requested. -func (c *Client) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { - res, err := c.next.Tx(hash, prove) - if err != nil || !prove { - return res, err - } - - // Validate res. - if res.Height <= 0 { - return nil, errNegOrZeroHeight - } - - // Update the light client if we're behind. - h, err := c.updateLiteClientIfNeededTo(res.Height) - if err != nil { - return nil, err - } - - // Validate the proof. - return res, res.Proof.Validate(h.DataHash) -} - -func (c *Client) TxSearch(query string, prove bool, page, perPage int, orderBy string) ( - *ctypes.ResultTxSearch, error) { - return c.next.TxSearch(query, prove, page, perPage, orderBy) -} - -// Validators fetches and verifies validators. -// -// WARNING: only full validator sets are verified (when length of validators is -// less than +perPage+. +perPage+ default is 30, max is 100). -func (c *Client) Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) { - res, err := c.next.Validators(height, page, perPage) - if err != nil { - return nil, err - } - - // Validate res. - if res.BlockHeight <= 0 { - return nil, errNegOrZeroHeight - } - - // Update the light client if we're behind. - h, err := c.updateLiteClientIfNeededTo(res.BlockHeight) - if err != nil { - return nil, err - } - - // Verify validators. 
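Block, BlockResults, Commit and Tx above all reduce to the same shape: basic-validate the untrusted response, sync the light client to the relevant height, then compare a hash recomputed from the response against the trusted header. A schematic, stand-alone sketch of that final comparison step (verifyAgainstTrusted is a hypothetical helper, not part of the package):

package main

import (
	"bytes"
	"fmt"
)

// verifyAgainstTrusted mirrors the comparisons above: the hash recomputed
// from the RPC response must match the corresponding field of a header the
// light client already trusts.
func verifyAgainstTrusted(recomputed, trusted []byte, what string) error {
	if !bytes.Equal(recomputed, trusted) {
		return fmt.Errorf("%s %X does not match with trusted %s %X",
			what, recomputed, what, trusted)
	}
	return nil
}

func main() {
	trusted := []byte{0xDE, 0xAD, 0xBE, 0xEF}
	fmt.Println(verifyAgainstTrusted([]byte{0xDE, 0xAD, 0xBE, 0xEF}, trusted, "header")) // <nil>
	fmt.Println(verifyAgainstTrusted([]byte{0x00}, trusted, "header"))                   // mismatch error
}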
- if res.Count <= res.Total { - if rH, tH := types.NewValidatorSet(res.Validators).Hash(), h.ValidatorsHash; !bytes.Equal(rH, tH) { - return nil, fmt.Errorf("validators %X does not match with trusted validators %X", - rH, tH) - } - } - - return res, nil -} - -func (c *Client) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { - return c.next.BroadcastEvidence(ev) -} - -func (c *Client) Subscribe(ctx context.Context, subscriber, query string, - outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { - return c.next.Subscribe(ctx, subscriber, query, outCapacity...) -} - -func (c *Client) Unsubscribe(ctx context.Context, subscriber, query string) error { - return c.next.Unsubscribe(ctx, subscriber, query) -} - -func (c *Client) UnsubscribeAll(ctx context.Context, subscriber string) error { - return c.next.UnsubscribeAll(ctx, subscriber) -} - -func (c *Client) updateLiteClientIfNeededTo(height int64) (*types.SignedHeader, error) { - h, err := c.lc.VerifyHeaderAtHeight(height, time.Now()) - if err != nil { - return nil, fmt.Errorf("failed to update light client to %d: %w", height, err) - } - return h, nil -} - -func (c *Client) RegisterOpDecoder(typ string, dec merkle.OpDecoder) { - c.prt.RegisterOpDecoder(typ, dec) -} - -// SubscribeWS subscribes for events using the given query and remote address as -// a subscriber, but does not verify responses (UNSAFE)! -// TODO: verify data -func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { - out, err := c.next.Subscribe(context.Background(), ctx.RemoteAddr(), query) - if err != nil { - return nil, err - } - - go func() { - for { - select { - case resultEvent := <-out: - // We should have a switch here that performs a validation - // depending on the event's type. - ctx.WSConn.TryWriteRPCResponse( - rpctypes.NewRPCSuccessResponse( - ctx.WSConn.Codec(), - rpctypes.JSONRPCStringID(fmt.Sprintf("%v#event", ctx.JSONReq.ID)), - resultEvent, - )) - case <-c.Quit(): - return - } - } - }() - - return &ctypes.ResultSubscribe{}, nil -} - -// UnsubscribeWS calls original client's Unsubscribe using remote address as a -// subscriber. -func (c *Client) UnsubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { - err := c.next.Unsubscribe(context.Background(), ctx.RemoteAddr(), query) - if err != nil { - return nil, err - } - return &ctypes.ResultUnsubscribe{}, nil -} - -// UnsubscribeAllWS calls original client's UnsubscribeAll using remote address -// as a subscriber. 
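SubscribeWS above fans events out to the websocket connection with a select loop that runs until the service quits (and, as the TODO notes, without verifying them yet). The forward-until-quit shape in isolation, with a plain sink function standing in for ctx.WSConn.TryWriteRPCResponse:

package main

import "fmt"

// forward mirrors the SubscribeWS goroutine above: relay incoming events
// until the quit channel closes. sink is a hypothetical stand-in for the
// websocket write.
func forward(out <-chan string, quit <-chan struct{}, sink func(string)) {
	for {
		select {
		case ev := <-out:
			sink(ev)
		case <-quit:
			return
		}
	}
}

func main() {
	out := make(chan string)
	quit := make(chan struct{})
	done := make(chan struct{})
	go func() {
		forward(out, quit, func(ev string) { fmt.Println("event:", ev) })
		close(done)
	}()
	out <- "tm.event = 'NewBlock'"
	close(quit)
	<-done
}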
-func (c *Client) UnsubscribeAllWS(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { - err := c.next.UnsubscribeAll(context.Background(), ctx.RemoteAddr()) - if err != nil { - return nil, err - } - return &ctypes.ResultUnsubscribe{}, nil -} - -func parseQueryStorePath(path string) (storeName string, err error) { - if !strings.HasPrefix(path, "/") { - return "", errors.New("expected path to start with /") - } - - paths := strings.SplitN(path[1:], "/", 3) - switch { - case len(paths) != 3: - return "", errors.New("expected format like /store//key") - case paths[0] != "store": - return "", errors.New("expected format like /store//key") - case paths[2] != "key": - return "", errors.New("expected format like /store//key") - } - - return paths[1], nil -} diff --git a/lite2/rpc/proof.go b/lite2/rpc/proof.go deleted file mode 100644 index 51e835f7a..000000000 --- a/lite2/rpc/proof.go +++ /dev/null @@ -1,14 +0,0 @@ -package rpc - -import ( - "github.com/tendermint/tendermint/crypto/merkle" -) - -func defaultProofRuntime() *merkle.ProofRuntime { - prt := merkle.NewProofRuntime() - prt.RegisterOpDecoder( - merkle.ProofOpSimpleValue, - merkle.SimpleValueOpDecoder, - ) - return prt -} diff --git a/lite2/rpc/query_test.go b/lite2/rpc/query_test.go deleted file mode 100644 index 93f70c728..000000000 --- a/lite2/rpc/query_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package rpc - -//import ( -// "fmt" -// "os" -// "testing" -// "time" - -// "github.com/stretchr/testify/assert" -// "github.com/stretchr/testify/require" - -// "github.com/tendermint/tendermint/abci/example/kvstore" -// "github.com/tendermint/tendermint/crypto/merkle" -// "github.com/tendermint/tendermint/lite" -// certclient "github.com/tendermint/tendermint/lite/client" -// nm "github.com/tendermint/tendermint/node" -// "github.com/tendermint/tendermint/rpc/client" -// rpctest "github.com/tendermint/tendermint/rpc/test" -// "github.com/tendermint/tendermint/types" -//) - -//var node *nm.Node -//var chainID = "tendermint_test" // TODO use from config. -////nolint:unused -//var waitForEventTimeout = 5 * time.Second - -//// TODO fix tests!! - -//func TestMain(m *testing.M) { -// app := kvstore.NewKVStoreApplication() -// node = rpctest.StartTendermint(app) - -// code := m.Run() - -// rpctest.StopTendermint(node) -// os.Exit(code) -//} - -//func kvstoreTx(k, v []byte) []byte { -// return []byte(fmt.Sprintf("%s=%s", k, v)) -//} - -//// TODO: enable it after general proof format has been adapted -//// in abci/examples/kvstore.go -////nolint:unused,deadcode -//func _TestAppProofs(t *testing.T) { -// assert, require := assert.New(t), require.New(t) - -// prt := defaultProofRuntime() -// cl := client.NewLocal(node) -// client.WaitForHeight(cl, 1, nil) - -// // This sets up our trust on the node based on some past point. -// source := certclient.NewProvider(chainID, cl) -// seed, err := source.LatestFullCommit(chainID, 1, 1) -// require.NoError(err, "%#v", err) -// cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators) - -// // Wait for tx confirmation. -// done := make(chan int64) -// go func() { -// evtTyp := types.EventTx -// _, err = client.WaitForOneEvent(cl, evtTyp, waitForEventTimeout) -// require.Nil(err, "%#v", err) -// close(done) -// }() - -// // Submit a transaction. 
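parseQueryStorePath above accepts only three-segment paths of the exact form /store/<storeName>/key. A runnable demonstration using a local copy of the function (error messages abbreviated):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// parseStorePath is a condensed copy of parseQueryStorePath above, kept
// local so the example runs on its own.
func parseStorePath(path string) (string, error) {
	if !strings.HasPrefix(path, "/") {
		return "", errors.New("expected path to start with /")
	}
	parts := strings.SplitN(path[1:], "/", 3)
	if len(parts) != 3 || parts[0] != "store" || parts[2] != "key" {
		return "", errors.New("expected format like /store/<storeName>/key")
	}
	return parts[1], nil
}

func main() {
	fmt.Println(parseStorePath("/store/acc/key")) // acc <nil>
	fmt.Println(parseStorePath("/store/acc"))     // "" error: wrong format
	fmt.Println(parseStorePath("store/acc/key"))  // "" error: no leading /
}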
-// k := []byte("my-key") -// v := []byte("my-value") -// tx := kvstoreTx(k, v) -// br, err := cl.BroadcastTxCommit(tx) -// require.NoError(err, "%#v", err) -// require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) -// require.EqualValues(0, br.DeliverTx.Code) -// brh := br.Height - -// // Fetch latest after tx commit. -// <-done -// latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1) -// require.NoError(err, "%#v", err) -// rootHash := latest.SignedHeader.AppHash -// if rootHash == nil { -// // Fetch one block later, AppHash hasn't been committed yet. -// // TODO find a way to avoid doing this. -// client.WaitForHeight(cl, latest.SignedHeader.Height+1, nil) -// latest, err = source.LatestFullCommit(chainID, latest.SignedHeader.Height+1, 1<<63-1) -// require.NoError(err, "%#v", err) -// rootHash = latest.SignedHeader.AppHash -// } -// require.NotNil(rootHash) - -// // verify a query before the tx block has no data (and valid non-exist proof) -// bs, height, proof, err := GetWithProof(prt, k, brh-1, cl, cert) -// require.NoError(err, "%#v", err) -// require.NotNil(proof) -// require.Equal(height, brh-1) -// // require.NotNil(proof) -// // TODO: Ensure that *some* keys will be there, ensuring that proof is nil, -// // (currently there's a race condition) -// // and ensure that proof proves absence of k. -// require.Nil(bs) - -// // but given that block it is good -// bs, height, proof, err = GetWithProof(prt, k, brh, cl, cert) -// require.NoError(err, "%#v", err) -// require.NotNil(proof) -// require.Equal(height, brh) - -// assert.EqualValues(v, bs) -// err = prt.VerifyValue(proof, rootHash, string(k), bs) // XXX key encoding -// assert.NoError(err, "%#v", err) - -// // Test non-existing key. -// missing := []byte("my-missing-key") -// bs, _, proof, err = GetWithProof(prt, missing, 0, cl, cert) -// require.NoError(err) -// require.Nil(bs) -// require.NotNil(proof) -// err = prt.VerifyAbsence(proof, rootHash, string(missing)) // XXX VerifyAbsence(), keyencoding -// assert.NoError(err, "%#v", err) -// err = prt.VerifyAbsence(proof, rootHash, string(k)) // XXX VerifyAbsence(), keyencoding -// assert.Error(err, "%#v", err) -//} - -//func TestTxProofs(t *testing.T) { -// assert, require := assert.New(t), require.New(t) - -// cl := client.NewLocal(node) -// client.WaitForHeight(cl, 1, nil) - -// tx := kvstoreTx([]byte("key-a"), []byte("value-a")) -// br, err := cl.BroadcastTxCommit(tx) -// require.NoError(err, "%#v", err) -// require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) -// require.EqualValues(0, br.DeliverTx.Code) -// brh := br.Height - -// source := certclient.NewProvider(chainID, cl) -// seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) -// require.NoError(err, "%#v", err) -// cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators) - -// // First let's make sure a bogus transaction hash returns a valid non-existence proof. -// key := types.Tx([]byte("bogus")).Hash() -// _, err = cl.Tx(key, true) -// require.NotNil(err) -// require.Contains(err.Error(), "not found") - -// // Now let's check with the real tx root hash. 
-// key = types.Tx(tx).Hash() -// res, err := cl.Tx(key, true) -// require.NoError(err, "%#v", err) -// require.NotNil(res) -// keyHash := merkle.SimpleHashFromByteSlices([][]byte{key}) -// err = res.Proof.Validate(keyHash) -// assert.NoError(err, "%#v", err) - -// commit, err := GetCertifiedCommit(br.Height, cl, cert) -// require.Nil(err, "%#v", err) -// require.Equal(res.Proof.RootHash, commit.Header.DataHash) -//} diff --git a/lite2/store/db/db.go b/lite2/store/db/db.go deleted file mode 100644 index 8d37ace9f..000000000 --- a/lite2/store/db/db.go +++ /dev/null @@ -1,347 +0,0 @@ -package db - -import ( - "encoding/binary" - "fmt" - "regexp" - "strconv" - "sync" - - "github.com/pkg/errors" - "github.com/tendermint/go-amino" - dbm "github.com/tendermint/tm-db" - - cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" - "github.com/tendermint/tendermint/lite2/store" - "github.com/tendermint/tendermint/types" -) - -var ( - sizeKey = []byte("size") -) - -type dbs struct { - db dbm.DB - prefix string - - mtx sync.RWMutex - size uint16 - - cdc *amino.Codec -} - -// New returns a Store that wraps any DB (with an optional prefix in case you -// want to use one DB with many light clients). -// -// Objects are marshalled using amino (github.com/tendermint/go-amino) -func New(db dbm.DB, prefix string) store.Store { - cdc := amino.NewCodec() - cryptoAmino.RegisterAmino(cdc) - - size := uint16(0) - bz, err := db.Get(sizeKey) - if err == nil && len(bz) > 0 { - size = unmarshalSize(bz) - } - - return &dbs{db: db, prefix: prefix, cdc: cdc, size: size} -} - -// SaveSignedHeaderAndValidatorSet persists SignedHeader and ValidatorSet to -// the db. -// -// Safe for concurrent use by multiple goroutines. -func (s *dbs) SaveSignedHeaderAndValidatorSet(sh *types.SignedHeader, valSet *types.ValidatorSet) error { - if sh.Height <= 0 { - panic("negative or zero height") - } - - shBz, err := s.cdc.MarshalBinaryLengthPrefixed(sh) - if err != nil { - return errors.Wrap(err, "marshalling header") - } - - valSetBz, err := s.cdc.MarshalBinaryLengthPrefixed(valSet) - if err != nil { - return errors.Wrap(err, "marshalling validator set") - } - - s.mtx.Lock() - defer s.mtx.Unlock() - - b := s.db.NewBatch() - b.Set(s.shKey(sh.Height), shBz) - b.Set(s.vsKey(sh.Height), valSetBz) - b.Set(sizeKey, marshalSize(s.size+1)) - - err = b.WriteSync() - b.Close() - - if err == nil { - s.size++ - } - - return err -} - -// DeleteSignedHeaderAndValidatorSet deletes SignedHeader and ValidatorSet from -// the db. -// -// Safe for concurrent use by multiple goroutines. -func (s *dbs) DeleteSignedHeaderAndValidatorSet(height int64) error { - if height <= 0 { - panic("negative or zero height") - } - - s.mtx.Lock() - defer s.mtx.Unlock() - - b := s.db.NewBatch() - b.Delete(s.shKey(height)) - b.Delete(s.vsKey(height)) - b.Set(sizeKey, marshalSize(s.size-1)) - - err := b.WriteSync() - b.Close() - - if err == nil { - s.size-- - } - - return err -} - -// SignedHeader loads SignedHeader at the given height. -// -// Safe for concurrent use by multiple goroutines. -func (s *dbs) SignedHeader(height int64) (*types.SignedHeader, error) { - if height <= 0 { - panic("negative or zero height") - } - - bz, err := s.db.Get(s.shKey(height)) - if err != nil { - panic(err) - } - if len(bz) == 0 { - return nil, store.ErrSignedHeaderNotFound - } - - var signedHeader *types.SignedHeader - err = s.cdc.UnmarshalBinaryLengthPrefixed(bz, &signedHeader) - return signedHeader, err -} - -// ValidatorSet loads ValidatorSet at the given height. 
-// -// Safe for concurrent use by multiple goroutines. -func (s *dbs) ValidatorSet(height int64) (*types.ValidatorSet, error) { - if height <= 0 { - panic("negative or zero height") - } - - bz, err := s.db.Get(s.vsKey(height)) - if err != nil { - panic(err) - } - if len(bz) == 0 { - return nil, store.ErrValidatorSetNotFound - } - - var valSet *types.ValidatorSet - err = s.cdc.UnmarshalBinaryLengthPrefixed(bz, &valSet) - return valSet, err -} - -// LastSignedHeaderHeight returns the last SignedHeader height stored. -// -// Safe for concurrent use by multiple goroutines. -func (s *dbs) LastSignedHeaderHeight() (int64, error) { - itr, err := s.db.ReverseIterator( - s.shKey(1), - append(s.shKey(1<<63-1), byte(0x00)), - ) - if err != nil { - panic(err) - } - defer itr.Close() - - for itr.Valid() { - key := itr.Key() - _, height, ok := parseShKey(key) - if ok { - return height, nil - } - itr.Next() - } - - return -1, nil -} - -// FirstSignedHeaderHeight returns the first SignedHeader height stored. -// -// Safe for concurrent use by multiple goroutines. -func (s *dbs) FirstSignedHeaderHeight() (int64, error) { - itr, err := s.db.Iterator( - s.shKey(1), - append(s.shKey(1<<63-1), byte(0x00)), - ) - if err != nil { - panic(err) - } - defer itr.Close() - - for itr.Valid() { - key := itr.Key() - _, height, ok := parseShKey(key) - if ok { - return height, nil - } - itr.Next() - } - - return -1, nil -} - -// SignedHeaderBefore iterates over headers until it finds a header before -// the given height. It returns ErrSignedHeaderNotFound if no such header exists. -// -// Safe for concurrent use by multiple goroutines. -func (s *dbs) SignedHeaderBefore(height int64) (*types.SignedHeader, error) { - if height <= 0 { - panic("negative or zero height") - } - - itr, err := s.db.ReverseIterator( - s.shKey(1), - s.shKey(height), - ) - if err != nil { - panic(err) - } - defer itr.Close() - - for itr.Valid() { - key := itr.Key() - _, existingHeight, ok := parseShKey(key) - if ok { - return s.SignedHeader(existingHeight) - } - itr.Next() - } - - return nil, store.ErrSignedHeaderNotFound -} - -// Prune prunes header & validator set pairs until there are only size pairs -// left. -// -// Safe for concurrent use by multiple goroutines. -func (s *dbs) Prune(size uint16) error { - // 1) Check how many we need to prune. - s.mtx.RLock() - sSize := s.size - s.mtx.RUnlock() - - if sSize <= size { // nothing to prune - return nil - } - numToPrune := sSize - size - - // 2) Iterate over headers and perform a batch operation. - itr, err := s.db.Iterator( - s.shKey(1), - append(s.shKey(1<<63-1), byte(0x00)), - ) - if err != nil { - panic(err) - } - - b := s.db.NewBatch() - - pruned := 0 - for itr.Valid() && numToPrune > 0 { - key := itr.Key() - _, height, ok := parseShKey(key) - if ok { - b.Delete(s.shKey(height)) - b.Delete(s.vsKey(height)) - } - itr.Next() - numToPrune-- - pruned++ - } - - itr.Close() - - err = b.WriteSync() - b.Close() - if err != nil { - return err - } - - // 3) Update size. - s.mtx.Lock() - defer s.mtx.Unlock() - - s.size -= uint16(pruned) - - if wErr := s.db.SetSync(sizeKey, marshalSize(s.size)); wErr != nil { - return errors.Wrap(wErr, "failed to persist size") - } - - return nil -} - -// Size returns the number of header & validator set pairs. -// -// Safe for concurrent use by multiple goroutines. 
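The iterators above (LastSignedHeaderHeight, FirstSignedHeaderHeight, SignedHeaderBefore, Prune) lean on the key layout defined just below: heights are zero-padded to 20 digits, so lexicographic key order matches numeric height order and a reverse iterator yields the newest header first. A self-contained illustration of the key format, the parsing regexp and the little-endian size encoding, mirroring shKey, parseKey and marshalSize:

package main

import (
	"encoding/binary"
	"fmt"
	"regexp"
	"strconv"
)

var keyPattern = regexp.MustCompile(`^(sh|vs)/([^/]*)/([0-9]+)$`)

func main() {
	// Same format as shKey: "sh/<prefix>/<20-digit zero-padded height>".
	key := fmt.Sprintf("sh/%s/%020d", "clientA", 42)
	fmt.Println(key) // sh/clientA/00000000000000000042

	m := keyPattern.FindStringSubmatch(key)
	height, _ := strconv.ParseInt(m[3], 10, 64)
	fmt.Println(m[1], m[2], height) // sh clientA 42

	// Same format as marshalSize/unmarshalSize: a little-endian uint16.
	bz := make([]byte, 2)
	binary.LittleEndian.PutUint16(bz, 7)
	fmt.Println(binary.LittleEndian.Uint16(bz)) // 7
}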
-func (s *dbs) Size() uint16 { - s.mtx.RLock() - defer s.mtx.RUnlock() - return s.size -} - -func (s *dbs) shKey(height int64) []byte { - return []byte(fmt.Sprintf("sh/%s/%020d", s.prefix, height)) -} - -func (s *dbs) vsKey(height int64) []byte { - return []byte(fmt.Sprintf("vs/%s/%020d", s.prefix, height)) -} - -var keyPattern = regexp.MustCompile(`^(sh|vs)/([^/]*)/([0-9]+)$`) - -func parseKey(key []byte) (part string, prefix string, height int64, ok bool) { - submatch := keyPattern.FindSubmatch(key) - if submatch == nil { - return "", "", 0, false - } - part = string(submatch[1]) - prefix = string(submatch[2]) - height, err := strconv.ParseInt(string(submatch[3]), 10, 64) - if err != nil { - return "", "", 0, false - } - ok = true // good! - return -} - -func parseShKey(key []byte) (prefix string, height int64, ok bool) { - var part string - part, prefix, height, ok = parseKey(key) - if part != "sh" { - return "", 0, false - } - return -} - -func marshalSize(size uint16) []byte { - bs := make([]byte, 2) - binary.LittleEndian.PutUint16(bs, size) - return bs -} - -func unmarshalSize(bz []byte) uint16 { - return binary.LittleEndian.Uint16(bz) -} diff --git a/lite2/store/db/db_test.go b/lite2/store/db/db_test.go deleted file mode 100644 index ce45f3bcf..000000000 --- a/lite2/store/db/db_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package db - -import ( - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - dbm "github.com/tendermint/tm-db" - - "github.com/tendermint/tendermint/types" -) - -func TestLast_FirstSignedHeaderHeight(t *testing.T) { - dbStore := New(dbm.NewMemDB(), "TestLast_FirstSignedHeaderHeight") - - // Empty store - height, err := dbStore.LastSignedHeaderHeight() - require.NoError(t, err) - assert.EqualValues(t, -1, height) - - height, err = dbStore.FirstSignedHeaderHeight() - require.NoError(t, err) - assert.EqualValues(t, -1, height) - - // 1 key - err = dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: 1}}, &types.ValidatorSet{}) - require.NoError(t, err) - - height, err = dbStore.LastSignedHeaderHeight() - require.NoError(t, err) - assert.EqualValues(t, 1, height) - - height, err = dbStore.FirstSignedHeaderHeight() - require.NoError(t, err) - assert.EqualValues(t, 1, height) -} - -func Test_SaveSignedHeaderAndValidatorSet(t *testing.T) { - dbStore := New(dbm.NewMemDB(), "Test_SaveSignedHeaderAndValidatorSet") - - // Empty store - h, err := dbStore.SignedHeader(1) - require.Error(t, err) - assert.Nil(t, h) - - valSet, err := dbStore.ValidatorSet(1) - require.Error(t, err) - assert.Nil(t, valSet) - - // 1 key - err = dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: 1}}, &types.ValidatorSet{}) - require.NoError(t, err) - - h, err = dbStore.SignedHeader(1) - require.NoError(t, err) - assert.NotNil(t, h) - - valSet, err = dbStore.ValidatorSet(1) - require.NoError(t, err) - assert.NotNil(t, valSet) - - // Empty store - err = dbStore.DeleteSignedHeaderAndValidatorSet(1) - require.NoError(t, err) - - h, err = dbStore.SignedHeader(1) - require.Error(t, err) - assert.Nil(t, h) - - valSet, err = dbStore.ValidatorSet(1) - require.Error(t, err) - assert.Nil(t, valSet) -} - -func Test_SignedHeaderBefore(t *testing.T) { - dbStore := New(dbm.NewMemDB(), "Test_SignedHeaderBefore") - - assert.Panics(t, func() { - _, _ = dbStore.SignedHeaderBefore(0) - _, _ = dbStore.SignedHeaderBefore(100) - }) - - err := dbStore.SaveSignedHeaderAndValidatorSet( - 
&types.SignedHeader{Header: &types.Header{Height: 2}}, &types.ValidatorSet{}) - require.NoError(t, err) - - h, err := dbStore.SignedHeaderBefore(3) - require.NoError(t, err) - if assert.NotNil(t, h) { - assert.EqualValues(t, 2, h.Height) - } -} - -func Test_Prune(t *testing.T) { - dbStore := New(dbm.NewMemDB(), "Test_Prune") - - // Empty store - assert.EqualValues(t, 0, dbStore.Size()) - err := dbStore.Prune(0) - require.NoError(t, err) - - // One header - err = dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: 2}}, &types.ValidatorSet{}) - require.NoError(t, err) - - assert.EqualValues(t, 1, dbStore.Size()) - - err = dbStore.Prune(1) - require.NoError(t, err) - assert.EqualValues(t, 1, dbStore.Size()) - - err = dbStore.Prune(0) - require.NoError(t, err) - assert.EqualValues(t, 0, dbStore.Size()) - - // Multiple headers - for i := 1; i <= 10; i++ { - err = dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: int64(i)}}, &types.ValidatorSet{}) - require.NoError(t, err) - } - - err = dbStore.Prune(11) - require.NoError(t, err) - assert.EqualValues(t, 10, dbStore.Size()) - - err = dbStore.Prune(7) - require.NoError(t, err) - assert.EqualValues(t, 7, dbStore.Size()) -} - -func Test_Concurrency(t *testing.T) { - dbStore := New(dbm.NewMemDB(), "Test_Prune") - - var wg sync.WaitGroup - for i := 1; i <= 100; i++ { - wg.Add(1) - go func(i int64) { - defer wg.Done() - - dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: i}}, &types.ValidatorSet{}) - - dbStore.SignedHeader(i) - dbStore.ValidatorSet(i) - dbStore.LastSignedHeaderHeight() - dbStore.FirstSignedHeaderHeight() - - dbStore.Prune(2) - _ = dbStore.Size() - - dbStore.DeleteSignedHeaderAndValidatorSet(1) - }(int64(i)) - } - - wg.Wait() -} diff --git a/lite2/store/errors.go b/lite2/store/errors.go deleted file mode 100644 index 2f77bc893..000000000 --- a/lite2/store/errors.go +++ /dev/null @@ -1,13 +0,0 @@ -package store - -import "errors" - -var ( - // ErrSignedHeaderNotFound is returned when a store does not have the - // requested header. - ErrSignedHeaderNotFound = errors.New("signed header not found") - - // ErrValidatorSetNotFound is returned when a store does not have the - // requested validator set. - ErrValidatorSetNotFound = errors.New("validator set not found") -) diff --git a/lite2/store/store.go b/lite2/store/store.go deleted file mode 100644 index 0d36c48b6..000000000 --- a/lite2/store/store.go +++ /dev/null @@ -1,55 +0,0 @@ -package store - -import "github.com/tendermint/tendermint/types" - -// Store is anything that can persistenly store headers. -type Store interface { - // SaveSignedHeaderAndValidatorSet saves a SignedHeader (h: sh.Height) and a - // ValidatorSet (h: sh.Height). - // - // height must be > 0. - SaveSignedHeaderAndValidatorSet(sh *types.SignedHeader, valSet *types.ValidatorSet) error - - // DeleteSignedHeaderAndValidatorSet deletes SignedHeader (h: height) and - // ValidatorSet (h: height). - // - // height must be > 0. - DeleteSignedHeaderAndValidatorSet(height int64) error - - // SignedHeader returns the SignedHeader that corresponds to the given - // height. - // - // height must be > 0. - // - // If SignedHeader is not found, ErrSignedHeaderNotFound is returned. - SignedHeader(height int64) (*types.SignedHeader, error) - - // ValidatorSet returns the ValidatorSet that corresponds to height. - // - // height must be > 0. 
- // - // If ValidatorSet is not found, ErrValidatorSetNotFound is returned. - ValidatorSet(height int64) (*types.ValidatorSet, error) - - // LastSignedHeaderHeight returns the last (newest) SignedHeader height. - // - // If the store is empty, -1 and nil error are returned. - LastSignedHeaderHeight() (int64, error) - - // FirstSignedHeaderHeight returns the first (oldest) SignedHeader height. - // - // If the store is empty, -1 and nil error are returned. - FirstSignedHeaderHeight() (int64, error) - - // SignedHeaderBefore returns the SignedHeader before a certain height. - // - // height must be > 0 && <= LastSignedHeaderHeight. - SignedHeaderBefore(height int64) (*types.SignedHeader, error) - - // Prune removes headers & the associated validator sets when Store reaches a - // defined size (number of header & validator set pairs). - Prune(size uint16) error - - // Size returns a number of currently existing header & validator set pairs. - Size() uint16 -} diff --git a/mempool/bench_test.go b/mempool/bench_test.go index d6f2d9ed2..779110b62 100644 --- a/mempool/bench_test.go +++ b/mempool/bench_test.go @@ -18,7 +18,9 @@ func BenchmarkReap(b *testing.B) { for i := 0; i < size; i++ { tx := make([]byte, 8) binary.BigEndian.PutUint64(tx, uint64(i)) - mempool.CheckTx(tx, nil, TxInfo{}) + if err := mempool.CheckTx(tx, nil, TxInfo{}); err != nil { + b.Error(err) + } } b.ResetTimer() for i := 0; i < b.N; i++ { @@ -35,7 +37,9 @@ func BenchmarkCheckTx(b *testing.B) { for i := 0; i < b.N; i++ { tx := make([]byte, 8) binary.BigEndian.PutUint64(tx, uint64(i)) - mempool.CheckTx(tx, nil, TxInfo{}) + if err := mempool.CheckTx(tx, nil, TxInfo{}); err != nil { + b.Error(err) + } } } diff --git a/mempool/cache_test.go b/mempool/cache_test.go index 99bbba406..d9a53f475 100644 --- a/mempool/cache_test.go +++ b/mempool/cache_test.go @@ -20,7 +20,8 @@ func TestCacheRemove(t *testing.T) { for i := 0; i < numTxs; i++ { // probability of collision is 2**-256 txBytes := make([]byte, 32) - rand.Read(txBytes) // nolint: gosec + _, err := rand.Read(txBytes) + require.NoError(t, err) txs[i] = txBytes cache.Push(txBytes) // make sure its added to both the linked list and the map @@ -67,7 +68,8 @@ func TestCacheAfterUpdate(t *testing.T) { tx := types.Tx{byte(v)} updateTxs = append(updateTxs, tx) } - mempool.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil) + err := mempool.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil) + require.NoError(t, err) for _, v := range tc.reAddIndices { tx := types.Tx{byte(v)} diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go index 7732032a4..7b0c97522 100644 --- a/mempool/clist_mempool.go +++ b/mempool/clist_mempool.go @@ -7,9 +7,6 @@ import ( "fmt" "sync" "sync/atomic" - "time" - - "github.com/pkg/errors" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" @@ -18,11 +15,17 @@ import ( "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" tmos "github.com/tendermint/tendermint/libs/os" + tmsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) +// TxKeySize is the size of the transaction key index +const TxKeySize = sha256.Size + +var newline = []byte("\n") + //-------------------------------------------------------------------------------- // CListMempool is an ordered in-memory pool for 
transactions before they are @@ -32,9 +35,8 @@ import ( // be efficiently accessed by multiple concurrent readers. type CListMempool struct { // Atomic integers
- height int64 // the last block Update()'d to
- txsBytes int64 // total size of mempool, in bytes
- rechecking int32 // for re-checking filtered txs on Update()
+ height int64 // the last block Update()'d to
+ txsBytes int64 // total size of mempool, in bytes
// notify listeners (ie. consensus) when txs are available notifiedTxsAvailable bool @@ -42,15 +44,19 @@ type CListMempool struct { config *cfg.MempoolConfig
- proxyMtx sync.Mutex
+ // Exclusive mutex for Update method to prevent concurrent execution of
+ // CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods.
+ updateMtx tmsync.RWMutex
+ preCheck PreCheckFunc
+ postCheck PostCheckFunc
+
+ wal *auto.AutoFile // a log of mempool txs
+ txs *clist.CList // concurrent linked-list of good txs
proxyAppConn proxy.AppConnMempool
- txs *clist.CList // concurrent linked-list of good txs
- preCheck PreCheckFunc
- postCheck PostCheckFunc
// Track whether we're rechecking txs.
- // These are not protected by a mutex and are expected to be mutated
- // in serial (ie. by abci responses which are called in serial).
+ // These are not protected by a mutex and are expected to be mutated in
+ // serial (ie. by abci responses which are called in serial).
recheckCursor *clist.CElement // next expected response recheckEnd *clist.CElement // re-checking stops here @@ -62,9 +68,6 @@ type CListMempool struct { // This reduces the pressure on the proxyApp. cache txCache
- // A log of mempool txs
- wal *auto.AutoFile
-
logger log.Logger metrics *Metrics @@ -87,7 +90,6 @@ func NewCListMempool( proxyAppConn: proxyAppConn, txs: clist.New(), height: height,
- rechecking: 0,
recheckCursor: nil, recheckEnd: nil, logger: log.NewNopLogger(), @@ -116,13 +118,15 @@ func (mem *CListMempool) SetLogger(l log.Logger) { } // WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
-// false. This is ran before CheckTx.
+// false. This is run before CheckTx. Only applies to the first created block.
+// After that, Update overwrites the existing value.
func WithPreCheck(f PreCheckFunc) CListMempoolOption { return func(mem *CListMempool) { mem.preCheck = f } } // WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
-// false. This is ran after CheckTx.
+// false. This is run after CheckTx. Only applies to the first created block.
+// After that, Update overwrites the existing value.
func WithPostCheck(f PostCheckFunc) CListMempoolOption { return func(mem *CListMempool) { mem.postCheck = f } } @@ -132,55 +136,64 @@ func WithMetrics(metrics *Metrics) CListMempoolOption { return func(mem *CListMempool) { mem.metrics = metrics } }
-// *panics* if can't create directory or open file.
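The struct change above replaces the single proxyMtx with updateMtx, an RWMutex: CheckTx, ReapMaxBytesMaxGas, ReapMaxTxs and Flush can now run concurrently under read locks, while Update takes the exclusive write lock; this is also why the old rechecking counter and its sleep-poll loop could be dropped. A minimal sketch of the pattern using the standard library, on the assumption that libs/sync's RWMutex mirrors sync.RWMutex:

package main

import (
	"fmt"
	"sync"
)

type pool struct {
	mtx sync.RWMutex
	txs []string
}

// checkTx takes the read lock: many of these may run at once.
func (p *pool) checkTx(tx string) {
	p.mtx.RLock()
	defer p.mtx.RUnlock()
	_ = tx // validation would happen here
}

// update takes the write lock: it excludes all readers and other writers.
func (p *pool) update(committed []string) {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	p.txs = p.txs[:0] // drop committed txs, recheck the rest, etc.
	_ = committed
}

func main() {
	p := &pool{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) { defer wg.Done(); p.checkTx(fmt.Sprint(i)) }(i)
	}
	wg.Wait()
	p.update(nil)
	fmt.Println("done")
}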
-// *not thread safe*
-func (mem *CListMempool) InitWAL() {
- walDir := mem.config.WalDir()
- err := tmos.EnsureDir(walDir, 0700)
- if err != nil {
- panic(errors.Wrap(err, "Error ensuring WAL dir"))
+func (mem *CListMempool) InitWAL() error {
+ var (
+ walDir = mem.config.WalDir()
+ walFile = walDir + "/wal"
+ )
+
+ const perm = 0700
+ if err := tmos.EnsureDir(walDir, perm); err != nil {
+ return err
}
- af, err := auto.OpenAutoFile(walDir + "/wal")
+
+ af, err := auto.OpenAutoFile(walFile)
if err != nil {
- panic(errors.Wrap(err, "Error opening WAL file"))
+ return fmt.Errorf("can't open autofile %s: %w", walFile, err)
}
+ mem.wal = af
+ return nil
}
func (mem *CListMempool) CloseWAL() {
- mem.proxyMtx.Lock()
- defer mem.proxyMtx.Unlock()
-
if err := mem.wal.Close(); err != nil { mem.logger.Error("Error closing WAL", "err", err) } mem.wal = nil }
+// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Lock() {
- mem.proxyMtx.Lock()
+ mem.updateMtx.Lock()
}
+// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Unlock() {
- mem.proxyMtx.Unlock()
+ mem.updateMtx.Unlock()
}
+// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) Size() int { return mem.txs.Len() }
+// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsBytes() int64 { return atomic.LoadInt64(&mem.txsBytes) }
+// Lock() must be held by the caller during execution.
func (mem *CListMempool) FlushAppConn() error { return mem.proxyAppConn.FlushSync() }
+// XXX: Unsafe! Calling Flush may leave mempool in inconsistent state.
func (mem *CListMempool) Flush() {
- mem.proxyMtx.Lock()
- defer mem.proxyMtx.Unlock()
+ mem.updateMtx.RLock()
+ defer mem.updateMtx.RUnlock()
+
_ = atomic.SwapInt64(&mem.txsBytes, 0) mem.cache.Reset() for e := mem.txs.Front(); e != nil; e = e.Next() { @@ -188,13 +201,17 @@ func (mem *CListMempool) Flush() { e.DetachPrev() }
- mem.txsMap = sync.Map{}
- _ = atomic.SwapInt64(&mem.txsBytes, 0)
+ mem.txsMap.Range(func(key, _ interface{}) bool {
+ mem.txsMap.Delete(key)
+ return true
+ })
} // TxsFront returns the first transaction in the ordered list for peer // goroutines to call .NextWait() on. // FIXME: leaking implementation details!
+//
+// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsFront() *clist.CElement { return mem.txs.Front() } @@ -202,6 +219,8 @@ func (mem *CListMempool) TxsFront() *clist.CElement { // TxsWaitChan returns a channel to wait on transactions. It will be closed // once the mempool is not empty (ie. the internal `mem.txs` has at least one // element)
+//
+// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) TxsWaitChan() <-chan struct{} { return mem.txs.WaitChan() } @@ -210,26 +229,19 @@ func (mem *CListMempool) TxsWaitChan() <-chan struct{} { // cb: A callback from the CheckTx command. // It gets called from another goroutine. // CONTRACT: Either cb will get called, or err returned.
-func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) (err error) {
- mem.proxyMtx.Lock()
+//
+// Safe for concurrent use by multiple goroutines.
+func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo TxInfo) error { + mem.updateMtx.RLock() // use defer to unlock mutex because application (*local client*) might panic - defer mem.proxyMtx.Unlock() + defer mem.updateMtx.RUnlock() - var ( - memSize = mem.Size() - txsBytes = mem.TxsBytes() - txSize = len(tx) - ) - if memSize >= mem.config.Size || - int64(txSize)+txsBytes > mem.config.MaxTxsBytes { - return ErrMempoolIsFull{ - memSize, mem.config.Size, - txsBytes, mem.config.MaxTxsBytes} + txSize := len(tx) + + if err := mem.isFull(txSize); err != nil { + return err } - // The size of the corresponding amino-encoded TxMessage - // can't be larger than the maxMsgSize, otherwise we can't - // relay it to peers. if txSize > mem.config.MaxTxBytes { return ErrTxTooLarge{mem.config.MaxTxBytes, txSize} } @@ -240,43 +252,38 @@ func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo Tx } } - // CACHE + // NOTE: writing to the WAL and calling proxy must be done before adding tx + // to the cache. otherwise, if either of them fails, next time CheckTx is + // called with tx, ErrTxInCache will be returned without tx being checked at + // all even once. + if mem.wal != nil { + // TODO: Notify administrators when WAL fails + _, err := mem.wal.Write(append([]byte(tx), newline...)) + if err != nil { + return fmt.Errorf("wal.Write: %w", err) + } + } + + // NOTE: proxyAppConn may error if tx buffer is full + if err := mem.proxyAppConn.Error(); err != nil { + return err + } + if !mem.cache.Push(tx) { // Record a new sender for a tx we've already seen. // Note it's possible a tx is still in the cache but no longer in the mempool // (eg. after committing a block, txs are removed from mempool but not cache), // so we only record the sender for txs still in the mempool. - if e, ok := mem.txsMap.Load(txKey(tx)); ok { + if e, ok := mem.txsMap.Load(TxKey(tx)); ok { memTx := e.(*clist.CElement).Value.(*mempoolTx) memTx.senders.LoadOrStore(txInfo.SenderID, true) // TODO: consider punishing peer for dups, // its non-trivial since invalid txs can become valid, // but they can spam the same tx with little cost to them atm. - } return ErrTxInCache } - // END CACHE - - // WAL - if mem.wal != nil { - // TODO: Notify administrators when WAL fails - _, err := mem.wal.Write([]byte(tx)) - if err != nil { - mem.logger.Error("Error writing to WAL", "err", err) - } - _, err = mem.wal.Write([]byte("\n")) - if err != nil { - mem.logger.Error("Error writing to WAL", "err", err) - } - } - // END WAL - - // NOTE: proxyAppConn may error if tx buffer is full - if err = mem.proxyAppConn.Error(); err != nil { - return err - } reqRes := mem.proxyAppConn.CheckTxAsync(abci.RequestCheckTx{Tx: tx}) reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderP2PID, cb)) @@ -290,7 +297,9 @@ func (mem *CListMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo Tx // and peerID is not included in the ABCI request, so we have to set request-specific callbacks that // include this information. If we're not in the midst of a recheck, this function will just return, // so the request specific callback can do the work. -// When rechecking, we don't need the peerID, so the recheck callback happens here. +// +// When rechecking, we don't need the peerID, so the recheck callback happens +// here. 
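The NOTE at the top of CheckTx above is subtle: if a tx entered the dedup cache before the WAL write or the proxy-connection check, a failure in either step would leave the tx cached but never actually checked, and every retry would short-circuit with ErrTxInCache. A toy reproduction of both orderings (walWrite and the map cache are hypothetical stand-ins):

package main

import (
	"errors"
	"fmt"
)

var errTxInCache = errors.New("tx already exists in cache")

func checkTx(tx string, cache map[string]bool, walWrite func(string) error, cacheFirst bool) error {
	if cacheFirst {
		if cache[tx] {
			return errTxInCache
		}
		cache[tx] = true // wrong order: cached before the fallible steps
	}
	if err := walWrite(tx); err != nil {
		return err
	}
	if !cacheFirst {
		if cache[tx] {
			return errTxInCache
		}
		cache[tx] = true // right order: cached only after the WAL write succeeded
	}
	return nil // tx would now be sent to the app via CheckTxAsync
}

func main() {
	failing := func(string) error { return errors.New("wal.Write: disk full") }
	ok := func(string) error { return nil }

	cache := map[string]bool{}
	fmt.Println(checkTx("tx1", cache, failing, true)) // wal.Write: disk full
	fmt.Println(checkTx("tx1", cache, ok, true))      // tx already exists in cache — never checked!

	cache = map[string]bool{}
	fmt.Println(checkTx("tx1", cache, failing, false)) // wal.Write: disk full
	fmt.Println(checkTx("tx1", cache, ok, false))      // <nil> — the retry succeeds
}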
func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) { if mem.recheckCursor == nil { return @@ -340,7 +349,7 @@ func (mem *CListMempool) reqResCb( // - resCbFirstTime (lock not held) if tx is valid func (mem *CListMempool) addTx(memTx *mempoolTx) { e := mem.txs.PushBack(memTx) - mem.txsMap.Store(txKey(memTx.tx), e) + mem.txsMap.Store(TxKey(memTx.tx), e) atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) } @@ -351,7 +360,7 @@ func (mem *CListMempool) addTx(memTx *mempoolTx) { func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { mem.txs.Remove(elem) elem.DetachPrev() - mem.txsMap.Delete(txKey(tx)) + mem.txsMap.Delete(TxKey(tx)) atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) if removeFromCache { @@ -359,6 +368,32 @@ func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromC } } +// RemoveTxByKey removes a transaction from the mempool by its TxKey index. +func (mem *CListMempool) RemoveTxByKey(txKey [TxKeySize]byte, removeFromCache bool) { + if e, ok := mem.txsMap.Load(txKey); ok { + memTx := e.(*clist.CElement).Value.(*mempoolTx) + if memTx != nil { + mem.removeTx(memTx.tx, e.(*clist.CElement), removeFromCache) + } + } +} + +func (mem *CListMempool) isFull(txSize int) error { + var ( + memSize = mem.Size() + txsBytes = mem.TxsBytes() + ) + + if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes { + return ErrMempoolIsFull{ + memSize, mem.config.Size, + txsBytes, mem.config.MaxTxsBytes, + } + } + + return nil +} + // callback, which is called after the app checked the tx for the first time. // // The case where the app checks the tx for the second and subsequent times is @@ -376,6 +411,15 @@ func (mem *CListMempool) resCbFirstTime( postCheckErr = mem.postCheck(tx, r.CheckTx) } if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { + // Check mempool isn't full again to reduce the chance of exceeding the + // limits. + if err := mem.isFull(len(tx)); err != nil { + // remove from cache (mempool might have a space later) + mem.cache.Remove(tx) + mem.logger.Error(err.Error()) + return + } + memTx := &mempoolTx{ height: mem.height, gasWanted: r.CheckTx.GasWanted, @@ -437,7 +481,6 @@ func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { } if mem.recheckCursor == nil { // Done! - atomic.StoreInt32(&mem.rechecking, 0) mem.logger.Info("Done rechecking txs") // incase the recheck removed all txs @@ -450,6 +493,7 @@ func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { } } +// Safe for concurrent use by multiple goroutines. func (mem *CListMempool) TxsAvailable() <-chan struct{} { return mem.txsAvailable } @@ -468,29 +512,26 @@ func (mem *CListMempool) notifyTxsAvailable() { } } +// Safe for concurrent use by multiple goroutines. func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { - mem.proxyMtx.Lock() - defer mem.proxyMtx.Unlock() + mem.updateMtx.RLock() + defer mem.updateMtx.RUnlock() - for atomic.LoadInt32(&mem.rechecking) > 0 { - // TODO: Something better? - time.Sleep(time.Millisecond * 10) - } - - var totalBytes int64 var totalGas int64 + // TODO: we will get a performance boost if we have a good estimate of avg // size per tx, and set the initial capacity based off of that. 
// txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max/mem.avgTxSize)) txs := make([]types.Tx, 0, mem.txs.Len()) for e := mem.txs.Front(); e != nil; e = e.Next() { memTx := e.Value.(*mempoolTx)
+
+ dataSize := types.ComputeProtoSizeForTxs(append(txs, memTx.tx))
+
// Check total size requirement
- aminoOverhead := types.ComputeAminoOverhead(memTx.tx, 1)
- if maxBytes > -1 && totalBytes+int64(len(memTx.tx))+aminoOverhead > maxBytes {
+ if maxBytes > -1 && dataSize > maxBytes {
return txs }
- totalBytes += int64(len(memTx.tx)) + aminoOverhead
// Check total gas requirement. // If maxGas is negative, skip this check. // Since newTotalGas < maxGas, which @@ -505,19 +546,15 @@ func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { return txs }
+// Safe for concurrent use by multiple goroutines.
func (mem *CListMempool) ReapMaxTxs(max int) types.Txs {
- mem.proxyMtx.Lock()
- defer mem.proxyMtx.Unlock()
+ mem.updateMtx.RLock()
+ defer mem.updateMtx.RUnlock()
if max < 0 { max = mem.txs.Len() }
- for atomic.LoadInt32(&mem.rechecking) > 0 {
- // TODO: Something better?
- time.Sleep(time.Millisecond * 10)
- }
-
txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max)) for e := mem.txs.Front(); e != nil && len(txs) <= max; e = e.Next() { memTx := e.Value.(*mempoolTx) @@ -526,6 +563,7 @@ func (mem *CListMempool) ReapMaxTxs(max int) types.Txs { return txs }
+// Lock() must be held by the caller during execution.
func (mem *CListMempool) Update( height int64, txs types.Txs, @@ -563,7 +601,7 @@ func (mem *CListMempool) Update( // Mempool after: // 100 // https://github.com/tendermint/tendermint/issues/3322.
- if e, ok := mem.txsMap.Load(txKey(tx)); ok {
+ if e, ok := mem.txsMap.Load(TxKey(tx)); ok {
mem.removeTx(tx, e.(*clist.CElement), false) } } @@ -593,7 +631,6 @@ func (mem *CListMempool) recheckTxs() { panic("recheckTxs is called, but the mempool is empty") }
- atomic.StoreInt32(&mem.rechecking, 1)
mem.recheckCursor = mem.txs.Front() mem.recheckEnd = mem.txs.Back() @@ -639,9 +676,9 @@ type txCache interface { // mapTxCache maintains a LRU cache of transactions. This only stores the hash // of the tx, due to memory concerns. type mapTxCache struct {
- mtx sync.Mutex
+ mtx tmsync.Mutex
size int
- cacheMap map[[sha256.Size]byte]*list.Element
+ cacheMap map[[TxKeySize]byte]*list.Element
list *list.List } @@ -651,7 +688,7 @@ var _ txCache = (*mapTxCache)(nil) func newMapTxCache(cacheSize int) *mapTxCache { return &mapTxCache{ size: cacheSize,
- cacheMap: make(map[[sha256.Size]byte]*list.Element, cacheSize),
+ cacheMap: make(map[[TxKeySize]byte]*list.Element, cacheSize),
list: list.New(), } } @@ -659,7 +696,7 @@ func newMapTxCache(cacheSize int) *mapTxCache { // Reset resets the cache to an empty state.
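ReapMaxBytesMaxGas above stops as soon as adding one more tx would push the proto-encoded batch over maxBytes (and, analogously, the summed gasWanted over maxGas). A simplified sketch of the byte-budget loop, using raw lengths in place of types.ComputeProtoSizeForTxs:

package main

import "fmt"

// reap returns the longest prefix of txs whose total size stays within
// maxBytes; maxBytes < 0 disables the check, mirroring the loop above.
func reap(txs [][]byte, maxBytes int64) [][]byte {
	out := make([][]byte, 0, len(txs))
	var total int64
	for _, tx := range txs {
		if maxBytes > -1 && total+int64(len(tx)) > maxBytes {
			return out
		}
		total += int64(len(tx))
		out = append(out, tx)
	}
	return out
}

func main() {
	txs := [][]byte{make([]byte, 20), make([]byte, 20), make([]byte, 20)}
	fmt.Println(len(reap(txs, 45))) // 2: a third 20-byte tx would exceed 45
	fmt.Println(len(reap(txs, -1))) // 3: no byte limit
}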
func (cache *mapTxCache) Reset() { cache.mtx.Lock() - cache.cacheMap = make(map[[sha256.Size]byte]*list.Element, cache.size) + cache.cacheMap = make(map[[TxKeySize]byte]*list.Element, cache.size) cache.list.Init() cache.mtx.Unlock() } @@ -671,7 +708,7 @@ func (cache *mapTxCache) Push(tx types.Tx) bool { defer cache.mtx.Unlock() // Use the tx hash in the cache - txHash := txKey(tx) + txHash := TxKey(tx) if moved, exists := cache.cacheMap[txHash]; exists { cache.list.MoveToBack(moved) return false @@ -679,9 +716,9 @@ func (cache *mapTxCache) Push(tx types.Tx) bool { if cache.list.Len() >= cache.size { popped := cache.list.Front() - poppedTxHash := popped.Value.([sha256.Size]byte) - delete(cache.cacheMap, poppedTxHash) if popped != nil { + poppedTxHash := popped.Value.([TxKeySize]byte) + delete(cache.cacheMap, poppedTxHash) cache.list.Remove(popped) } } @@ -693,7 +730,7 @@ func (cache *mapTxCache) Push(tx types.Tx) bool { // Remove removes the given tx from the cache. func (cache *mapTxCache) Remove(tx types.Tx) { cache.mtx.Lock() - txHash := txKey(tx) + txHash := TxKey(tx) popped := cache.cacheMap[txHash] delete(cache.cacheMap, txHash) if popped != nil { @@ -713,8 +750,8 @@ func (nopTxCache) Remove(types.Tx) {} //-------------------------------------------------------------------------------- -// txKey is the fixed length array sha256 hash used as the key in maps. -func txKey(tx types.Tx) [sha256.Size]byte { +// TxKey is the fixed length array hash used as the key in maps. +func TxKey(tx types.Tx) [TxKeySize]byte { return sha256.Sum256(tx) } diff --git a/mempool/clist_mempool_test.go b/mempool/clist_mempool_test.go index 17ab83f33..45f6f5aa8 100644 --- a/mempool/clist_mempool_test.go +++ b/mempool/clist_mempool_test.go @@ -12,11 +12,11 @@ import ( "testing" "time" + "github.com/gogo/protobuf/proto" + gogotypes "github.com/gogo/protobuf/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - amino "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/abci/example/counter" "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" @@ -107,7 +107,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) { mempool.Flush() // each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs. - // each tx has 20 bytes + amino overhead = 21 bytes, 1 gas + // each tx has 20 bytes tests := []struct { numTxsToCreate int maxBytes int64 @@ -121,11 +121,11 @@ func TestReapMaxBytesMaxGas(t *testing.T) { {20, 0, -1, 0}, {20, 0, 10, 0}, {20, 10, 10, 0}, - {20, 22, 10, 1}, - {20, 220, -1, 10}, - {20, 220, 5, 5}, - {20, 220, 10, 10}, - {20, 220, 15, 10}, + {20, 24, 10, 1}, + {20, 240, 5, 5}, + {20, 240, -1, 10}, + {20, 240, 10, 10}, + {20, 240, 15, 10}, {20, 20000, -1, 20}, {20, 20000, 5, 5}, {20, 20000, 30, 20}, @@ -150,7 +150,7 @@ func TestMempoolFilters(t *testing.T) { nopPostFilter := func(tx types.Tx, res *abci.ResponseCheckTx) error { return nil } // each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs. 
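mapTxCache above is a map-plus-doubly-linked-list LRU keyed by the sha256 of the tx: pushing a known tx moves it to the back and reports the duplicate, and inserting past capacity evicts the least recently seen entry at the front. A condensed runnable version of the same structure (locking omitted for brevity); note that, like the fix in the diff above, the front element is nil-checked before eviction:

package main

import (
	"container/list"
	"crypto/sha256"
	"fmt"
)

type txCache struct {
	size     int
	cacheMap map[[sha256.Size]byte]*list.Element
	order    *list.List
}

func newTxCache(size int) *txCache {
	return &txCache{
		size:     size,
		cacheMap: make(map[[sha256.Size]byte]*list.Element, size),
		order:    list.New(),
	}
}

// push returns false if tx was already cached; otherwise it inserts tx,
// evicting the least recently seen entry when the cache is full.
func (c *txCache) push(tx []byte) bool {
	h := sha256.Sum256(tx)
	if e, ok := c.cacheMap[h]; ok {
		c.order.MoveToBack(e)
		return false
	}
	if c.order.Len() >= c.size {
		if front := c.order.Front(); front != nil {
			delete(c.cacheMap, front.Value.([sha256.Size]byte))
			c.order.Remove(front)
		}
	}
	c.cacheMap[h] = c.order.PushBack(h)
	return true
}

func main() {
	c := newTxCache(2)
	fmt.Println(c.push([]byte("a"))) // true
	fmt.Println(c.push([]byte("a"))) // false: duplicate
	fmt.Println(c.push([]byte("b"))) // true
	fmt.Println(c.push([]byte("c"))) // true: evicts "a"
	fmt.Println(c.push([]byte("a"))) // true again: "a" was evicted
}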
- // each tx has 20 bytes + amino overhead = 21 bytes, 1 gas + // each tx has 20 bytes tests := []struct { numTxsToCreate int preFilter PreCheckFunc @@ -158,20 +158,20 @@ func TestMempoolFilters(t *testing.T) { expectedNumTxs int }{ {10, nopPreFilter, nopPostFilter, 10}, - {10, PreCheckAminoMaxBytes(10), nopPostFilter, 0}, - {10, PreCheckAminoMaxBytes(20), nopPostFilter, 0}, - {10, PreCheckAminoMaxBytes(22), nopPostFilter, 10}, + {10, PreCheckMaxBytes(10), nopPostFilter, 0}, + {10, PreCheckMaxBytes(22), nopPostFilter, 10}, {10, nopPreFilter, PostCheckMaxGas(-1), 10}, {10, nopPreFilter, PostCheckMaxGas(0), 0}, {10, nopPreFilter, PostCheckMaxGas(1), 10}, {10, nopPreFilter, PostCheckMaxGas(3000), 10}, - {10, PreCheckAminoMaxBytes(10), PostCheckMaxGas(20), 0}, - {10, PreCheckAminoMaxBytes(30), PostCheckMaxGas(20), 10}, - {10, PreCheckAminoMaxBytes(22), PostCheckMaxGas(1), 10}, - {10, PreCheckAminoMaxBytes(22), PostCheckMaxGas(0), 0}, + {10, PreCheckMaxBytes(10), PostCheckMaxGas(20), 0}, + {10, PreCheckMaxBytes(30), PostCheckMaxGas(20), 10}, + {10, PreCheckMaxBytes(22), PostCheckMaxGas(1), 10}, + {10, PreCheckMaxBytes(22), PostCheckMaxGas(0), 0}, } for tcIndex, tt := range tests { - mempool.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter) + err := mempool.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter) + require.NoError(t, err) checkTxs(t, mempool, tt.numTxsToCreate, UnknownPeerID) require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex) mempool.Flush() @@ -186,8 +186,9 @@ func TestMempoolUpdate(t *testing.T) { // 1. Adds valid txs to the cache { - mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) - err := mempool.CheckTx([]byte{0x01}, nil, TxInfo{}) + err := mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) + require.NoError(t, err) + err = mempool.CheckTx([]byte{0x01}, nil, TxInfo{}) if assert.Error(t, err) { assert.Equal(t, ErrTxInCache, err) } @@ -197,7 +198,8 @@ func TestMempoolUpdate(t *testing.T) { { err := mempool.CheckTx([]byte{0x02}, nil, TxInfo{}) require.NoError(t, err) - mempool.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil) + err = mempool.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil) + require.NoError(t, err) assert.Zero(t, mempool.Size()) } @@ -205,11 +207,12 @@ func TestMempoolUpdate(t *testing.T) { { err := mempool.CheckTx([]byte{0x03}, nil, TxInfo{}) require.NoError(t, err) - mempool.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil) + err = mempool.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil) + require.NoError(t, err) assert.Zero(t, mempool.Size()) err = mempool.CheckTx([]byte{0x03}, nil, TxInfo{}) - assert.NoError(t, err) + require.NoError(t, err) } } @@ -259,7 +262,6 @@ func TestTxsAvailable(t *testing.T) { func TestSerialReap(t *testing.T) { app := counter.NewApplication(true) - app.SetOption(abci.RequestSetOption{Key: "serial", Value: "on"}) cc := proxy.NewLocalClientCreator(app) mempool, cleanup := newMempoolWithApp(cc) @@ -386,7 +388,8 @@ func TestMempoolCloseWAL(t *testing.T) { mempool, cleanup := newMempoolWithAppAndConfig(cc, wcfg) defer cleanup() mempool.height = 10 - mempool.InitWAL() + err = mempool.InitWAL() + require.NoError(t, err) // 4. 
Ensure that the directory contains the WAL file m2, err := filepath.Glob(filepath.Join(rootDir, "*")) @@ -394,7 +397,8 @@ func TestMempoolCloseWAL(t *testing.T) { require.Equal(t, 1, len(m2), "expecting the wal match in") // 5. Write some contents to the WAL - mempool.CheckTx(types.Tx([]byte("foo")), nil, TxInfo{}) + err = mempool.CheckTx(types.Tx([]byte("foo")), nil, TxInfo{}) + require.NoError(t, err) walFilepath := mempool.wal.Path sum1 := checksumFile(walFilepath, t) @@ -404,7 +408,8 @@ func TestMempoolCloseWAL(t *testing.T) { // 7. Invoke CloseWAL() and ensure it discards the // WAL thus any other write won't go through. mempool.CloseWAL() - mempool.CheckTx(types.Tx([]byte("bar")), nil, TxInfo{}) + err = mempool.CheckTx(types.Tx([]byte("bar")), nil, TxInfo{}) + require.NoError(t, err) sum2 := checksumFile(walFilepath, t) require.Equal(t, sum1, sum2, "expected no change to the WAL after invoking CloseWAL() since it was discarded") @@ -414,62 +419,46 @@ func TestMempoolCloseWAL(t *testing.T) { require.Equal(t, 1, len(m3), "expecting the wal match in") } -// Size of the amino encoded TxMessage is the length of the -// encoded byte array, plus 1 for the struct field, plus 4 -// for the amino prefix. -func txMessageSize(tx types.Tx) int { - return amino.ByteSliceSize(tx) + 1 + 4 -} - -func TestMempoolMaxMsgSize(t *testing.T) { +func TestMempool_CheckTxChecksTxSize(t *testing.T) { app := kvstore.NewApplication() cc := proxy.NewLocalClientCreator(app) mempl, cleanup := newMempoolWithApp(cc) defer cleanup() maxTxSize := mempl.config.MaxTxBytes - maxMsgSize := calcMaxMsgSize(maxTxSize) testCases := []struct { len int err bool }{ // check small txs. no error - {10, false}, - {1000, false}, - {1000000, false}, + 0: {10, false}, + 1: {1000, false}, + 2: {1000000, false}, // check around maxTxSize - // changes from no error to error - {maxTxSize - 2, false}, - {maxTxSize - 1, false}, - {maxTxSize, false}, - {maxTxSize + 1, true}, - {maxTxSize + 2, true}, - - // check around maxMsgSize. all error - {maxMsgSize - 1, true}, - {maxMsgSize, true}, - {maxMsgSize + 1, true}, + 3: {maxTxSize - 1, false}, + 4: {maxTxSize, false}, + 5: {maxTxSize + 1, true}, } for i, testCase := range testCases { caseString := fmt.Sprintf("case %d, len %d", i, testCase.len) tx := tmrand.Bytes(testCase.len) + err := mempl.CheckTx(tx, nil, TxInfo{}) - msg := &TxMessage{tx} - encoded := cdc.MustMarshalBinaryBare(msg) - require.Equal(t, len(encoded), txMessageSize(tx), caseString) + bv := gogotypes.BytesValue{Value: tx} + bz, err2 := bv.Marshal() + require.NoError(t, err2) + require.Equal(t, len(bz), proto.Size(&bv), caseString) + if !testCase.err { - require.True(t, len(encoded) <= maxMsgSize, caseString) require.NoError(t, err, caseString) } else { - require.True(t, len(encoded) > maxMsgSize, caseString) require.Equal(t, err, ErrTxTooLarge{maxTxSize, testCase.len}, caseString) } } - } func TestMempoolTxsBytes(t *testing.T) { @@ -489,7 +478,8 @@ func TestMempoolTxsBytes(t *testing.T) { assert.EqualValues(t, 1, mempool.TxsBytes()) // 3. zero again after tx is removed by Update - mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) + err = mempool.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) + require.NoError(t, err) assert.EqualValues(t, 0, mempool.TxsBytes()) // 4. 
zero after Flush @@ -525,7 +515,11 @@ func TestMempoolTxsBytes(t *testing.T) { appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus")) err = appConnCon.Start() require.Nil(t, err) - defer appConnCon.Stop() + t.Cleanup(func() { + if err := appConnCon.Stop(); err != nil { + t.Error(err) + } + }) res, err := appConnCon.DeliverTxSync(abci.RequestDeliverTx{Tx: txBytes}) require.NoError(t, err) require.EqualValues(t, 0, res.Code) @@ -534,8 +528,19 @@ func TestMempoolTxsBytes(t *testing.T) { require.NotEmpty(t, res2.Data) // Pretend like we committed nothing so txBytes gets rechecked and removed. - mempool.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil) + err = mempool.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil) + require.NoError(t, err) assert.EqualValues(t, 0, mempool.TxsBytes()) + + // 7. Test RemoveTxByKey function + err = mempool.CheckTx([]byte{0x06}, nil, TxInfo{}) + require.NoError(t, err) + assert.EqualValues(t, 1, mempool.TxsBytes()) + mempool.RemoveTxByKey(TxKey([]byte{0x07}), true) + assert.EqualValues(t, 1, mempool.TxsBytes()) + mempool.RemoveTxByKey(TxKey([]byte{0x06}), true) + assert.EqualValues(t, 0, mempool.TxsBytes()) + } // This will non-deterministically catch some concurrency failures like @@ -546,7 +551,11 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) { sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) app := kvstore.NewApplication() cc, server := newRemoteApp(t, sockPath, app) - defer server.Stop() + t.Cleanup(func() { + if err := server.Stop(); err != nil { + t.Error(err) + } + }) config := cfg.ResetTestRoot("mempool_test") mempool, cleanup := newMempoolWithAppAndConfig(cc, config) defer cleanup() @@ -568,7 +577,7 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) { tx := txs[txNum] // this will err with ErrTxInCache many times ... - mempool.CheckTx(tx, nil, TxInfo{SenderID: uint16(peerID)}) + mempool.CheckTx(tx, nil, TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error } err := mempool.FlushAppConn() require.NoError(t, err) @@ -595,7 +604,7 @@ func newRemoteApp( } func checksumIt(data []byte) string { h := sha256.New() - h.Write(data) + h.Write(data) //nolint: errcheck // ignore errcheck return fmt.Sprintf("%x", h.Sum(nil)) } diff --git a/mempool/codec.go b/mempool/codec.go deleted file mode 100644 index 9647e8c2c..000000000 --- a/mempool/codec.go +++ /dev/null @@ -1,11 +0,0 @@ -package mempool - -import ( - amino "github.com/tendermint/go-amino" -) - -var cdc = amino.NewCodec() - -func init() { - RegisterMessages(cdc) -} diff --git a/mempool/doc.go b/mempool/doc.go index ddd47aa2d..7e6363e12 100644 --- a/mempool/doc.go +++ b/mempool/doc.go @@ -6,19 +6,18 @@ // safely by calling .NextWait() on each element. // So we have several go-routines: -// 1. Consensus calling Update() and Reap() synchronously +// 1. Consensus calling Update() and ReapMaxBytesMaxGas() synchronously // 2. Many mempool reactor's peer routines calling CheckTx() // 3. Many mempool reactor's peer routines traversing the txs linked list -// 4. Another goroutine calling GarbageCollectTxs() periodically // To manage these goroutines, there are three methods of locking. // 1. Mutations to the linked-list is protected by an internal mtx (CList is goroutine-safe) // 2. Mutations to the linked-list elements are atomic -// 3. CheckTx() calls can be paused upon Update() and Reap(), protected by .proxyMtx +// 3. 
CheckTx() and/or ReapMaxBytesMaxGas() calls can be paused upon Update(), protected by .updateMtx -// Garbage collection of old elements from mempool.txs is handlde via -// the DetachPrev() call, which makes old elements not reachable by -// peer broadcastTxRoutine() automatically garbage collected. +// Garbage collection of old elements from mempool.txs is handled via the +// DetachPrev() call, which makes old elements not reachable by peer +// broadcastTxRoutine(). // TODO: Better handle abci client errors. (make it automatically handle connection errors) package mempool diff --git a/mempool/errors.go b/mempool/errors.go index 8e4dcf7cd..e33e14ca3 100644 --- a/mempool/errors.go +++ b/mempool/errors.go @@ -1,9 +1,8 @@ package mempool import ( + "errors" "fmt" - - "github.com/pkg/errors" ) var ( diff --git a/mempool/mempool.go b/mempool/mempool.go index 97919ab99..d01958b53 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -37,7 +37,7 @@ type Mempool interface { // Update informs the mempool that the given txs were committed and can be discarded. // NOTE: this should be called *after* block is committed by consensus. - // NOTE: unsafe; Lock/Unlock must be managed by caller + // NOTE: Lock/Unlock must be managed by caller Update( blockHeight int64, blockTxs types.Txs, @@ -48,6 +48,7 @@ type Mempool interface { // FlushAppConn flushes the mempool connection to ensure async reqResCb calls are // done. E.g. from CheckTx. + // NOTE: Lock/Unlock must be managed by caller FlushAppConn() error // Flush removes all transactions from the mempool and cache @@ -68,8 +69,9 @@ type Mempool interface { // TxsBytes returns the total size of all txs in the mempool. TxsBytes() int64 - // InitWAL creates a directory for the WAL file and opens a file itself. - InitWAL() + // InitWAL creates a directory for the WAL file and opens a file itself. If + // there is an error, it will be of type *PathError. + InitWAL() error // CloseWAL closes and discards the underlying WAL file. // Any further writes will not be relayed to disk. @@ -100,19 +102,13 @@ type TxInfo struct { //-------------------------------------------------------------------------------- -// PreCheckAminoMaxBytes checks that the size of the transaction plus the amino -// overhead is smaller or equal to the expected maxBytes. +// PreCheckMaxBytes checks that the size of the transaction is smaller than or equal to the expected maxBytes. +func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { return func(tx types.Tx) error { - // We have to account for the amino overhead in the tx size as well - // NOTE: fieldNum = 1 as types.Block.Data contains Txs []Tx as first field. - // If this field order ever changes this needs to updated here accordingly. - // NOTE: if some []Tx are encoded without a parenting struct, the - // fieldNum is also equal to 1.
- aminoOverhead := types.ComputeAminoOverhead(tx, 1) - txSize := int64(len(tx)) + aminoOverhead + txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx}) + if txSize > maxBytes { - return fmt.Errorf("tx size (including amino overhead) is too big: %d, max: %d", + return fmt.Errorf("tx size is too big: %d, max: %d", txSize, maxBytes) } return nil diff --git a/mock/mempool.go b/mempool/mock/mempool.go similarity index 94% rename from mock/mempool.go rename to mempool/mock/mempool.go index 8c5b6e38f..be690efaa 100644 --- a/mock/mempool.go +++ b/mempool/mock/mempool.go @@ -38,5 +38,5 @@ func (Mempool) TxsBytes() int64 { return 0 } func (Mempool) TxsFront() *clist.CElement { return nil } func (Mempool) TxsWaitChan() <-chan struct{} { return nil } -func (Mempool) InitWAL() {} -func (Mempool) CloseWAL() {} +func (Mempool) InitWAL() error { return nil } +func (Mempool) CloseWAL() {} diff --git a/mempool/reactor.go b/mempool/reactor.go index fda12c021..6bf0ce7d7 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -1,26 +1,23 @@ package mempool import ( + "errors" "fmt" "math" - "reflect" - "sync" "time" - amino "github.com/tendermint/go-amino" - cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/clist" "github.com/tendermint/tendermint/libs/log" + tmsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/p2p" + protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" "github.com/tendermint/tendermint/types" ) const ( MempoolChannel = byte(0x30) - aminoOverheadForTxMessage = 8 - peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount // UnknownPeerID is the peer ID to use when running CheckTx when there is @@ -41,7 +38,7 @@ type Reactor struct { } type mempoolIDs struct { - mtx sync.RWMutex + mtx tmsync.RWMutex peerMap map[p2p.ID]uint16 nextID uint16 // assumes that a node will never have over 65536 active peers activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter @@ -134,13 +131,15 @@ func (memR *Reactor) OnStart() error { return nil } -// GetChannels implements Reactor. -// It returns the list of channels for this reactor. +// GetChannels implements Reactor by returning the list of channels for this +// reactor. func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor { + maxMsgSize := memR.config.MaxBatchBytes return []*p2p.ChannelDescriptor{ { - ID: MempoolChannel, - Priority: 5, + ID: MempoolChannel, + Priority: 5, + RecvMessageCapacity: maxMsgSize, }, } } @@ -148,7 +147,9 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor { // AddPeer implements Reactor. // It starts a broadcast routine ensuring all txs are forwarded to the given peer. func (memR *Reactor) AddPeer(peer p2p.Peer) { - go memR.broadcastTxRoutine(peer) + if memR.config.Broadcast { + go memR.broadcastTxRoutine(peer) + } } // RemovePeer implements Reactor. @@ -159,6 +160,8 @@ func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { // Receive implements Reactor. // It adds any received transactions to the mempool. +// XXX: do not call any methods that can block or incur heavy processing. 
+// https://github.com/tendermint/tendermint/issues/2888 func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { msg, err := memR.decodeMsg(msgBytes) if err != nil { @@ -168,20 +171,17 @@ func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { } memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) - switch msg := msg.(type) { - case *TxMessage: - txInfo := TxInfo{SenderID: memR.ids.GetForPeer(src)} - if src != nil { - txInfo.SenderP2PID = src.ID() - } - err := memR.mempool.CheckTx(msg.Tx, nil, txInfo) + txInfo := TxInfo{SenderID: memR.ids.GetForPeer(src)} + if src != nil { + txInfo.SenderP2PID = src.ID() + } + for _, tx := range msg.Txs { + err = memR.mempool.CheckTx(tx, nil, txInfo) if err != nil { - memR.Logger.Info("Could not check tx", "tx", txID(msg.Tx), "err", err) + memR.Logger.Info("Could not check tx", "tx", txID(tx), "err", err) } - // broadcasting happens from go routines per peer - default: - memR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } + // broadcasting happens from go routines per peer } // PeerState describes the state of a peer. @@ -191,12 +191,9 @@ type PeerState interface { // Send new mempool txs to peer. func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { - if !memR.config.Broadcast { - return - } - peerID := memR.ids.GetForPeer(peer) var next *clist.CElement + for { // In case of both next.NextWaitChan() and peer.Quit() are variable at the same time if !memR.IsRunning() || !peer.IsRunning() { @@ -218,9 +215,7 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { } } - memTx := next.Value.(*mempoolTx) - - // make sure the peer is up to date + // Make sure the peer is up to date. peerState, ok := peer.Get(types.PeerStateKey).(PeerState) if !ok { // Peer does not have a state yet. We set it in the consensus reactor, but @@ -231,16 +226,29 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) continue } - if peerState.GetHeight() < memTx.Height()-1 { // Allow for a lag of 1 block + + // Allow for a lag of 1 block. + memTx := next.Value.(*mempoolTx) + if peerState.GetHeight() < memTx.Height()-1 { time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) continue } - // ensure peer hasn't already sent us this tx - if _, ok := memTx.senders.Load(peerID); !ok { - // send memTx - msg := &TxMessage{Tx: memTx.tx} - success := peer.Send(MempoolChannel, cdc.MustMarshalBinaryBare(msg)) + txs := memR.txs(next, peerID, peerState.GetHeight()) // WARNING: mutates next! + + // send txs + if len(txs) > 0 { + msg := protomem.Message{ + Sum: &protomem.Message_Txs{ + Txs: &protomem.Txs{Txs: txs}, + }, + } + bz, err := msg.Marshal() + if err != nil { + panic(err) + } + memR.Logger.Debug("Sending N txs to peer", "N", len(txs), "peer", peer) + success := peer.Send(MempoolChannel, bz) if !success { time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) continue @@ -259,40 +267,77 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { } } +// txs iterates over the transaction list and builds a batch of txs. next is +// included. +// WARNING: mutates next! +func (memR *Reactor) txs(next *clist.CElement, peerID uint16, peerHeight int64) [][]byte { + batch := make([][]byte, 0) + + for { + memTx := next.Value.(*mempoolTx) + + if _, ok := memTx.senders.Load(peerID); !ok { + // If current batch + this tx size is greater than max => return. 
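An aside on the batching logic being added here, not part of the changeset: the reactor caps a batch by re-marshaling the candidate protomem.Message and comparing its Size() against MaxBatchBytes. A minimal, self-contained sketch of the same greedy size check, using gogoproto's BytesValue in place of the generated protomem types (so the exact byte counts are illustrative only) and a hypothetical batchUpTo helper:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	gogotypes "github.com/gogo/protobuf/types"
)

// batchUpTo greedily appends txs while the summed encoded size stays
// within maxBytes; the first tx that would overflow ends the batch.
func batchUpTo(txs [][]byte, maxBytes int) [][]byte {
	batch := make([][]byte, 0, len(txs))
	size := 0
	for _, tx := range txs {
		// tag + varint length + payload, like one element of a repeated bytes field
		txSize := proto.Size(&gogotypes.BytesValue{Value: tx})
		if size+txSize > maxBytes {
			break
		}
		batch = append(batch, tx)
		size += txSize
	}
	return batch
}

func main() {
	txs := [][]byte{[]byte("aaaa"), []byte("bbbb"), []byte("cccc")} // 6 encoded bytes each
	fmt.Println(len(batchUpTo(txs, 12))) // 2: a third tx would grow the batch to 18 bytes
}

Unlike this sketch, the txs method in the hunk re-encodes the whole wrapped Message on each iteration, which also accounts for the outer framing bytes.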
+ batchMsg := protomem.Message{ + Sum: &protomem.Message_Txs{ + Txs: &protomem.Txs{Txs: append(batch, memTx.tx)}, + }, + } + if batchMsg.Size() > memR.config.MaxBatchBytes { + return batch + } + + batch = append(batch, memTx.tx) + } + + n := next.Next() + if n == nil { + return batch + } + next = n + } +} + //----------------------------------------------------------------------------- // Messages -// Message is a message sent or received by the Reactor. -type Message interface{} +func (memR *Reactor) decodeMsg(bz []byte) (TxsMessage, error) { + msg := protomem.Message{} + err := msg.Unmarshal(bz) + if err != nil { + return TxsMessage{}, err + } -func RegisterMessages(cdc *amino.Codec) { - cdc.RegisterInterface((*Message)(nil), nil) - cdc.RegisterConcrete(&TxMessage{}, "tendermint/mempool/TxMessage", nil) -} + var message TxsMessage + + if i, ok := msg.Sum.(*protomem.Message_Txs); ok { + txs := i.Txs.GetTxs() + + if len(txs) == 0 { + return message, errors.New("empty TxsMessage") + } -func (memR *Reactor) decodeMsg(bz []byte) (msg Message, err error) { - maxMsgSize := calcMaxMsgSize(memR.config.MaxTxBytes) - if l := len(bz); l > maxMsgSize { - return msg, ErrTxTooLarge{maxMsgSize, l} + decoded := make([]types.Tx, len(txs)) + for j, tx := range txs { + decoded[j] = types.Tx(tx) + } + + message = TxsMessage{ + Txs: decoded, + } + return message, nil } - err = cdc.UnmarshalBinaryBare(bz, &msg) - return + return message, fmt.Errorf("msg type: %T is not supported", msg) } //------------------------------------- -// TxMessage is a Message containing a transaction. -type TxMessage struct { - Tx types.Tx -} - -// String returns a string representation of the TxMessage. -func (m *TxMessage) String() string { - return fmt.Sprintf("[TxMessage %v]", m.Tx) +// TxsMessage is a Message containing transactions. +type TxsMessage struct { + Txs []types.Tx } -// calcMaxMsgSize returns the max size of TxMessage -// account for amino overhead of TxMessage -func calcMaxMsgSize(maxTxSize int) int { - return maxTxSize + aminoOverheadForTxMessage +// String returns a string representation of the TxsMessage. +func (m *TxsMessage) String() string { + return fmt.Sprintf("[TxsMessage %v]", m.Txs) } diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index 87da0557d..d9e67d166 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -1,6 +1,8 @@ package mempool import ( + "encoding/hex" + "errors" "net" "sync" "testing" @@ -8,18 +10,26 @@ import ( "github.com/fortytw2/leaktest" "github.com/go-kit/kit/log/term" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" + abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" + tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p/mock" + memproto "github.com/tendermint/tendermint/proto/tendermint/mempool" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) +const ( + numTxs = 1000 + timeout = 120 * time.Second // ridiculously high because CircleCI is slow +) + type peerState struct { height int64 } @@ -28,97 +38,104 @@ func (ps peerState) GetHeight() int64 { return ps.height } -// mempoolLogger is a TestingLogger which uses a different -// color for each validator ("validator" key must exist). 
-func mempoolLogger() log.Logger { - return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor { - for i := 0; i < len(keyvals)-1; i += 2 { - if keyvals[i] == "validator" { - return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))} +// Send a bunch of txs to the first reactor's mempool and wait for them all to +// be received in the others. +func TestReactorBroadcastTxsMessage(t *testing.T) { + config := cfg.TestConfig() + // if there were more than two reactors, the order of transactions could not be + // asserted in waitForTxsOnReactors (due to transactions gossiping). If we + // replace Connect2Switches (full mesh) with a func, which connects first + // reactor to others and nothing else, this test should also pass with >2 reactors. + const N = 2 + reactors := makeAndConnectReactors(config, N) + defer func() { + for _, r := range reactors { + if err := r.Stop(); err != nil { + assert.NoError(t, err) } } - return term.FgBgColor{} - }) -} + }() + for _, r := range reactors { + for _, peer := range r.Switch.Peers().List() { + peer.Set(types.PeerStateKey, peerState{1}) + } + } -// connect N mempool reactors through N switches -func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor { - reactors := make([]*Reactor, n) - logger := mempoolLogger() - for i := 0; i < n; i++ { - app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - mempool, cleanup := newMempoolWithApp(cc) - defer cleanup() + txs := checkTxs(t, reactors[0].mempool, numTxs, UnknownPeerID) + waitForTxsOnReactors(t, txs, reactors) +} - reactors[i] = NewReactor(config.Mempool, mempool) // so we dont start the consensus states - reactors[i].SetLogger(logger.With("validator", i)) +// regression test for https://github.com/tendermint/tendermint/issues/5408 +func TestReactorConcurrency(t *testing.T) { + config := cfg.TestConfig() + const N = 2 + reactors := makeAndConnectReactors(config, N) + defer func() { + for _, r := range reactors { + if err := r.Stop(); err != nil { + assert.NoError(t, err) + } + } + }() + for _, r := range reactors { + for _, peer := range r.Switch.Peers().List() { + peer.Set(types.PeerStateKey, peerState{1}) + } } + var wg sync.WaitGroup - p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch { - s.AddReactor("MEMPOOL", reactors[i]) - return s + const numTxs = 5 - }, p2p.Connect2Switches) - return reactors -} + for i := 0; i < 1000; i++ { + wg.Add(2) -func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) { - // wait for the txs in all mempools - wg := new(sync.WaitGroup) - for i, reactor := range reactors { - wg.Add(1) - go func(r *Reactor, reactorIndex int) { + // 1. submit a bunch of txs + // 2. update the whole mempool + txs := checkTxs(t, reactors[0].mempool, numTxs, UnknownPeerID) + go func() { defer wg.Done() - waitForTxsOnReactor(t, txs, r, reactorIndex) - }(reactor, i) - } - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() + reactors[0].mempool.Lock() + defer reactors[0].mempool.Unlock() - timer := time.After(Timeout) - select { - case <-timer: - t.Fatal("Timed out waiting for txs") - case <-done: - } -} + deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs)) + for i := range txs { + deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0} + } + err := reactors[0].mempool.Update(1, txs, deliverTxResponses, nil, nil) + assert.NoError(t, err) + }() + + // 1. submit a bunch of txs + // 2. 
update none + _ = checkTxs(t, reactors[1].mempool, numTxs, UnknownPeerID) + go func() { + defer wg.Done() -func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) { - mempool := reactor.mempool - for mempool.Size() < len(txs) { - time.Sleep(time.Millisecond * 100) - } + reactors[1].mempool.Lock() + defer reactors[1].mempool.Unlock() + err := reactors[1].mempool.Update(1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil) + assert.NoError(t, err) + }() - reapedTxs := mempool.ReapMaxTxs(len(txs)) - for i, tx := range txs { - assert.Equalf(t, tx, reapedTxs[i], - "txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i]) + // 1. flush the mempool + reactors[1].mempool.Flush() } -} -// ensure no txs on reactor after some timeout -func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) { - time.Sleep(timeout) // wait for the txs in all mempools - assert.Zero(t, reactor.mempool.Size()) + wg.Wait() } -const ( - NumTxs = 1000 - Timeout = 120 * time.Second // ridiculously high because CircleCI is slow -) - -func TestReactorBroadcastTxMessage(t *testing.T) { +// Send a bunch of txs to the first reactor's mempool, claiming it came from peer +// ensure peer gets no txs. +func TestReactorNoBroadcastToSender(t *testing.T) { config := cfg.TestConfig() - const N = 4 + const N = 2 reactors := makeAndConnectReactors(config, N) defer func() { for _, r := range reactors { - r.Stop() + if err := r.Stop(); err != nil { + assert.NoError(t, err) + } } }() for _, r := range reactors { @@ -127,26 +144,49 @@ func TestReactorBroadcastTxMessage(t *testing.T) { } } - // send a bunch of txs to the first reactor's mempool - // and wait for them all to be received in the others - txs := checkTxs(t, reactors[0].mempool, NumTxs, UnknownPeerID) - waitForTxsOnReactors(t, txs, reactors) + const peerID = 1 + checkTxs(t, reactors[0].mempool, numTxs, peerID) + ensureNoTxs(t, reactors[peerID], 100*time.Millisecond) } -func TestReactorNoBroadcastToSender(t *testing.T) { +func TestReactor_MaxBatchBytes(t *testing.T) { config := cfg.TestConfig() + config.Mempool.MaxBatchBytes = 1024 + const N = 2 reactors := makeAndConnectReactors(config, N) defer func() { for _, r := range reactors { - r.Stop() + if err := r.Stop(); err != nil { + assert.NoError(t, err) + } } }() + for _, r := range reactors { + for _, peer := range r.Switch.Peers().List() { + peer.Set(types.PeerStateKey, peerState{1}) + } + } - // send a bunch of txs to the first reactor's mempool, claiming it came from peer - // ensure peer gets no txs - checkTxs(t, reactors[0].mempool, NumTxs, 1) + // Broadcast a tx, which has the max size (minus proto overhead) + // => ensure it's received by the second reactor. 
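An aside on the "(minus proto overhead)" remark, not part of the changeset: each layer of wrapping — the tx as an element of Txs, and Txs as the Message payload — costs a one-byte field tag plus a varint length prefix. The hypothetical helper below reproduces the numbers this test uses with MaxBatchBytes = 1024: a 1018-byte tx encodes to exactly 1024 bytes, while a 1020-byte tx encodes to 1026 and can never be sent.

package main

import "fmt"

// varintLen returns how many bytes the protobuf varint encoding of n takes.
func varintLen(n int) int {
	b := 1
	for n >= 0x80 {
		n >>= 7
		b++
	}
	return b
}

// framedSize wraps an n-byte payload in the two framing layers used by the
// mempool message: a Txs.txs element inside the Message.txs field.
func framedSize(n int) int {
	inner := 1 + varintLen(n) + n       // repeated bytes element: tag + len + payload
	return 1 + varintLen(inner) + inner // Message field: tag + len + Txs bytes
}

func main() {
	fmt.Println(framedSize(1018)) // 1024: fills the batch exactly
	fmt.Println(framedSize(1020)) // 1026: over the 1024-byte cap
}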
+ tx1 := tmrand.Bytes(1018) + err := reactors[0].mempool.CheckTx(tx1, nil, TxInfo{SenderID: UnknownPeerID}) + require.NoError(t, err) + waitForTxsOnReactors(t, []types.Tx{tx1}, reactors) + + reactors[0].mempool.Flush() + reactors[1].mempool.Flush() + + // Broadcast a tx, which is beyond the max size + // => ensure it's not sent + tx2 := tmrand.Bytes(1020) + err = reactors[0].mempool.CheckTx(tx2, nil, TxInfo{SenderID: UnknownPeerID}) + require.NoError(t, err) ensureNoTxs(t, reactors[1], 100*time.Millisecond) + // => ensure the second reactor did not disconnect from us + out, in, _ := reactors[1].Switch.NumPeers() + assert.Equal(t, 1, out+in) } func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { @@ -159,7 +199,9 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { reactors := makeAndConnectReactors(config, N) defer func() { for _, r := range reactors { - r.Stop() + if err := r.Stop(); err != nil { + assert.NoError(t, err) + } } }() @@ -183,7 +225,9 @@ func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) { // stop reactors for _, r := range reactors { - r.Stop() + if err := r.Stop(); err != nil { + assert.NoError(t, err) + } } // check that we are not leaking any go-routines @@ -230,7 +274,9 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) { reactors := makeAndConnectReactors(config, N) defer func() { for _, r := range reactors { - r.Stop() + if err := r.Stop(); err != nil { + assert.NoError(t, err) + } } }() reactor := reactors[0] @@ -241,3 +287,107 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) { reactor.AddPeer(peer) } } + +// mempoolLogger is a TestingLogger which uses a different +// color for each validator ("validator" key must exist). +func mempoolLogger() log.Logger { + return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor { + for i := 0; i < len(keyvals)-1; i += 2 { + if keyvals[i] == "validator" { + return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))} + } + } + return term.FgBgColor{} + }) +} + +// connect N mempool reactors through N switches +func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor { + reactors := make([]*Reactor, n) + logger := mempoolLogger() + for i := 0; i < n; i++ { + app := kvstore.NewApplication() + cc := proxy.NewLocalClientCreator(app) + mempool, cleanup := newMempoolWithApp(cc) + defer cleanup() + + reactors[i] = NewReactor(config.Mempool, mempool) // so we dont start the consensus states + reactors[i].SetLogger(logger.With("validator", i)) + } + + p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch { + s.AddReactor("MEMPOOL", reactors[i]) + return s + + }, p2p.Connect2Switches) + return reactors +} + +func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) { + // wait for the txs in all mempools + wg := new(sync.WaitGroup) + for i, reactor := range reactors { + wg.Add(1) + go func(r *Reactor, reactorIndex int) { + defer wg.Done() + waitForTxsOnReactor(t, txs, r, reactorIndex) + }(reactor, i) + } + + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + timer := time.After(timeout) + select { + case <-timer: + t.Fatal("Timed out waiting for txs") + case <-done: + } +} + +func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) { + mempool := reactor.mempool + for mempool.Size() < len(txs) { + time.Sleep(time.Millisecond * 100) + } + + reapedTxs := mempool.ReapMaxTxs(len(txs)) + for i, tx := range txs { + assert.Equalf(t, tx, reapedTxs[i], + "txs at index %d on 
reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i]) + } +} + +// ensure no txs on reactor after some timeout +func ensureNoTxs(t *testing.T, reactor *Reactor, timeout time.Duration) { + time.Sleep(timeout) // wait for the txs in all mempools + assert.Zero(t, reactor.mempool.Size()) +} + +func TestMempoolVectors(t *testing.T) { + testCases := []struct { + testName string + tx []byte + expBytes string + }{ + {"tx 1", []byte{123}, "0a030a017b"}, + {"tx 2", []byte("proto encoding in mempool"), "0a1b0a1970726f746f20656e636f64696e6720696e206d656d706f6f6c"}, + } + + for _, tc := range testCases { + tc := tc + + msg := memproto.Message{ + Sum: &memproto.Message_Txs{ + Txs: &memproto.Txs{Txs: [][]byte{tc.tx}}, + }, + } + bz, err := msg.Marshal() + require.NoError(t, err, tc.testName) + + require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) + } +} diff --git a/networks/local/localnode/Dockerfile b/networks/local/localnode/Dockerfile index 03af5aa3c..ac4bc6071 100644 --- a/networks/local/localnode/Dockerfile +++ b/networks/local/localnode/Dockerfile @@ -1,5 +1,4 @@ FROM alpine:3.7 -MAINTAINER Greg Szabo RUN apk update && \ apk upgrade && \ diff --git a/networks/remote/integration.sh b/networks/remote/integration.sh index 2a88035af..07382ba71 100644 --- a/networks/remote/integration.sh +++ b/networks/remote/integration.sh @@ -10,8 +10,8 @@ sudo apt-get upgrade -y sudo apt-get install -y jq unzip python-pip software-properties-common make # get and unpack golang -curl -O https://storage.googleapis.com/golang/go1.13.linux-amd64.tar.gz -tar -xvf go1.13.linux-amd64.tar.gz +curl -O https://dl.google.com/go/go1.15.4.linux-amd64.tar.gz +tar -xvf go1.15.4.linux-amd64.tar.gz ## move binary and add to path mv go /usr/local diff --git a/node/codec.go b/node/codec.go deleted file mode 100644 index e172b9696..000000000 --- a/node/codec.go +++ /dev/null @@ -1,13 +0,0 @@ -package node - -import ( - amino "github.com/tendermint/go-amino" - - cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" -) - -var cdc = amino.NewCodec() - -func init() { - cryptoamino.RegisterAmino(cdc) -} diff --git a/node/id.go b/node/id.go index 18e6aeb5d..ffa162f81 100644 --- a/node/id.go +++ b/node/id.go @@ -30,6 +30,6 @@ type SignedNodeGreeting struct { } func (pnid *PrivNodeID) SignGreeting() *SignedNodeGreeting { - //greeting := NodeGreeting{} + // greeting := NodeGreeting{} return nil } diff --git a/node/node.go b/node/node.go index 5fb0664ea..7f365ed4c 100644 --- a/node/node.go +++ b/node/node.go @@ -3,20 +3,18 @@ package node import ( "bytes" "context" + "errors" "fmt" "net" "net/http" _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port - "os" "strings" "time" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/rs/cors" - amino "github.com/tendermint/go-amino" dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" @@ -24,26 +22,27 @@ import ( bcv1 "github.com/tendermint/tendermint/blockchain/v1" bcv2 "github.com/tendermint/tendermint/blockchain/v2" cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/consensus" cs "github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/evidence" + tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" 
"github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/light" mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p/pex" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" rpccore "github.com/tendermint/tendermint/rpc/core" - ctypes "github.com/tendermint/tendermint/rpc/core/types" grpccore "github.com/tendermint/tendermint/rpc/grpc" - rpcserver "github.com/tendermint/tendermint/rpc/lib/server" + rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/state/txindex/kv" "github.com/tendermint/tendermint/state/txindex/null" + "github.com/tendermint/tendermint/statesync" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" @@ -65,7 +64,7 @@ type DBProvider func(*DBContext) (dbm.DB, error) // specified in the ctx.Config. func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { dbType := dbm.BackendType(ctx.Config.DBBackend) - return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()), nil + return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) } // GenesisDocProvider returns a GenesisDoc. @@ -88,31 +87,18 @@ type Provider func(*cfg.Config, log.Logger) (*Node, error) // PrivValidator, ClientCreator, GenesisDoc, and DBProvider. // It implements NodeProvider. func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { - // Generate node PrivKey nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err) } - // Convert old PrivValidator if it exists. - oldPrivVal := config.OldPrivValidatorFile() - newPrivValKey := config.PrivValidatorKeyFile() - newPrivValState := config.PrivValidatorStateFile() - if _, err := os.Stat(oldPrivVal); !os.IsNotExist(err) { - oldPV, err := privval.LoadOldFilePV(oldPrivVal) - if err != nil { - return nil, fmt.Errorf("error reading OldPrivValidator from %v: %v", oldPrivVal, err) - } - logger.Info("Upgrading PrivValidator file", - "old", oldPrivVal, - "newKey", newPrivValKey, - "newState", newPrivValState, - ) - oldPV.Upgrade(newPrivValKey, newPrivValState) + pval, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) + if err != nil { + return nil, err } return NewNode(config, - privval.LoadOrGenFilePV(newPrivValKey, newPrivValState), + pval, nodeKey, proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), DefaultGenesisDocProviderFunc(config), @@ -142,6 +128,12 @@ func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { // Option sets a parameter for the node. type Option func(*Node) +// Temporary interface for switching to fast sync, we should get rid of v0 and v1 reactors. +// See: https://github.com/tendermint/tendermint/issues/4595 +type fastSyncReactor interface { + SwitchToFastSync(sm.State) error +} + // CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to // the node's Switch. 
// @@ -153,6 +145,7 @@ type Option func(*Node) // - CONSENSUS // - EVIDENCE // - PEX +// - STATESYNC func CustomReactors(reactors map[string]p2p.Reactor) Option { return func(n *Node) { for name, reactor := range reactors { @@ -166,6 +159,15 @@ func CustomReactors(reactors map[string]p2p.Reactor) Option { } } +// StateProvider overrides the state provider used by state sync to retrieve trusted app hashes and +// build a State object for bootstrapping the node. +// WARNING: this interface is considered unstable and subject to change. +func StateProvider(stateProvider statesync.StateProvider) Option { + return func(n *Node) { + n.stateSyncProvider = stateProvider + } +} + //------------------------------------------------------------------------------ // Node is the highest level interface to a full Tendermint node. @@ -187,21 +189,25 @@ type Node struct { isListening bool // services - eventBus *types.EventBus // pub/sub for services - stateDB dbm.DB - blockStore *store.BlockStore // store the blockchain to disk - bcReactor p2p.Reactor // for fast-syncing - mempoolReactor *mempl.Reactor // for gossipping transactions - mempool mempl.Mempool - consensusState *cs.State // latest consensus state - consensusReactor *cs.Reactor // for participating in the consensus - pexReactor *pex.Reactor // for exchanging peer addresses - evidencePool *evidence.Pool // tracking evidence - proxyApp proxy.AppConns // connection to the application - rpcListeners []net.Listener // rpc servers - txIndexer txindex.TxIndexer - indexerService *txindex.IndexerService - prometheusSrv *http.Server + eventBus *types.EventBus // pub/sub for services + stateStore sm.Store + blockStore *store.BlockStore // store the blockchain to disk + bcReactor p2p.Reactor // for fast-syncing + mempoolReactor *mempl.Reactor // for gossipping transactions + mempool mempl.Mempool + stateSync bool // whether the node should state sync on startup + stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots + stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node + stateSyncGenesis sm.State // provides the genesis state for state sync + consensusState *cs.State // latest consensus state + consensusReactor *cs.Reactor // for participating in the consensus + pexReactor *pex.Reactor // for exchanging peer addresses + evidencePool *evidence.Pool // tracking evidence + proxyApp proxy.AppConns // connection to the application + rpcListeners []net.Listener // rpc servers + txIndexer txindex.TxIndexer + indexerService *txindex.IndexerService + prometheusSrv *http.Server } func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { @@ -248,14 +254,7 @@ func createAndStartIndexerService(config *cfg.Config, dbProvider DBProvider, if err != nil { return nil, nil, err } - switch { - case config.TxIndex.IndexKeys != "": - txIndexer = kv.NewTxIndex(store, kv.IndexEvents(splitAndTrimEmpty(config.TxIndex.IndexKeys, ",", " "))) - case config.TxIndex.IndexAllKeys: - txIndexer = kv.NewTxIndex(store, kv.IndexAllEvents()) - default: - txIndexer = kv.NewTxIndex(store) - } + txIndexer = kv.NewTxIndex(store) default: txIndexer = &null.TxIndex{} } @@ -269,7 +268,7 @@ func createAndStartIndexerService(config *cfg.Config, dbProvider DBProvider, } func doHandshake( - stateDB dbm.DB, + stateStore sm.Store, state sm.State, blockStore sm.BlockStore, genDoc *types.GenesisDoc, @@ -277,7 +276,7 @@ func doHandshake( proxyApp proxy.AppConns, consensusLogger 
log.Logger) error { - handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc) + handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) handshaker.SetLogger(consensusLogger) handshaker.SetEventBus(eventBus) if err := handshaker.Handshake(proxyApp); err != nil { @@ -341,15 +340,17 @@ func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns, } func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider, - stateDB dbm.DB, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) { + stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) { evidenceDB, err := dbProvider(&DBContext{"evidence", config}) if err != nil { return nil, nil, err } evidenceLogger := logger.With("module", "evidence") - evidencePool := evidence.NewPool(stateDB, evidenceDB) - evidencePool.SetLogger(evidenceLogger) + evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore) + if err != nil { + return nil, nil, err + } evidenceReactor := evidence.NewReactor(evidencePool) evidenceReactor.SetLogger(evidenceLogger) return evidenceReactor, evidencePool, nil @@ -385,9 +386,9 @@ func createConsensusReactor(config *cfg.Config, evidencePool *evidence.Pool, privValidator types.PrivValidator, csMetrics *cs.Metrics, - fastSync bool, + waitSync bool, eventBus *types.EventBus, - consensusLogger log.Logger) (*consensus.Reactor, *consensus.State) { + consensusLogger log.Logger) (*cs.Reactor, *cs.State) { consensusState := cs.NewState( config.Consensus, @@ -402,7 +403,7 @@ func createConsensusReactor(config *cfg.Config, if privValidator != nil { consensusState.SetPrivValidator(privValidator) } - consensusReactor := cs.NewReactor(consensusState, fastSync, cs.ReactorMetrics(csMetrics)) + consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics)) consensusReactor.SetLogger(consensusLogger) // services which will be publishing and/or subscribing for messages (events) // consensusReactor will set it on consensusState and blockExecutor @@ -485,7 +486,8 @@ func createSwitch(config *cfg.Config, peerFilters []p2p.PeerFilterFunc, mempoolReactor *mempl.Reactor, bcReactor p2p.Reactor, - consensusReactor *consensus.Reactor, + stateSyncReactor *statesync.Reactor, + consensusReactor *cs.Reactor, evidenceReactor *evidence.Reactor, nodeInfo p2p.NodeInfo, nodeKey *p2p.NodeKey, @@ -502,6 +504,7 @@ func createSwitch(config *cfg.Config, sw.AddReactor("BLOCKCHAIN", bcReactor) sw.AddReactor("CONSENSUS", consensusReactor) sw.AddReactor("EVIDENCE", evidenceReactor) + sw.AddReactor("STATESYNC", stateSyncReactor) sw.SetNodeInfo(nodeInfo) sw.SetNodeKey(nodeKey) @@ -520,14 +523,14 @@ func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, if config.P2P.ExternalAddress != "" { addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress)) if err != nil { - return nil, errors.Wrap(err, "p2p.external_address is incorrect") + return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err) } addrBook.AddOurAddress(addr) } if config.P2P.ListenAddress != "" { addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress)) if err != nil { - return nil, errors.Wrap(err, "p2p.laddr is incorrect") + return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err) } addrBook.AddOurAddress(addr) } @@ -558,6 +561,62 @@ func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, return pexReactor } +// 
startStateSync starts an asynchronous state sync process, then switches to fast sync mode. +func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor, + stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool, + stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error { + ssR.Logger.Info("Starting state sync") + + if stateProvider == nil { + var err error + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + stateProvider, err = statesync.NewLightClientStateProvider( + ctx, + state.ChainID, state.Version, state.InitialHeight, + config.RPCServers, light.TrustOptions{ + Period: config.TrustPeriod, + Height: config.TrustHeight, + Hash: config.TrustHashBytes(), + }, ssR.Logger.With("module", "light")) + if err != nil { + return fmt.Errorf("failed to set up light client state provider: %w", err) + } + } + + go func() { + state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime) + if err != nil { + ssR.Logger.Error("State sync failed", "err", err) + return + } + err = stateStore.Bootstrap(state) + if err != nil { + ssR.Logger.Error("Failed to bootstrap node with new state", "err", err) + return + } + err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit) + if err != nil { + ssR.Logger.Error("Failed to store last seen commit", "err", err) + return + } + + if fastSync { + // FIXME Very ugly to have these metrics bleed through here. + conR.Metrics.StateSyncing.Set(0) + conR.Metrics.FastSyncing.Set(1) + err = bcR.SwitchToFastSync(state) + if err != nil { + ssR.Logger.Error("Failed to switch to fast sync", "err", err) + return + } + } else { + conR.SwitchToConsensus(state, true) + } + }() + return nil +} + // NewNode returns a new, ready to go, Tendermint Node. func NewNode(config *cfg.Config, privValidator types.PrivValidator, @@ -574,6 +633,8 @@ func NewNode(config *cfg.Config, return nil, err } + stateStore := sm.NewStore(stateDB) + state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider) if err != nil { return nil, err @@ -600,53 +661,65 @@ func NewNode(config *cfg.Config, return nil, err } - // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, - // and replays any blocks as necessary to sync tendermint with the app. - consensusLogger := logger.With("module", "consensus") - if err := doHandshake(stateDB, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { - return nil, err - } - - // Reload the state. It will have the Version.Consensus.App set by the - // Handshake, and may have other modifications as well (ie. depending on - // what happened during block replay). - state = sm.LoadState(stateDB) - // If an address is provided, listen on the socket for a connection from an // external signing process. 
if config.PrivValidatorListenAddr != "" { // FIXME: we should start services inside OnStart - privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, logger) + privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger) if err != nil { - return nil, errors.Wrap(err, "error with private validator socket client") + return nil, fmt.Errorf("error with private validator socket client: %w", err) } } pubKey, err := privValidator.GetPubKey() if err != nil { - return nil, errors.Wrap(err, "can't get pubkey") + return nil, fmt.Errorf("can't get pubkey: %w", err) } - logNodeStartupInfo(state, pubKey, logger, consensusLogger) + // Determine whether we should attempt state sync. + stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey) + if stateSync && state.LastBlockHeight > 0 { + logger.Info("Found local state with non-zero height, skipping state sync") + stateSync = false + } - // Decide whether to fast-sync or not - // We don't fast-sync when the only validator is us. + // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, + // and replays any blocks as necessary to sync tendermint with the app. + consensusLogger := logger.With("module", "consensus") + if !stateSync { + if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { + return nil, err + } + + // Reload the state. It will have the Version.Consensus.App set by the + // Handshake, and may have other modifications as well (ie. depending on + // what happened during block replay). + state, err = stateStore.Load() + if err != nil { + return nil, fmt.Errorf("cannot load state: %w", err) + } + } + + // Determine whether we should do fast sync. This must happen after the handshake, since the + // app may modify the validator set, specifying ourself as the only validator. fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey) + logNodeStartupInfo(state, pubKey, logger, consensusLogger) + csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID) // Make MempoolReactor mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger) // Make Evidence Reactor - evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, logger) + evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger) if err != nil { return nil, err } // make block executor for consensus and blockchain reactors to execute blocks blockExec := sm.NewBlockExecutor( - stateDB, + stateStore, logger.With("module", "state"), proxyApp.Consensus(), mempool, @@ -654,18 +727,32 @@ func NewNode(config *cfg.Config, sm.BlockExecutorWithMetrics(smMetrics), ) - // Make BlockchainReactor - bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync, logger) + // Make BlockchainReactor. Don't start fast sync if we're doing a state sync first. + bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && !stateSync, logger) if err != nil { - return nil, errors.Wrap(err, "could not create blockchain reactor") + return nil, fmt.Errorf("could not create blockchain reactor: %w", err) } - // Make ConsensusReactor + // Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first. + // FIXME We need to update metrics here, since other reactors don't have access to them. 
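As the FIXME above concedes, the metric handoff is easier to read in isolation. A minimal sketch of the gating that follows (the gauge names are illustrative, not the ones the consensus metrics actually register), showing that state sync takes precedence over fast sync, and that consensus starts in wait mode if either phase must run first:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	stateSyncing := prometheus.NewGauge(prometheus.GaugeOpts{Name: "state_syncing"})
	fastSyncing := prometheus.NewGauge(prometheus.GaugeOpts{Name: "fast_syncing"})

	stateSync, fastSync := true, true

	switch {
	case stateSync:
		stateSyncing.Set(1) // fast sync's gauge flips on later, when state sync hands off
	case fastSync:
		fastSyncing.Set(1)
	}

	waitSync := stateSync || fastSync // the consensus reactor starts paused if either phase runs
	fmt.Println("waitSync:", waitSync)
}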
+ if stateSync { + csMetrics.StateSyncing.Set(1) + } else if fastSync { + csMetrics.FastSyncing.Set(1) + } consensusReactor, consensusState := createConsensusReactor( config, state, blockExec, blockStore, mempool, evidencePool, - privValidator, csMetrics, fastSync, eventBus, consensusLogger, + privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger, ) + // Set up state sync reactor, and schedule a sync if requested. + // FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy, + // we should clean this whole thing up. See: + // https://github.com/tendermint/tendermint/issues/4644 + stateSyncReactor := statesync.NewReactor(proxyApp.Snapshot(), proxyApp.Query(), + config.StateSync.TempDir) + stateSyncReactor.SetLogger(logger.With("module", "statesync")) + nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state) if err != nil { return nil, err @@ -678,22 +765,22 @@ func NewNode(config *cfg.Config, p2pLogger := logger.With("module", "p2p") sw := createSwitch( config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor, - consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger, + stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger, ) err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) if err != nil { - return nil, errors.Wrap(err, "could not add peers from persistent_peers field") + return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) } err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) if err != nil { - return nil, errors.Wrap(err, "could not add peer ids from unconditional_peer_ids field") + return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) } addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) if err != nil { - return nil, errors.Wrap(err, "could not create addrbook") + return nil, fmt.Errorf("could not create addrbook: %w", err) } // Optionally, start the pex reactor @@ -713,9 +800,10 @@ func NewNode(config *cfg.Config, pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) } - if config.ProfListenAddress != "" { + if config.RPC.PprofListenAddress != "" { go func() { - logger.Error("Profile server", "err", http.ListenAndServe(config.ProfListenAddress, nil)) + logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress) + logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil)) }() } @@ -730,13 +818,16 @@ func NewNode(config *cfg.Config, nodeInfo: nodeInfo, nodeKey: nodeKey, - stateDB: stateDB, + stateStore: stateStore, blockStore: blockStore, bcReactor: bcReactor, mempoolReactor: mempoolReactor, mempool: mempool, consensusState: consensusState, consensusReactor: consensusReactor, + stateSyncReactor: stateSyncReactor, + stateSync: stateSync, + stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state pexReactor: pexReactor, evidencePool: evidencePool, proxyApp: proxyApp, @@ -792,7 +883,10 @@ func (n *Node) OnStart() error { n.isListening = true if n.config.Mempool.WalEnabled() { - n.mempool.InitWAL() // no need to have the mempool wal during tests + err = n.mempool.InitWAL() + if err != nil { + return fmt.Errorf("init mempool WAL: %w", err) + } } // Start the switch (the P2P server). 
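The hunk below shows a pattern repeated throughout this diff: github.com/pkg/errors is dropped, and errors are wrapped with fmt.Errorf and the %w verb instead, which keeps the cause visible to the standard library's errors.Is and errors.As. A self-contained sketch, with an illustrative error value and the same message the hunk uses:

package main

import (
	"errors"
	"fmt"
)

var errConnRefused = errors.New("connection refused")

func dialPeers() error { return errConnRefused }

func start() error {
	if err := dialPeers(); err != nil {
		// %w records err as the wrapped cause; unlike the old errors.Wrap,
		// it needs no third-party package.
		return fmt.Errorf("could not dial peers from persistent_peers field: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(errors.Is(start(), errConnRefused)) // true: the cause survives wrapping
}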
@@ -804,7 +898,20 @@ func (n *Node) OnStart() error { // Always connect to persistent peers err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " ")) if err != nil { - return errors.Wrap(err, "could not dial peers from persistent_peers field") + return fmt.Errorf("could not dial peers from persistent_peers field: %w", err) + } + + // Run state sync + if n.stateSync { + bcR, ok := n.bcReactor.(fastSyncReactor) + if !ok { + return fmt.Errorf("this blockchain reactor does not support switching from state sync") + } + err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider, + n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis) + if err != nil { + return fmt.Errorf("failed to start state sync: %w", err) + } } return nil @@ -817,11 +924,17 @@ func (n *Node) OnStop() { n.Logger.Info("Stopping Node") // first stop the non-reactor services - n.eventBus.Stop() - n.indexerService.Stop() + if err := n.eventBus.Stop(); err != nil { + n.Logger.Error("Error closing eventBus", "err", err) + } + if err := n.indexerService.Stop(); err != nil { + n.Logger.Error("Error closing indexerService", "err", err) + } // now stop the reactors - n.sw.Stop() + if err := n.sw.Stop(); err != nil { + n.Logger.Error("Error closing switch", "err", err) + } // stop mempool WAL if n.config.Mempool.WalEnabled() { @@ -843,7 +956,9 @@ func (n *Node) OnStop() { } if pvsc, ok := n.privValidator.(service.Service); ok { - pvsc.Stop() + if err := pvsc.Stop(); err != nil { + n.Logger.Error("Error closing private validator", "err", err) + } } if n.prometheusSrv != nil { @@ -854,35 +969,44 @@ func (n *Node) OnStop() { } } -// ConfigureRPC sets all variables in rpccore so they will serve -// rpc calls from this node -func (n *Node) ConfigureRPC() { - rpccore.SetStateDB(n.stateDB) - rpccore.SetBlockStore(n.blockStore) - rpccore.SetConsensusState(n.consensusState) - rpccore.SetMempool(n.mempool) - rpccore.SetEvidencePool(n.evidencePool) - rpccore.SetP2PPeers(n.sw) - rpccore.SetP2PTransport(n) +// ConfigureRPC makes sure RPC has all the objects it needs to operate. 
+func (n *Node) ConfigureRPC() error { pubKey, err := n.privValidator.GetPubKey() if err != nil { - panic(err) - } - rpccore.SetPubKey(pubKey) - rpccore.SetGenesisDoc(n.genesisDoc) - rpccore.SetProxyAppQuery(n.proxyApp.Query()) - rpccore.SetTxIndexer(n.txIndexer) - rpccore.SetConsensusReactor(n.consensusReactor) - rpccore.SetEventBus(n.eventBus) - rpccore.SetLogger(n.Logger.With("module", "rpc")) - rpccore.SetConfig(*n.config.RPC) + return fmt.Errorf("can't get pubkey: %w", err) + } + rpccore.SetEnvironment(&rpccore.Environment{ + ProxyAppQuery: n.proxyApp.Query(), + ProxyAppMempool: n.proxyApp.Mempool(), + + StateStore: n.stateStore, + BlockStore: n.blockStore, + EvidencePool: n.evidencePool, + ConsensusState: n.consensusState, + P2PPeers: n.sw, + P2PTransport: n, + + PubKey: pubKey, + GenDoc: n.genesisDoc, + TxIndexer: n.txIndexer, + ConsensusReactor: n.consensusReactor, + EventBus: n.eventBus, + Mempool: n.mempool, + + Logger: n.Logger.With("module", "rpc"), + + Config: *n.config.RPC, + }) + return nil } func (n *Node) startRPC() ([]net.Listener, error) { - n.ConfigureRPC() + err := n.ConfigureRPC() + if err != nil { + return nil, err + } + listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ") - coreCodec := amino.NewCodec() - ctypes.RegisterAmino(coreCodec) if n.config.RPC.Unsafe { rpccore.AddUnsafeRoutes() @@ -905,7 +1029,7 @@ func (n *Node) startRPC() ([]net.Listener, error) { mux := http.NewServeMux() rpcLogger := n.Logger.With("module", "rpc-server") wmLogger := rpcLogger.With("protocol", "websocket") - wm := rpcserver.NewWebsocketManager(rpccore.Routes, coreCodec, + wm := rpcserver.NewWebsocketManager(rpccore.Routes, rpcserver.OnDisconnect(func(remoteAddr string) { err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr) if err != nil && err != tmpubsub.ErrSubscriptionNotFound { @@ -916,7 +1040,7 @@ func (n *Node) startRPC() ([]net.Listener, error) { ) wm.SetLogger(wmLogger) mux.HandleFunc("/websocket", wm.WebsocketHandler) - rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger) + rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger) listener, err := rpcserver.Listen( listenAddr, config, @@ -935,21 +1059,29 @@ func (n *Node) startRPC() ([]net.Listener, error) { rootHandler = corsMiddleware.Handler(mux) } if n.config.RPC.IsTLSEnabled() { - go rpcserver.StartHTTPAndTLSServer( - listener, - rootHandler, - n.config.RPC.CertFile(), - n.config.RPC.KeyFile(), - rpcLogger, - config, - ) + go func() { + if err := rpcserver.ServeTLS( + listener, + rootHandler, + n.config.RPC.CertFile(), + n.config.RPC.KeyFile(), + rpcLogger, + config, + ); err != nil { + n.Logger.Error("Error serving server with TLS", "err", err) + } + }() } else { - go rpcserver.StartHTTPServer( - listener, - rootHandler, - rpcLogger, - config, - ) + go func() { + if err := rpcserver.Serve( + listener, + rootHandler, + rpcLogger, + config, + ); err != nil { + n.Logger.Error("Error serving server", "err", err) + } + }() } listeners[i] = listener @@ -973,11 +1105,17 @@ func (n *Node) startRPC() ([]net.Listener, error) { if err != nil { return nil, err } - go grpccore.StartGRPCServer(listener) + go func() { + if err := grpccore.StartGRPCServer(listener); err != nil { + n.Logger.Error("Error starting gRPC server", "err", err) + } + }() listeners = append(listeners, listener) + } return listeners, nil + } // startPrometheusServer starts a Prometheus HTTP server, listening for metrics @@ -1122,6 +1260,7 @@ func makeNodeInfo( cs.StateChannel, cs.DataChannel, cs.VoteChannel, 
cs.VoteSetBitsChannel, mempl.MempoolChannel, evidence.EvidenceChannel, + statesync.SnapshotChannel, statesync.ChunkChannel, }, Moniker: config.Moniker, Other: p2p.DefaultNodeInfoOther{ @@ -1153,9 +1292,8 @@ var ( ) // LoadStateFromDBOrGenesisDocProvider attempts to load the state from the -// database, or creates one using the given genesisDocProvider and persists the -// result to the database. On success this also returns the genesis doc loaded -// through the given provider. +// database, or creates one using the given genesisDocProvider. On success this also +// returns the genesis doc loaded through the given provider. func LoadStateFromDBOrGenesisDocProvider( stateDB dbm.DB, genesisDocProvider GenesisDocProvider, @@ -1169,9 +1307,12 @@ func LoadStateFromDBOrGenesisDocProvider( } // save genesis doc to prevent a certain class of user errors (e.g. when it // was changed, accidentally or not). Also good for audit trail. - saveGenesisDoc(stateDB, genDoc) + if err := saveGenesisDoc(stateDB, genDoc); err != nil { + return sm.State{}, nil, err + } } - state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + stateStore := sm.NewStore(stateDB) + state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) if err != nil { return sm.State{}, nil, err } @@ -1188,7 +1329,7 @@ func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) { return nil, errors.New("genesis doc not found") } var genDoc *types.GenesisDoc - err = cdc.UnmarshalJSON(b, &genDoc) + err = tmjson.Unmarshal(b, &genDoc) if err != nil { panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b)) } @@ -1196,29 +1337,46 @@ func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) { return genDoc, nil } // panics if failed to marshal the given genesis document -func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) { - b, err := cdc.MarshalJSON(genDoc) +func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error { + b, err := tmjson.Marshal(genDoc) if err != nil { - panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err)) + return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err) } - db.SetSync(genesisDocKey, b) + if err := db.SetSync(genesisDocKey, b); err != nil { + return err + } + + return nil } func createAndStartPrivValidatorSocketClient( - listenAddr string, + listenAddr, + chainID string, logger log.Logger, ) (types.PrivValidator, error) { pve, err := privval.NewSignerListener(listenAddr, logger) if err != nil { - return nil, errors.Wrap(err, "failed to start private validator") + return nil, fmt.Errorf("failed to start private validator: %w", err) + } + + pvsc, err := privval.NewSignerClient(pve, chainID) + if err != nil { + return nil, fmt.Errorf("failed to start private validator: %w", err) } - pvsc, err := privval.NewSignerClient(pve) + // try to get a pubkey from the private validator the first time + _, err = pvsc.GetPubKey() if err != nil { - return nil, errors.Wrap(err, "failed to start private validator") + return nil, fmt.Errorf("can't get pubkey: %w", err) } - return pvsc, nil + const ( + retries = 50 // 50 * 100ms = 5s total + timeout = 100 * time.Millisecond + ) + pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout) + + return pvscWithRetries, nil } // splitAndTrimEmpty slices s into all subslices separated by sep and returns a diff --git a/node/node_test.go b/node/node_test.go index a9a43a362..375ec3c86 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -26,9 +26,9 @@ import ( "github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" - "github.com/tendermint/tendermint/version" ) func TestNodeStartStop(t *testing.T) { @@ -56,7 +56,8 @@ func TestNodeStartStop(t *testing.T) { // stop the node go func() { - n.Stop() + err = n.Stop() + require.NoError(t, err) }() select { @@ -104,7 +105,7 @@ func TestNodeDelayedStart(t *testing.T) { err = n.Start() require.NoError(t, err) - defer n.Stop() + defer n.Stop() //nolint:errcheck // ignore for tests startTime := tmtime.Now() assert.Equal(t, true, startTime.After(n.GenesisDoc().GenesisTime)) @@ -119,10 +120,11 @@ func TestNodeSetAppVersion(t *testing.T) { require.NoError(t, err) // default config uses the kvstore app - var appVersion version.Protocol = kvstore.ProtocolVersion + var appVersion uint64 = kvstore.ProtocolVersion // check version is set in state - state := sm.LoadState(n.stateDB) + state, err := n.stateStore.Load() + require.NoError(t, err) assert.Equal(t, state.Version.Consensus.App, appVersion) // check version is set in node info @@ -155,11 +157,11 @@ func TestNodeSetPrivValTCP(t *testing.T) { panic(err) } }() - defer signerServer.Stop() + defer signerServer.Stop() //nolint:errcheck // ignore for tests n, err := DefaultNewNode(config, log.TestingLogger()) require.NoError(t, err) - assert.IsType(t, &privval.SignerClient{}, n.PrivValidator()) + assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator()) } // address without a protocol must result in error @@ -199,11 +201,11 @@ func TestNodeSetPrivValIPC(t *testing.T) { err := pvsc.Start() require.NoError(t, err) }() - defer pvsc.Stop() + defer pvsc.Stop() //nolint:errcheck // ignore for tests n, err := DefaultNewNode(config, log.TestingLogger()) require.NoError(t, err) - assert.IsType(t, &privval.SignerClient{}, n.PrivValidator()) + assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator()) } // testFreeAddr claims a free port so we don't block on listener being ready. @@ -224,18 +226,22 @@ func TestCreateProposalBlock(t *testing.T) { proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) - defer proxyApp.Stop() + defer proxyApp.Stop() //nolint:errcheck // ignore for tests logger := log.TestingLogger() var height int64 = 1 - state, stateDB := state(1, height) + state, stateDB, privVals := state(1, height) + stateStore := sm.NewStore(stateDB) maxBytes := 16384 + var partSize uint32 = 256 + maxEvidenceBytes := int64(maxBytes / 2) state.ConsensusParams.Block.MaxBytes = int64(maxBytes) + state.ConsensusParams.Evidence.MaxBytes = maxEvidenceBytes proposerAddr, _ := state.Validators.GetByIndex(0) // Make Mempool - memplMetrics := mempl.PrometheusMetrics("node_test") + memplMetrics := mempl.PrometheusMetrics("node_test_1") mempool := mempl.NewCListMempool( config.Mempool, proxyApp.Mempool(), @@ -247,33 +253,38 @@ func TestCreateProposalBlock(t *testing.T) { mempool.SetLogger(logger) // Make EvidencePool - types.RegisterMockEvidencesGlobal() // XXX! 
- evidence.RegisterMockEvidences() evidenceDB := dbm.NewMemDB() - evidencePool := evidence.NewPool(stateDB, evidenceDB) + blockStore := store.NewBlockStore(dbm.NewMemDB()) + evidencePool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) + require.NoError(t, err) evidencePool.SetLogger(logger) // fill the evidence pool with more evidence // than can fit in a block - minEvSize := 12 - numEv := (maxBytes / types.MaxEvidenceBytesDenominator) / minEvSize - for i := 0; i < numEv; i++ { - ev := types.NewMockRandomEvidence(1, time.Now(), proposerAddr, tmrand.Bytes(minEvSize)) - err := evidencePool.AddEvidence(ev) - assert.NoError(t, err) + var currentBytes int64 = 0 + for currentBytes <= maxEvidenceBytes { + ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), privVals[0], "test-chain") + currentBytes += int64(len(ev.Bytes())) + err := evidencePool.AddEvidenceFromConsensus(ev) + require.NoError(t, err) } + evList, size := evidencePool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) + require.Less(t, size, state.ConsensusParams.Evidence.MaxBytes+1) + evData := &types.EvidenceData{Evidence: evList} + require.EqualValues(t, size, evData.ByteSize()) + // fill the mempool with more txs // than can fit in a block - txLength := 1000 - for i := 0; i < maxBytes/txLength; i++ { + txLength := 100 + for i := 0; i <= maxBytes/txLength; i++ { tx := tmrand.Bytes(txLength) err := mempool.CheckTx(tx, nil, mempl.TxInfo{}) assert.NoError(t, err) } blockExec := sm.NewBlockExecutor( - stateDB, + stateStore, logger, proxyApp.Consensus(), mempool, @@ -287,10 +298,83 @@ func TestCreateProposalBlock(t *testing.T) { proposerAddr, ) + // check that the part set does not exceed the maximum block size + partSet := block.MakePartSet(partSize) + assert.Less(t, partSet.ByteSize(), int64(maxBytes)) + + partSetFromHeader := types.NewPartSetFromHeader(partSet.Header()) + for partSetFromHeader.Count() < partSetFromHeader.Total() { + added, err := partSetFromHeader.AddPart(partSet.GetPart(int(partSetFromHeader.Count()))) + require.NoError(t, err) + require.True(t, added) + } + assert.EqualValues(t, partSetFromHeader.ByteSize(), partSet.ByteSize()) + err = blockExec.ValidateBlock(state, block) assert.NoError(t, err) } +func TestMaxProposalBlockSize(t *testing.T) { + config := cfg.ResetTestRoot("node_create_proposal") + defer os.RemoveAll(config.RootDir) + cc := proxy.NewLocalClientCreator(kvstore.NewApplication()) + proxyApp := proxy.NewAppConns(cc) + err := proxyApp.Start() + require.Nil(t, err) + defer proxyApp.Stop() //nolint:errcheck // ignore for tests + + logger := log.TestingLogger() + + var height int64 = 1 + state, stateDB, _ := state(1, height) + stateStore := sm.NewStore(stateDB) + var maxBytes int64 = 16384 + var partSize uint32 = 256 + state.ConsensusParams.Block.MaxBytes = maxBytes + proposerAddr, _ := state.Validators.GetByIndex(0) + + // Make Mempool + memplMetrics := mempl.PrometheusMetrics("node_test_2") + mempool := mempl.NewCListMempool( + config.Mempool, + proxyApp.Mempool(), + state.LastBlockHeight, + mempl.WithMetrics(memplMetrics), + mempl.WithPreCheck(sm.TxPreCheck(state)), + mempl.WithPostCheck(sm.TxPostCheck(state)), + ) + mempool.SetLogger(logger) + + // fill the mempool with one tx just below the maximum size + txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1)) + tx := tmrand.Bytes(txLength - 4) // to account for the varint + err = mempool.CheckTx(tx, nil, mempl.TxInfo{}) + assert.NoError(t, err) + + blockExec := sm.NewBlockExecutor( + stateStore, + logger,
+ proxyApp.Consensus(), + mempool, + sm.EmptyEvidencePool{}, + ) + + commit := types.NewCommit(height-1, 0, types.BlockID{}, nil) + block, _ := blockExec.CreateProposalBlock( + height, + state, commit, + proposerAddr, + ) + + pb, err := block.ToProto() + require.NoError(t, err) + assert.Less(t, int64(pb.Size()), maxBytes) + + // check that the part set does not exceed the maximum block size + partSet := block.MakePartSet(partSize) + assert.EqualValues(t, partSet.ByteSize(), int64(pb.Size())) +} + func TestNodeNewNodeCustomReactors(t *testing.T) { config := cfg.ResetTestRoot("node_new_node_custom_reactors_test") defer os.RemoveAll(config.RootDir) @@ -300,9 +384,11 @@ func TestNodeNewNodeCustomReactors(t *testing.T) { nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) require.NoError(t, err) + pval, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) + require.NoError(t, err) n, err := NewNode(config, - privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()), + pval, nodeKey, proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), DefaultGenesisDocProviderFunc(config), @@ -315,7 +401,7 @@ func TestNodeNewNodeCustomReactors(t *testing.T) { err = n.Start() require.NoError(t, err) - defer n.Stop() + defer n.Stop() //nolint:errcheck // ignore for tests assert.True(t, cr.IsRunning()) assert.Equal(t, cr, n.Switch().Reactor("FOO")) @@ -324,14 +410,15 @@ func TestNodeNewNodeCustomReactors(t *testing.T) { assert.Equal(t, customBlockchainReactor, n.Switch().Reactor("BLOCKCHAIN")) } -func state(nVals int, height int64) (sm.State, dbm.DB) { +func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { + privVals := make([]types.PrivValidator, nVals) vals := make([]types.GenesisValidator, nVals) for i := 0; i < nVals; i++ { - secret := []byte(fmt.Sprintf("test%d", i)) - pk := ed25519.GenPrivKeyFromSecret(secret) + privVal := types.NewMockPV() + privVals[i] = privVal vals[i] = types.GenesisValidator{ - Address: pk.PubKey().Address(), - PubKey: pk.PubKey(), + Address: privVal.PrivKey.PubKey().Address(), + PubKey: privVal.PrivKey.PubKey(), Power: 1000, Name: fmt.Sprintf("test%d", i), } @@ -344,12 +431,17 @@ func state(nVals int, height int64) (sm.State, dbm.DB) { // save validators to db for 2 heights stateDB := dbm.NewMemDB() - sm.SaveState(stateDB, s) + stateStore := sm.NewStore(stateDB) + if err := stateStore.Save(s); err != nil { + panic(err) + } for i := 1; i < int(height); i++ { s.LastBlockHeight++ s.LastValidators = s.Validators.Copy() - sm.SaveState(stateDB, s) + if err := stateStore.Save(s); err != nil { + panic(err) + } } - return s, stateDB + return s, stateDB, privVals } diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go index 86b0d980a..59faf4c3f 100644 --- a/p2p/base_reactor.go +++ b/p2p/base_reactor.go @@ -44,6 +44,9 @@ type Reactor interface { // copying. // // CONTRACT: msgBytes are not nil. + // + // XXX: do not call any methods that can block or incur heavy processing. 
+ // https://github.com/tendermint/tendermint/issues/2888 Receive(chID byte, peer Peer, msgBytes []byte) } diff --git a/p2p/codec.go b/p2p/codec.go deleted file mode 100644 index 463276318..000000000 --- a/p2p/codec.go +++ /dev/null @@ -1,13 +0,0 @@ -package p2p - -import ( - amino "github.com/tendermint/go-amino" - - cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" -) - -var cdc = amino.NewCodec() - -func init() { - cryptoamino.RegisterAmino(cdc) -} diff --git a/p2p/conn/codec.go b/p2p/conn/codec.go deleted file mode 100644 index 0625c7a38..000000000 --- a/p2p/conn/codec.go +++ /dev/null @@ -1,14 +0,0 @@ -package conn - -import ( - amino "github.com/tendermint/go-amino" - - cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" -) - -var cdc *amino.Codec = amino.NewCodec() - -func init() { - cryptoamino.RegisterAmino(cdc) - RegisterPacket(cdc) -} diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index 0436e115c..b5290116d 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -2,26 +2,26 @@ package conn import ( "bufio" - "runtime/debug" - + "errors" "fmt" "io" "math" "net" "reflect" - "sync" + "runtime/debug" "sync/atomic" "time" - "github.com/pkg/errors" - - amino "github.com/tendermint/go-amino" + "github.com/gogo/protobuf/proto" flow "github.com/tendermint/tendermint/libs/flowrate" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" + "github.com/tendermint/tendermint/libs/protoio" "github.com/tendermint/tendermint/libs/service" + tmsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/libs/timer" + tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) const ( @@ -67,7 +67,7 @@ There are two methods for sending messages: `Send(chID, msgBytes)` is a blocking call that waits until `msg` is successfully queued for the channel with the given id byte `chID`, or until the -request times out. The message `msg` is serialized using Go-Amino. +request times out. The message `msg` is serialized using Protobuf. `TrySend(chID, msgBytes)` is a nonblocking call that returns false if the channel's queue is full. @@ -101,7 +101,7 @@ type MConnection struct { // used to ensure FlushStop and OnStop // are safe to call concurrently. - stopMtx sync.Mutex + stopMtx tmsync.Mutex flushTimer *timer.ThrottleTimer // flush writes as necessary but throttled. pingTimer *time.Ticker // send pings periodically @@ -291,7 +291,7 @@ func (c *MConnection) FlushStop() { // Now we can close the connection } - c.conn.Close() // nolint: errcheck + c.conn.Close() // We can't close pong safely here because // recvRoutine may write to it after we've stopped. @@ -307,7 +307,7 @@ func (c *MConnection) OnStop() { return } - c.conn.Close() // nolint: errcheck + c.conn.Close() // We can't close pong safely here because // recvRoutine may write to it after we've stopped. 
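The connection.go hunks that follow replace amino's length-prefixed codec with Protobuf framing: each packet is wrapped in a tmp2p.Packet oneof (via the mustWrapPacket helper added further down) and written with a varint length prefix through protoio. The following is a minimal illustrative round trip, not part of the diff itself, assuming the protoio helpers and tmp2p types shown in this change plus the standard gogoproto-generated GetPacketMsg getter:

package main

import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/libs/protoio"
	tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
)

func main() {
	buf := new(bytes.Buffer)

	// Wrap a PacketMsg in the Packet oneof and write it length-delimited,
	// mirroring what sendRoutine does with mustWrapPacket + WriteMsg.
	packet := &tmp2p.Packet{
		Sum: &tmp2p.Packet_PacketMsg{
			PacketMsg: &tmp2p.PacketMsg{ChannelID: 0x01, EOF: true, Data: []byte("abc")},
		},
	}
	if _, err := protoio.NewDelimitedWriter(buf).WriteMsg(packet); err != nil {
		panic(err)
	}

	// Read it back with a bounded maximum message size, mirroring recvRoutine.
	var received tmp2p.Packet
	if err := protoio.NewDelimitedReader(buf, 1024).ReadMsg(&received); err != nil {
		panic(err)
	}
	fmt.Printf("channel=%X data=%q\n", received.GetPacketMsg().ChannelID, received.GetPacketMsg().Data)
}

Note that the read side bounds the message size up front, so an oversized frame fails the read instead of allocating an unbounded buffer; this is why recvRoutine below passes c._maxPacketMsgSize to the delimited reader.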
@@ -323,7 +323,7 @@ func (c *MConnection) flush() { c.Logger.Debug("Flush", "conn", c) err := c.bufConnWriter.Flush() if err != nil { - c.Logger.Error("MConnection flush failed", "err", err) + c.Logger.Debug("MConnection flush failed", "err", err) } } @@ -331,12 +331,14 @@ func (c *MConnection) flush() { func (c *MConnection) _recover() { if r := recover(); r != nil { c.Logger.Error("MConnection panicked", "err", r, "stack", string(debug.Stack())) - c.stopForError(errors.Errorf("recovered from panic: %v", r)) + c.stopForError(fmt.Errorf("recovered from panic: %v", r)) } } func (c *MConnection) stopForError(r interface{}) { - c.Stop() + if err := c.Stop(); err != nil { + c.Logger.Error("Error stopping connection", "err", err) + } if atomic.CompareAndSwapUint32(&c.errored, 0, 1) { if c.onError != nil { c.onError(r) @@ -419,9 +421,11 @@ func (c *MConnection) CanSend(chID byte) bool { func (c *MConnection) sendRoutine() { defer c._recover() + protoWriter := protoio.NewDelimitedWriter(c.bufConnWriter) + FOR_LOOP: for { - var _n int64 + var _n int var err error SELECTION: select { @@ -435,11 +439,12 @@ FOR_LOOP: } case <-c.pingTimer.C: c.Logger.Debug("Send Ping") - _n, err = cdc.MarshalBinaryLengthPrefixedWriter(c.bufConnWriter, PacketPing{}) + _n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPing{})) if err != nil { + c.Logger.Error("Failed to send PacketPing", "err", err) break SELECTION } - c.sendMonitor.Update(int(_n)) + c.sendMonitor.Update(_n) c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout) c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() { select { @@ -457,11 +462,12 @@ FOR_LOOP: } case <-c.pong: c.Logger.Debug("Send Pong") - _n, err = cdc.MarshalBinaryLengthPrefixedWriter(c.bufConnWriter, PacketPong{}) + _n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPong{})) if err != nil { + c.Logger.Error("Failed to send PacketPong", "err", err) break SELECTION } - c.sendMonitor.Update(int(_n)) + c.sendMonitor.Update(_n) c.flush() case <-c.quitSendRoutine: break FOR_LOOP @@ -541,7 +547,7 @@ func (c *MConnection) sendPacketMsg() bool { c.stopForError(err) return true } - c.sendMonitor.Update(int(_n)) + c.sendMonitor.Update(_n) c.flushTimer.Set() return false } @@ -553,6 +559,8 @@ func (c *MConnection) sendPacketMsg() bool { func (c *MConnection) recvRoutine() { defer c._recover() + protoReader := protoio.NewDelimitedReader(c.bufConnReader, c._maxPacketMsgSize) + FOR_LOOP: for { // Block until .recvMonitor says we can read. @@ -573,12 +581,9 @@ FOR_LOOP: */ // Read packet type - var packet Packet - var _n int64 - var err error - _n, err = cdc.UnmarshalBinaryLengthPrefixedReader(c.bufConnReader, &packet, int64(c._maxPacketMsgSize)) - c.recvMonitor.Update(int(_n)) + var packet tmp2p.Packet + err := protoReader.ReadMsg(&packet) if err != nil { // stopServices was invoked and we are shutting down // receiving is expected to fail since we will close the connection @@ -592,7 +597,7 @@ if err == io.EOF { c.Logger.Info("Connection is closed @ recvRoutine (likely by the other side)", "conn", c) } else { - c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err) + c.Logger.Debug("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err) } c.stopForError(err) } @@ -600,8 +605,8 @@ FOR_LOOP: } // Read more depending on packet type. - switch pkt := packet.(type) { - case PacketPing: + switch pkt := packet.Sum.(type) { + case *tmp2p.Packet_PacketPing: // TODO: prevent abuse, as they cause flush()'s.
// https://github.com/tendermint/tendermint/issues/1190 c.Logger.Debug("Receive Ping") @@ -610,34 +615,34 @@ FOR_LOOP: default: // never block } - case PacketPong: + case *tmp2p.Packet_PacketPong: c.Logger.Debug("Receive Pong") select { case c.pongTimeoutCh <- false: default: // never block } - case PacketMsg: - channel, ok := c.channelsIdx[pkt.ChannelID] + case *tmp2p.Packet_PacketMsg: + channel, ok := c.channelsIdx[byte(pkt.PacketMsg.ChannelID)] if !ok || channel == nil { - err := fmt.Errorf("unknown channel %X", pkt.ChannelID) - c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) + err := fmt.Errorf("unknown channel %X", pkt.PacketMsg.ChannelID) + c.Logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err) c.stopForError(err) break FOR_LOOP } - msgBytes, err := channel.recvPacketMsg(pkt) + msgBytes, err := channel.recvPacketMsg(*pkt.PacketMsg) if err != nil { if c.IsRunning() { - c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) + c.Logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err) c.stopForError(err) } break FOR_LOOP } if msgBytes != nil { - c.Logger.Debug("Received bytes", "chID", pkt.ChannelID, "msgBytes", fmt.Sprintf("%X", msgBytes)) + c.Logger.Debug("Received bytes", "chID", pkt.PacketMsg.ChannelID, "msgBytes", fmt.Sprintf("%X", msgBytes)) // NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine - c.onReceive(pkt.ChannelID, msgBytes) + c.onReceive(byte(pkt.PacketMsg.ChannelID), msgBytes) } default: err := fmt.Errorf("unknown message type %v", reflect.TypeOf(packet)) @@ -662,14 +667,17 @@ func (c *MConnection) stopPongTimer() { } } -// maxPacketMsgSize returns a maximum size of PacketMsg, including the overhead -// of amino encoding. +// maxPacketMsgSize returns a maximum size of PacketMsg func (c *MConnection) maxPacketMsgSize() int { - return len(cdc.MustMarshalBinaryLengthPrefixed(PacketMsg{ + bz, err := proto.Marshal(mustWrapPacket(&tmp2p.PacketMsg{ ChannelID: 0x01, - EOF: 1, - Bytes: make([]byte, c.config.MaxPacketMsgPayloadSize), - })) + 10 // leave room for changes in amino + EOF: true, + Data: make([]byte, c.config.MaxPacketMsgPayloadSize), + })) + if err != nil { + panic(err) + } + return len(bz) } type ConnectionStatus struct { @@ -815,17 +823,16 @@ func (ch *Channel) isSendPending() bool { // Creates a new PacketMsg to send. // Not goroutine-safe -func (ch *Channel) nextPacketMsg() PacketMsg { - packet := PacketMsg{} - packet.ChannelID = ch.desc.ID +func (ch *Channel) nextPacketMsg() tmp2p.PacketMsg { + packet := tmp2p.PacketMsg{ChannelID: int32(ch.desc.ID)} maxSize := ch.maxPacketMsgPayloadSize - packet.Bytes = ch.sending[:tmmath.MinInt(maxSize, len(ch.sending))] + packet.Data = ch.sending[:tmmath.MinInt(maxSize, len(ch.sending))] if len(ch.sending) <= maxSize { - packet.EOF = byte(0x01) + packet.EOF = true ch.sending = nil atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize } else { - packet.EOF = byte(0x00) + packet.EOF = false ch.sending = ch.sending[tmmath.MinInt(maxSize, len(ch.sending)):] } return packet @@ -833,24 +840,24 @@ func (ch *Channel) nextPacketMsg() PacketMsg { // Writes next PacketMsg to w and updates c.recentlySent. 
// Not goroutine-safe -func (ch *Channel) writePacketMsgTo(w io.Writer) (n int64, err error) { - var packet = ch.nextPacketMsg() - n, err = cdc.MarshalBinaryLengthPrefixedWriter(w, packet) - atomic.AddInt64(&ch.recentlySent, n) +func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) { + packet := ch.nextPacketMsg() + n, err = protoio.NewDelimitedWriter(w).WriteMsg(mustWrapPacket(&packet)) + atomic.AddInt64(&ch.recentlySent, int64(n)) return } // Handles incoming PacketMsgs. It returns the message bytes if the message is // complete. NOTE message bytes may change on next call to recvPacketMsg. // Not goroutine-safe -func (ch *Channel) recvPacketMsg(packet PacketMsg) ([]byte, error) { +func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) { ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet) - var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Bytes) + var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Data) if recvCap < recvReceived { return nil, fmt.Errorf("received message exceeds available capacity: %v < %v", recvCap, recvReceived) } - ch.recving = append(ch.recving, packet.Bytes...) - if packet.EOF == byte(0x01) { + ch.recving = append(ch.recving, packet.Data...) + if packet.EOF { msgBytes := ch.recving // clear the slice without re-allocating. @@ -874,33 +881,34 @@ func (ch *Channel) updateStats() { //---------------------------------------- // Packet -type Packet interface { - AssertIsPacket() -} - -func RegisterPacket(cdc *amino.Codec) { - cdc.RegisterInterface((*Packet)(nil), nil) - cdc.RegisterConcrete(PacketPing{}, "tendermint/p2p/PacketPing", nil) - cdc.RegisterConcrete(PacketPong{}, "tendermint/p2p/PacketPong", nil) - cdc.RegisterConcrete(PacketMsg{}, "tendermint/p2p/PacketMsg", nil) -} - -func (PacketPing) AssertIsPacket() {} -func (PacketPong) AssertIsPacket() {} -func (PacketMsg) AssertIsPacket() {} - -type PacketPing struct { -} - -type PacketPong struct { -} - -type PacketMsg struct { - ChannelID byte - EOF byte // 1 means message ends here. - Bytes []byte -} +// mustWrapPacket takes a packet kind (oneof) and wraps it in a tmp2p.Packet message.
+func mustWrapPacket(pb proto.Message) *tmp2p.Packet { + var msg tmp2p.Packet + + switch pb := pb.(type) { + case *tmp2p.Packet: // already a packet + msg = *pb + case *tmp2p.PacketPing: + msg = tmp2p.Packet{ + Sum: &tmp2p.Packet_PacketPing{ + PacketPing: pb, + }, + } + case *tmp2p.PacketPong: + msg = tmp2p.Packet{ + Sum: &tmp2p.Packet_PacketPong{ + PacketPong: pb, + }, + } + case *tmp2p.PacketMsg: + msg = tmp2p.Packet{ + Sum: &tmp2p.Packet_PacketMsg{ + PacketMsg: pb, + }, + } + default: + panic(fmt.Errorf("unknown packet type %T", pb)) + } -func (mp PacketMsg) String() string { - return fmt.Sprintf("PacketMsg{%X:%X T:%X}", mp.ChannelID, mp.Bytes, mp.EOF) + return &msg } diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go index 29d29fc6e..a189e8b89 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/conn/connection_test.go @@ -1,18 +1,20 @@ package conn import ( - "bytes" + "encoding/hex" "net" "testing" "time" "github.com/fortytw2/leaktest" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - amino "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/protoio" + tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" + "github.com/tendermint/tendermint/proto/tendermint/types" ) const maxPingPongPacketSize = 1024 // bytes @@ -43,23 +45,23 @@ func createMConnectionWithCallbacks( func TestMConnectionSendFlushStop(t *testing.T) { server, client := NetPipe() - defer server.Close() // nolint: errcheck - defer client.Close() // nolint: errcheck + defer server.Close() + defer client.Close() clientConn := createTestMConnection(client) err := clientConn.Start() require.Nil(t, err) - defer clientConn.Stop() + defer clientConn.Stop() // nolint:errcheck // ignore for tests msg := []byte("abc") assert.True(t, clientConn.Send(0x01, msg)) - aminoMsgLength := 14 + msgLength := 14 // start the reader in a new routine, so we can flush errCh := make(chan error) go func() { - msgB := make([]byte, aminoMsgLength) + msgB := make([]byte, msgLength) _, err := server.Read(msgB) if err != nil { t.Error(err) @@ -81,13 +83,13 @@ func TestMConnectionSendFlushStop(t *testing.T) { func TestMConnectionSend(t *testing.T) { server, client := NetPipe() - defer server.Close() // nolint: errcheck - defer client.Close() // nolint: errcheck + defer server.Close() + defer client.Close() mconn := createTestMConnection(client) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() + defer mconn.Stop() // nolint:errcheck // ignore for tests msg := []byte("Ant-Man") assert.True(t, mconn.Send(0x01, msg)) @@ -112,8 +114,8 @@ func TestMConnectionSend(t *testing.T) { func TestMConnectionReceive(t *testing.T) { server, client := NetPipe() - defer server.Close() // nolint: errcheck - defer client.Close() // nolint: errcheck + defer server.Close() + defer client.Close() receivedCh := make(chan []byte) errorsCh := make(chan interface{}) @@ -126,12 +128,12 @@ func TestMConnectionReceive(t *testing.T) { mconn1 := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn1.Start() require.Nil(t, err) - defer mconn1.Stop() + defer mconn1.Stop() // nolint:errcheck // ignore for tests mconn2 := createTestMConnection(server) err = mconn2.Start() require.Nil(t, err) - defer mconn2.Stop() + defer mconn2.Stop() // nolint:errcheck // ignore for tests msg := []byte("Cyclops") assert.True(t, mconn2.Send(0x01, msg)) @@ -148,13 +150,13 @@ func TestMConnectionReceive(t *testing.T) { func 
TestMConnectionStatus(t *testing.T) { server, client := NetPipe() - defer server.Close() // nolint: errcheck - defer client.Close() // nolint: errcheck + defer server.Close() + defer client.Close() mconn := createTestMConnection(client) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() + defer mconn.Stop() // nolint:errcheck // ignore for tests status := mconn.Status() assert.NotNil(t, status) @@ -177,19 +179,19 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) { mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() + defer mconn.Stop() // nolint:errcheck // ignore for tests serverGotPing := make(chan struct{}) go func() { // read ping - var pkt PacketPing - _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) - assert.Nil(t, err) + var pkt tmp2p.Packet + err := protoio.NewDelimitedReader(server, maxPingPongPacketSize).ReadMsg(&pkt) + require.NoError(t, err) serverGotPing <- struct{}{} }() <-serverGotPing - pongTimerExpired := mconn.config.PongTimeout + 20*time.Millisecond + pongTimerExpired := mconn.config.PongTimeout + 200*time.Millisecond select { case msgBytes := <-receivedCh: t.Fatalf("Expected error, but got %v", msgBytes) @@ -216,29 +218,31 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() + defer mconn.Stop() // nolint:errcheck // ignore for tests // sending 3 pongs in a row (abuse) - _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) - require.Nil(t, err) - _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) - require.Nil(t, err) - _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) - require.Nil(t, err) + protoWriter := protoio.NewDelimitedWriter(server) + + _, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPong{})) + require.NoError(t, err) + + _, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPong{})) + require.NoError(t, err) + + _, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPong{})) + require.NoError(t, err) serverGotPing := make(chan struct{}) go func() { // read ping - var ( - packet Packet - err error - ) - _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &packet, maxPingPongPacketSize) - require.Nil(t, err) + var packet tmp2p.Packet + err := protoio.NewDelimitedReader(server, maxPingPongPacketSize).ReadMsg(&packet) + require.NoError(t, err) serverGotPing <- struct{}{} + // respond with pong - _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) - require.Nil(t, err) + _, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPong{})) + require.NoError(t, err) }() <-serverGotPing @@ -269,23 +273,31 @@ func TestMConnectionMultiplePings(t *testing.T) { mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() + defer mconn.Stop() // nolint:errcheck // ignore for tests // sending 3 pings in a row (abuse) // see https://github.com/tendermint/tendermint/issues/1190 - _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPing{})) - require.Nil(t, err) - var pkt PacketPong - _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) - require.Nil(t, err) - _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPing{})) - require.Nil(t, err) - _, err = 
cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) - require.Nil(t, err) - _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPing{})) - require.Nil(t, err) - _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) - require.Nil(t, err) + protoReader := protoio.NewDelimitedReader(server, maxPingPongPacketSize) + protoWriter := protoio.NewDelimitedWriter(server) + var pkt tmp2p.Packet + + _, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPing{})) + require.NoError(t, err) + + err = protoReader.ReadMsg(&pkt) + require.NoError(t, err) + + _, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPing{})) + require.NoError(t, err) + + err = protoReader.ReadMsg(&pkt) + require.NoError(t, err) + + _, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPing{})) + require.NoError(t, err) + + err = protoReader.ReadMsg(&pkt) + require.NoError(t, err) assert.True(t, mconn.IsRunning()) } @@ -310,29 +322,36 @@ func TestMConnectionPingPongs(t *testing.T) { mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() + defer mconn.Stop() // nolint:errcheck // ignore for tests serverGotPing := make(chan struct{}) go func() { + protoReader := protoio.NewDelimitedReader(server, maxPingPongPacketSize) + protoWriter := protoio.NewDelimitedWriter(server) + var pkt tmp2p.PacketPing + // read ping - var pkt PacketPing - _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) - require.Nil(t, err) + err = protoReader.ReadMsg(&pkt) + require.NoError(t, err) serverGotPing <- struct{}{} + // respond with pong - _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) - require.Nil(t, err) + _, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPong{})) + require.NoError(t, err) time.Sleep(mconn.config.PingInterval) // read ping - _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) - require.Nil(t, err) + err = protoReader.ReadMsg(&pkt) + require.NoError(t, err) + serverGotPing <- struct{}{} + // respond with pong - _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) - require.Nil(t, err) + _, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPong{})) + require.NoError(t, err) }() <-serverGotPing + <-serverGotPing pongTimerExpired := (mconn.config.PongTimeout + 20*time.Millisecond) * 2 select { @@ -347,8 +366,8 @@ func TestMConnectionPingPongs(t *testing.T) { func TestMConnectionStopsAndReturnsError(t *testing.T) { server, client := NetPipe() - defer server.Close() // nolint: errcheck - defer client.Close() // nolint: errcheck + defer server.Close() + defer client.Close() receivedCh := make(chan []byte) errorsCh := make(chan interface{}) @@ -361,7 +380,7 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { mconn := createMConnectionWithCallbacks(client, onReceive, onError) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() + defer mconn.Stop() // nolint:errcheck // ignore for tests if err := client.Close(); err != nil { t.Error(err) @@ -420,26 +439,30 @@ func expectSend(ch chan struct{}) bool { func TestMConnectionReadErrorBadEncoding(t *testing.T) { chOnErr := make(chan struct{}) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - defer mconnClient.Stop() - defer mconnServer.Stop() client := mconnClient.conn - // send badly encoded msgPacket - bz := cdc.MustMarshalBinaryLengthPrefixed(PacketMsg{}) - bz[4] += 0x01 // 
Invalid prefix bytes. - // Write it. - _, err := client.Write(bz) - assert.Nil(t, err) + _, err := client.Write([]byte{1, 2, 3, 4, 5}) + require.NoError(t, err) assert.True(t, expectSend(chOnErr), "badly encoded msgPacket") + + t.Cleanup(func() { + if err := mconnClient.Stop(); err != nil { + t.Log(err) + } + }) + + t.Cleanup(func() { + if err := mconnServer.Stop(); err != nil { + t.Log(err) + } + }) } func TestMConnectionReadErrorUnknownChannel(t *testing.T) { chOnErr := make(chan struct{}) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - defer mconnClient.Stop() - defer mconnServer.Stop() msg := []byte("Ant-Man") @@ -450,6 +473,18 @@ func TestMConnectionReadErrorUnknownChannel(t *testing.T) { // should cause an error assert.True(t, mconnClient.Send(0x02, msg)) assert.True(t, expectSend(chOnErr), "unknown channel") + + t.Cleanup(func() { + if err := mconnClient.Stop(); err != nil { + t.Log(err) + } + }) + + t.Cleanup(func() { + if err := mconnServer.Stop(); err != nil { + t.Log(err) + } + }) } func TestMConnectionReadErrorLongMessage(t *testing.T) { @@ -457,54 +492,48 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { chOnRcv := make(chan struct{}) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - defer mconnClient.Stop() - defer mconnServer.Stop() + defer mconnClient.Stop() // nolint:errcheck // ignore for tests + defer mconnServer.Stop() // nolint:errcheck // ignore for tests mconnServer.onReceive = func(chID byte, msgBytes []byte) { chOnRcv <- struct{}{} } client := mconnClient.conn + protoWriter := protoio.NewDelimitedWriter(client) // send msg that's just right - var err error - var buf = new(bytes.Buffer) - var packet = PacketMsg{ + var packet = tmp2p.PacketMsg{ ChannelID: 0x01, - EOF: 1, - Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize), + EOF: true, + Data: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize), } - _, err = cdc.MarshalBinaryLengthPrefixedWriter(buf, packet) - assert.Nil(t, err) - _, err = client.Write(buf.Bytes()) - assert.Nil(t, err) + + _, err := protoWriter.WriteMsg(mustWrapPacket(&packet)) + require.NoError(t, err) assert.True(t, expectSend(chOnRcv), "msg just right") // send msg that's too long - buf = new(bytes.Buffer) - packet = PacketMsg{ + packet = tmp2p.PacketMsg{ ChannelID: 0x01, - EOF: 1, - Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize+100), + EOF: true, + Data: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize+100), } - _, err = cdc.MarshalBinaryLengthPrefixedWriter(buf, packet) - assert.Nil(t, err) - _, err = client.Write(buf.Bytes()) - assert.NotNil(t, err) + + _, err = protoWriter.WriteMsg(mustWrapPacket(&packet)) + require.Error(t, err) assert.True(t, expectSend(chOnErr), "msg too long") } func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { chOnErr := make(chan struct{}) mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - defer mconnClient.Stop() - defer mconnServer.Stop() + defer mconnClient.Stop() // nolint:errcheck // ignore for tests + defer mconnServer.Stop() // nolint:errcheck // ignore for tests // send msg with unknown msg type - err := amino.EncodeUvarint(mconnClient.conn, 4) - assert.Nil(t, err) - _, err = mconnClient.conn.Write([]byte{0xFF, 0xFF, 0xFF, 0xFF}) - assert.Nil(t, err) + _, err := protoio.NewDelimitedWriter(mconnClient.conn).WriteMsg(&types.Header{ChainID: "x"}) + require.NoError(t, err) assert.True(t, expectSend(chOnErr), "unknown msg type") } @@ -516,12 +545,13 @@ func 
TestMConnectionTrySend(t *testing.T) { mconn := createTestMConnection(client) err := mconn.Start() require.Nil(t, err) - defer mconn.Stop() + defer mconn.Stop() // nolint:errcheck // ignore for tests msg := []byte("Semicolon-Woman") resultCh := make(chan string, 2) assert.True(t, mconn.TrySend(0x01, msg)) - server.Read(make([]byte, len(msg))) + _, err = server.Read(make([]byte, len(msg))) + require.NoError(t, err) assert.True(t, mconn.CanSend(0x01)) assert.True(t, mconn.TrySend(0x01, msg)) assert.False(t, mconn.CanSend(0x01)) @@ -533,3 +563,27 @@ func TestMConnectionTrySend(t *testing.T) { assert.False(t, mconn.TrySend(0x01, msg)) assert.Equal(t, "TrySend", <-resultCh) } + +// nolint:lll //ignore line length for tests +func TestConnVectors(t *testing.T) { + + testCases := []struct { + testName string + msg proto.Message + expBytes string + }{ + {"PacketPing", &tmp2p.PacketPing{}, "0a00"}, + {"PacketPong", &tmp2p.PacketPong{}, "1200"}, + {"PacketMsg", &tmp2p.PacketMsg{ChannelID: 1, EOF: false, Data: []byte("data transmitted over the wire")}, "1a2208011a1e64617461207472616e736d6974746564206f766572207468652077697265"}, + } + + for _, tc := range testCases { + tc := tc + + pm := mustWrapPacket(tc.msg) + bz, err := pm.Marshal() + require.NoError(t, err, tc.testName) + + require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) + } +} diff --git a/p2p/conn/evil_secret_connection_test.go b/p2p/conn/evil_secret_connection_test.go new file mode 100644 index 000000000..43e4d8743 --- /dev/null +++ b/p2p/conn/evil_secret_connection_test.go @@ -0,0 +1,273 @@ +package conn + +import ( + "bytes" + "errors" + "io" + "testing" + + gogotypes "github.com/gogo/protobuf/types" + "github.com/gtank/merlin" + "github.com/stretchr/testify/assert" + "golang.org/x/crypto/chacha20poly1305" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/libs/protoio" + tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" +) + +type buffer struct { + next bytes.Buffer +} + +func (b *buffer) Read(data []byte) (n int, err error) { + return b.next.Read(data) +} + +func (b *buffer) Write(data []byte) (n int, err error) { + return b.next.Write(data) +} + +func (b *buffer) Bytes() []byte { + return b.next.Bytes() +} + +func (b *buffer) Close() error { + return nil +} + +type evilConn struct { + secretConn *SecretConnection + buffer *buffer + + locEphPub *[32]byte + locEphPriv *[32]byte + remEphPub *[32]byte + privKey crypto.PrivKey + + readStep int + writeStep int + readOffset int + + shareEphKey bool + badEphKey bool + shareAuthSignature bool + badAuthSignature bool +} + +func newEvilConn(shareEphKey, badEphKey, shareAuthSignature, badAuthSignature bool) *evilConn { + privKey := ed25519.GenPrivKey() + locEphPub, locEphPriv := genEphKeys() + var rep [32]byte + c := &evilConn{ + locEphPub: locEphPub, + locEphPriv: locEphPriv, + remEphPub: &rep, + privKey: privKey, + + shareEphKey: shareEphKey, + badEphKey: badEphKey, + shareAuthSignature: shareAuthSignature, + badAuthSignature: badAuthSignature, + } + + return c +} + +func (c *evilConn) Read(data []byte) (n int, err error) { + if !c.shareEphKey { + return 0, io.EOF + } + + switch c.readStep { + case 0: + if !c.badEphKey { + lc := *c.locEphPub + bz, err := protoio.MarshalDelimited(&gogotypes.BytesValue{Value: lc[:]}) + if err != nil { + panic(err) + } + copy(data, bz[c.readOffset:]) + n = len(data) + } else { + bz, err := 
protoio.MarshalDelimited(&gogotypes.BytesValue{Value: []byte("drop users;")}) + if err != nil { + panic(err) + } + copy(data, bz) + n = len(data) + } + c.readOffset += n + + if n >= 32 { + c.readOffset = 0 + c.readStep = 1 + if !c.shareAuthSignature { + c.readStep = 2 + } + } + + return n, nil + case 1: + signature := c.signChallenge() + if !c.badAuthSignature { + pkpb, err := cryptoenc.PubKeyToProto(c.privKey.PubKey()) + if err != nil { + panic(err) + } + bz, err := protoio.MarshalDelimited(&tmp2p.AuthSigMessage{PubKey: pkpb, Sig: signature}) + if err != nil { + panic(err) + } + n, err = c.secretConn.Write(bz) + if err != nil { + panic(err) + } + if c.readOffset > len(c.buffer.Bytes()) { + return len(data), nil + } + copy(data, c.buffer.Bytes()[c.readOffset:]) + } else { + bz, err := protoio.MarshalDelimited(&gogotypes.BytesValue{Value: []byte("select * from users;")}) + if err != nil { + panic(err) + } + n, err = c.secretConn.Write(bz) + if err != nil { + panic(err) + } + if c.readOffset > len(c.buffer.Bytes()) { + return len(data), nil + } + copy(data, c.buffer.Bytes()) + } + c.readOffset += len(data) + return n, nil + default: + return 0, io.EOF + } +} + +func (c *evilConn) Write(data []byte) (n int, err error) { + switch c.writeStep { + case 0: + var ( + bytes gogotypes.BytesValue + remEphPub [32]byte + ) + err := protoio.UnmarshalDelimited(data, &bytes) + if err != nil { + panic(err) + } + copy(remEphPub[:], bytes.Value) + c.remEphPub = &remEphPub + c.writeStep = 1 + if !c.shareAuthSignature { + c.writeStep = 2 + } + return len(data), nil + case 1: + // Signature is not needed, therefore skipped. + return len(data), nil + default: + return 0, io.EOF + } +} + +func (c *evilConn) Close() error { + return nil +} + +func (c *evilConn) signChallenge() []byte { + // Sort by lexical order. + loEphPub, hiEphPub := sort32(c.locEphPub, c.remEphPub) + + transcript := merlin.NewTranscript("TENDERMINT_SECRET_CONNECTION_TRANSCRIPT_HASH") + + transcript.AppendMessage(labelEphemeralLowerPublicKey, loEphPub[:]) + transcript.AppendMessage(labelEphemeralUpperPublicKey, hiEphPub[:]) + + // Check if the local ephemeral public key was the least, lexicographically + // sorted. + locIsLeast := bytes.Equal(c.locEphPub[:], loEphPub[:]) + + // Compute common diffie hellman secret using X25519. + dhSecret, err := computeDHSecret(c.remEphPub, c.locEphPriv) + if err != nil { + panic(err) + } + + transcript.AppendMessage(labelDHSecret, dhSecret[:]) + + // Generate the secret used for receiving, sending, challenge via HKDF-SHA2 + // on the transcript state (which itself also uses HKDF-SHA2 to derive a key + // from the dhSecret). + recvSecret, sendSecret := deriveSecrets(dhSecret, locIsLeast) + + const challengeSize = 32 + var challenge [challengeSize]byte + challengeSlice := transcript.ExtractBytes(labelSecretConnectionMac, challengeSize) + + copy(challenge[:], challengeSlice[0:challengeSize]) + + sendAead, err := chacha20poly1305.New(sendSecret[:]) + if err != nil { + panic(errors.New("invalid send SecretConnection Key")) + } + recvAead, err := chacha20poly1305.New(recvSecret[:]) + if err != nil { + panic(errors.New("invalid receive SecretConnection Key")) + } + + b := &buffer{} + c.secretConn = &SecretConnection{ + conn: b, + recvBuffer: nil, + recvNonce: new([aeadNonceSize]byte), + sendNonce: new([aeadNonceSize]byte), + recvAead: recvAead, + sendAead: sendAead, + } + c.buffer = b + + // Sign the challenge bytes for authentication. 
+ locSignature, err := signChallenge(&challenge, c.privKey) + if err != nil { + panic(err) + } + + return locSignature +} + +// TestMakeSecretConnection creates an evil connection and tests that +// MakeSecretConnection errors at different stages. +func TestMakeSecretConnection(t *testing.T) { + testCases := []struct { + name string + conn *evilConn + errMsg string + }{ + {"refuse to share ephemeral key", newEvilConn(false, false, false, false), "EOF"}, + {"share bad ephemeral key", newEvilConn(true, true, false, false), "wrong wireType"}, + {"refuse to share auth signature", newEvilConn(true, false, false, false), "EOF"}, + {"share bad auth signature", newEvilConn(true, false, true, true), "failed to decrypt SecretConnection"}, + {"all good", newEvilConn(true, false, true, false), ""}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + privKey := ed25519.GenPrivKey() + _, err := MakeSecretConnection(tc.conn, privKey) + if tc.errMsg != "" { + if assert.Error(t, err) { + assert.Contains(t, err.Error(), tc.errMsg) + } + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/p2p/conn/secret_connection.go b/p2p/conn/secret_connection.go index c14f1bb5c..041224772 100644 --- a/p2p/conn/secret_connection.go +++ b/p2p/conn/secret_connection.go @@ -6,15 +6,16 @@ import ( crand "crypto/rand" "crypto/sha256" "encoding/binary" + "errors" + "fmt" "io" "math" "net" - "sync" "time" + gogotypes "github.com/gogo/protobuf/types" "github.com/gtank/merlin" pool "github.com/libp2p/go-buffer-pool" - "github.com/pkg/errors" "golang.org/x/crypto/chacha20poly1305" "golang.org/x/crypto/curve25519" "golang.org/x/crypto/hkdf" @@ -22,7 +23,11 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" + cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/async" + "github.com/tendermint/tendermint/libs/protoio" + tmsync "github.com/tendermint/tendermint/libs/sync" + tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) // 4 + 1024 == 1028 total frame size @@ -71,11 +76,11 @@ type SecretConnection struct { // are independent, so we can use two mtxs. // All .Read are covered by recvMtx, // all .Write are covered by sendMtx. - recvMtx sync.Mutex + recvMtx tmsync.Mutex recvBuffer []byte recvNonce *[aeadNonceSize]byte - sendMtx sync.Mutex + sendMtx tmsync.Mutex sendNonce *[aeadNonceSize]byte } @@ -150,7 +155,10 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (* } // Sign the challenge bytes for authentication. 
- locSignature := signChallenge(&challenge, locPrivKey) + locSignature, err := signChallenge(&challenge, locPrivKey) + if err != nil { + return nil, err + } // Share (in secret) each other's pubkey & challenge signature authSigMsg, err := shareAuthSignature(sc, locPubKey, locSignature) @@ -159,10 +167,10 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (* } remPubKey, remSignature := authSigMsg.Key, authSigMsg.Sig - if _, ok := remPubKey.(ed25519.PubKeyEd25519); !ok { - return nil, errors.Errorf("expected ed25519 pubkey, got %T", remPubKey) + if _, ok := remPubKey.(ed25519.PubKey); !ok { + return nil, fmt.Errorf("expected ed25519 pubkey, got %T", remPubKey) } - if !remPubKey.VerifyBytes(challenge[:], remSignature) { + if !remPubKey.VerifySignature(challenge[:], remSignature) { return nil, errors.New("challenge verification failed") } @@ -246,7 +254,7 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) { defer pool.Put(frame) _, err = sc.recvAead.Open(frame[:0], sc.recvNonce[:], sealedFrame, nil) if err != nil { - return n, errors.New("failed to decrypt SecretConnection") + return n, fmt.Errorf("failed to decrypt SecretConnection: %w", err) } incrNonce(sc.recvNonce) // end decryption @@ -296,18 +304,22 @@ func shareEphPubKey(conn io.ReadWriter, locEphPub *[32]byte) (remEphPub *[32]byt // Send our pubkey and receive theirs in tandem. var trs, _ = async.Parallel( func(_ int) (val interface{}, abort bool, err error) { - var _, err1 = cdc.MarshalBinaryLengthPrefixedWriter(conn, locEphPub) - if err1 != nil { - return nil, true, err1 // abort + lc := *locEphPub + _, err = protoio.NewDelimitedWriter(conn).WriteMsg(&gogotypes.BytesValue{Value: lc[:]}) + if err != nil { + return nil, true, err // abort } return nil, false, nil }, func(_ int) (val interface{}, abort bool, err error) { - var _remEphPub [32]byte - var _, err2 = cdc.UnmarshalBinaryLengthPrefixedReader(conn, &_remEphPub, 1024*1024) // TODO - if err2 != nil { - return nil, true, err2 // abort + var bytes gogotypes.BytesValue + err = protoio.NewDelimitedReader(conn, 1024*1024).ReadMsg(&bytes) + if err != nil { + return nil, true, err // abort } + + var _remEphPub [32]byte + copy(_remEphPub[:], bytes.Value) return _remEphPub, false, nil }, ) @@ -377,13 +389,12 @@ func sort32(foo, bar *[32]byte) (lo, hi *[32]byte) { return } -func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKey) (signature []byte) { +func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKey) ([]byte, error) { signature, err := locPrivKey.Sign(challenge[:]) - // TODO(ismail): let signChallenge return an error instead if err != nil { - panic(err) + return nil, err } - return + return signature, nil } type authSigMessage struct { @@ -396,17 +407,31 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte // Send our info and receive theirs in tandem. 
var trs, _ = async.Parallel( func(_ int) (val interface{}, abort bool, err error) { - var _, err1 = cdc.MarshalBinaryLengthPrefixedWriter(sc, authSigMessage{pubKey, signature}) - if err1 != nil { - return nil, true, err1 // abort + pbpk, err := cryptoenc.PubKeyToProto(pubKey) + if err != nil { + return nil, true, err + } + _, err = protoio.NewDelimitedWriter(sc).WriteMsg(&tmp2p.AuthSigMessage{PubKey: pbpk, Sig: signature}) + if err != nil { + return nil, true, err // abort } return nil, false, nil }, func(_ int) (val interface{}, abort bool, err error) { - var _recvMsg authSigMessage - var _, err2 = cdc.UnmarshalBinaryLengthPrefixedReader(sc, &_recvMsg, 1024*1024) // TODO - if err2 != nil { - return nil, true, err2 // abort + var pba tmp2p.AuthSigMessage + err = protoio.NewDelimitedReader(sc, 1024*1024).ReadMsg(&pba) + if err != nil { + return nil, true, err // abort + } + + pk, err := cryptoenc.PubKeyFromProto(pba.PubKey) + if err != nil { + return nil, true, err // abort + } + + _recvMsg := authSigMessage{ + Key: pk, + Sig: pba.Sig, } return _recvMsg, false, nil }, diff --git a/p2p/conn/secret_connection_test.go b/p2p/conn/secret_connection_test.go index 9044d73be..e787e1348 100644 --- a/p2p/conn/secret_connection_test.go +++ b/p2p/conn/secret_connection_test.go @@ -6,6 +6,7 @@ import ( "flag" "fmt" "io" + "io/ioutil" "log" "os" "path/filepath" @@ -19,12 +20,15 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/secp256k1" + "github.com/tendermint/tendermint/crypto/sr25519" "github.com/tendermint/tendermint/libs/async" - tmos "github.com/tendermint/tendermint/libs/os" tmrand "github.com/tendermint/tendermint/libs/rand" ) +// Run go test -update from within this module +// to update the golden test vector file +var update = flag.Bool("update", false, "update .golden files") + type kvstoreConn struct { *io.PipeReader *io.PipeWriter @@ -39,60 +43,15 @@ func (drw kvstoreConn) Close() (err error) { return err1 } -// Each returned ReadWriteCloser is akin to a net.Connection -func makeKVStoreConnPair() (fooConn, barConn kvstoreConn) { - barReader, fooWriter := io.Pipe() - fooReader, barWriter := io.Pipe() - return kvstoreConn{fooReader, fooWriter}, kvstoreConn{barReader, barWriter} +type privKeyWithNilPubKey struct { + orig crypto.PrivKey } -func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection) { - - var fooConn, barConn = makeKVStoreConnPair() - var fooPrvKey = ed25519.GenPrivKey() - var fooPubKey = fooPrvKey.PubKey() - var barPrvKey = ed25519.GenPrivKey() - var barPubKey = barPrvKey.PubKey() - - // Make connections from both sides in parallel. - var trs, ok = async.Parallel( - func(_ int) (val interface{}, abort bool, err error) { - fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey) - if err != nil { - tb.Errorf("failed to establish SecretConnection for foo: %v", err) - return nil, true, err - } - remotePubBytes := fooSecConn.RemotePubKey() - if !remotePubBytes.Equals(barPubKey) { - err = fmt.Errorf("unexpected fooSecConn.RemotePubKey. 
Expected %v, got %v", - barPubKey, fooSecConn.RemotePubKey()) - tb.Error(err) - return nil, false, err - } - return nil, false, nil - }, - func(_ int) (val interface{}, abort bool, err error) { - barSecConn, err = MakeSecretConnection(barConn, barPrvKey) - if barSecConn == nil { - tb.Errorf("failed to establish SecretConnection for bar: %v", err) - return nil, true, err - } - remotePubBytes := barSecConn.RemotePubKey() - if !remotePubBytes.Equals(fooPubKey) { - err = fmt.Errorf("unexpected barSecConn.RemotePubKey. Expected %v, got %v", - fooPubKey, barSecConn.RemotePubKey()) - tb.Error(err) - return nil, false, nil - } - return nil, false, nil - }, - ) - - require.Nil(tb, trs.FirstError()) - require.True(tb, ok, "Unexpected task abortion") - - return fooSecConn, barSecConn -} +func (pk privKeyWithNilPubKey) Bytes() []byte { return pk.orig.Bytes() } +func (pk privKeyWithNilPubKey) Sign(msg []byte) ([]byte, error) { return pk.orig.Sign(msg) } +func (pk privKeyWithNilPubKey) PubKey() crypto.PubKey { return nil } +func (pk privKeyWithNilPubKey) Equals(pk2 crypto.PrivKey) bool { return pk.orig.Equals(pk2) } +func (pk privKeyWithNilPubKey) Type() string { return "privKeyWithNilPubKey" } func TestSecretConnectionHandshake(t *testing.T) { fooSecConn, barSecConn := makeSecretConnPair(t) @@ -148,26 +107,6 @@ func TestConcurrentRead(t *testing.T) { } } -func writeLots(t *testing.T, wg *sync.WaitGroup, conn io.Writer, txt string, n int) { - defer wg.Done() - for i := 0; i < n; i++ { - _, err := conn.Write([]byte(txt)) - if err != nil { - t.Errorf("failed to write to fooSecConn: %v", err) - return - } - } -} - -func readLots(t *testing.T, wg *sync.WaitGroup, conn io.Reader, n int) { - readBuffer := make([]byte, dataMaxSize) - for i := 0; i < n; i++ { - _, err := conn.Read(readBuffer) - assert.NoError(t, err) - } - wg.Done() -} - func TestSecretConnectionReadWrite(t *testing.T) { fooConn, barConn := makeKVStoreConnPair() fooWrites, barWrites := []string{}, []string{} @@ -282,19 +221,14 @@ func TestSecretConnectionReadWrite(t *testing.T) { compareWritesReads(fooWrites, barReads) compareWritesReads(barWrites, fooReads) - } -// Run go test -update from within this module -// to update the golden test vector file -var update = flag.Bool("update", false, "update .golden files") - func TestDeriveSecretsAndChallengeGolden(t *testing.T) { goldenFilepath := filepath.Join("testdata", t.Name()+".golden") if *update { t.Logf("Updating golden test vector file %s", goldenFilepath) data := createGoldenTestVectors(t) - tmos.WriteFile(goldenFilepath, []byte(data), 0644) + require.NoError(t, ioutil.WriteFile(goldenFilepath, []byte(data), 0644)) } f, err := os.Open(goldenFilepath) if err != nil { @@ -322,49 +256,52 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) { } } -type privKeyWithNilPubKey struct { - orig crypto.PrivKey -} - -func (pk privKeyWithNilPubKey) Bytes() []byte { return pk.orig.Bytes() } -func (pk privKeyWithNilPubKey) Sign(msg []byte) ([]byte, error) { return pk.orig.Sign(msg) } -func (pk privKeyWithNilPubKey) PubKey() crypto.PubKey { return nil } -func (pk privKeyWithNilPubKey) Equals(pk2 crypto.PrivKey) bool { return pk.orig.Equals(pk2) } - func TestNilPubkey(t *testing.T) { var fooConn, barConn = makeKVStoreConnPair() + defer fooConn.Close() + defer barConn.Close() var fooPrvKey = ed25519.GenPrivKey() var barPrvKey = privKeyWithNilPubKey{ed25519.GenPrivKey()} - go func() { - _, err := MakeSecretConnection(barConn, barPrvKey) - assert.NoError(t, err) - }() + go MakeSecretConnection(fooConn, 
fooPrvKey) //nolint:errcheck // ignore for tests - assert.NotPanics(t, func() { - _, err := MakeSecretConnection(fooConn, fooPrvKey) - if assert.Error(t, err) { - assert.Equal(t, "expected ed25519 pubkey, got ", err.Error()) - } - }) + _, err := MakeSecretConnection(barConn, barPrvKey) + require.Error(t, err) + assert.Equal(t, "toproto: key type is not supported", err.Error()) } func TestNonEd25519Pubkey(t *testing.T) { var fooConn, barConn = makeKVStoreConnPair() + defer fooConn.Close() + defer barConn.Close() var fooPrvKey = ed25519.GenPrivKey() - var barPrvKey = secp256k1.GenPrivKey() + var barPrvKey = sr25519.GenPrivKey() - go func() { - _, err := MakeSecretConnection(barConn, barPrvKey) - assert.NoError(t, err) - }() + go MakeSecretConnection(fooConn, fooPrvKey) //nolint:errcheck // ignore for tests + + _, err := MakeSecretConnection(barConn, barPrvKey) + require.Error(t, err) + assert.Contains(t, err.Error(), "is not supported") +} - assert.NotPanics(t, func() { - _, err := MakeSecretConnection(fooConn, fooPrvKey) - if assert.Error(t, err) { - assert.Equal(t, "expected ed25519 pubkey, got secp256k1.PubKeySecp256k1", err.Error()) +func writeLots(t *testing.T, wg *sync.WaitGroup, conn io.Writer, txt string, n int) { + defer wg.Done() + for i := 0; i < n; i++ { + _, err := conn.Write([]byte(txt)) + if err != nil { + t.Errorf("failed to write to fooSecConn: %v", err) + return } - }) + } +} + +func readLots(t *testing.T, wg *sync.WaitGroup, conn io.Reader, n int) { + readBuffer := make([]byte, dataMaxSize) + for i := 0; i < n; i++ { + _, err := conn.Read(readBuffer) + assert.NoError(t, err) + } + wg.Done() } // Creates the data for a test vector file. @@ -386,6 +323,64 @@ func createGoldenTestVectors(t *testing.T) string { return data } +// Each returned ReadWriteCloser is akin to a net.Connection +func makeKVStoreConnPair() (fooConn, barConn kvstoreConn) { + barReader, fooWriter := io.Pipe() + fooReader, barWriter := io.Pipe() + return kvstoreConn{fooReader, fooWriter}, kvstoreConn{barReader, barWriter} +} + +func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection) { + var ( + fooConn, barConn = makeKVStoreConnPair() + fooPrvKey = ed25519.GenPrivKey() + fooPubKey = fooPrvKey.PubKey() + barPrvKey = ed25519.GenPrivKey() + barPubKey = barPrvKey.PubKey() + ) + + // Make connections from both sides in parallel. + var trs, ok = async.Parallel( + func(_ int) (val interface{}, abort bool, err error) { + fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey) + if err != nil { + tb.Errorf("failed to establish SecretConnection for foo: %v", err) + return nil, true, err + } + remotePubBytes := fooSecConn.RemotePubKey() + if !remotePubBytes.Equals(barPubKey) { + err = fmt.Errorf("unexpected fooSecConn.RemotePubKey. Expected %v, got %v", + barPubKey, fooSecConn.RemotePubKey()) + tb.Error(err) + return nil, true, err + } + return nil, false, nil + }, + func(_ int) (val interface{}, abort bool, err error) { + barSecConn, err = MakeSecretConnection(barConn, barPrvKey) + if barSecConn == nil { + tb.Errorf("failed to establish SecretConnection for bar: %v", err) + return nil, true, err + } + remotePubBytes := barSecConn.RemotePubKey() + if !remotePubBytes.Equals(fooPubKey) { + err = fmt.Errorf("unexpected barSecConn.RemotePubKey. 
Expected %v, got %v", + fooPubKey, barSecConn.RemotePubKey()) + tb.Error(err) + return nil, true, err + } + return nil, false, nil + }, + ) + + require.Nil(tb, trs.FirstError()) + require.True(tb, ok, "Unexpected task abortion") + + return fooSecConn, barSecConn +} + +// Benchmarks + func BenchmarkWriteSecretConnection(b *testing.B) { b.StopTimer() b.ReportAllocs() @@ -431,7 +426,7 @@ func BenchmarkWriteSecretConnection(b *testing.B) { if err := fooSecConn.Close(); err != nil { b.Error(err) } - //barSecConn.Close() race condition + // barSecConn.Close() race condition } func BenchmarkReadSecretConnection(b *testing.B) { diff --git a/p2p/conn_set.go b/p2p/conn_set.go index a889ad5e1..376510d72 100644 --- a/p2p/conn_set.go +++ b/p2p/conn_set.go @@ -2,7 +2,8 @@ package p2p import ( "net" - "sync" + + tmsync "github.com/tendermint/tendermint/libs/sync" ) // ConnSet is a lookup table for connections and all their ips. @@ -20,7 +21,7 @@ type connSetItem struct { } type connSet struct { - sync.RWMutex + tmsync.RWMutex conns map[string]connSetItem } diff --git a/p2p/fuzz.go b/p2p/fuzz.go deleted file mode 100644 index 878a59c2f..000000000 --- a/p2p/fuzz.go +++ /dev/null @@ -1,153 +0,0 @@ -package p2p - -import ( - "net" - "sync" - "time" - - "github.com/tendermint/tendermint/config" - tmrand "github.com/tendermint/tendermint/libs/rand" -) - -// FuzzedConnection wraps any net.Conn and depending on the mode either delays -// reads/writes or randomly drops reads/writes/connections. -type FuzzedConnection struct { - conn net.Conn - - mtx sync.Mutex - start <-chan time.Time - active bool - - config *config.FuzzConnConfig -} - -// FuzzConn creates a new FuzzedConnection. Fuzzing starts immediately. -func FuzzConn(conn net.Conn) net.Conn { - return FuzzConnFromConfig(conn, config.DefaultFuzzConnConfig()) -} - -// FuzzConnFromConfig creates a new FuzzedConnection from a config. Fuzzing -// starts immediately. -func FuzzConnFromConfig(conn net.Conn, config *config.FuzzConnConfig) net.Conn { - return &FuzzedConnection{ - conn: conn, - start: make(<-chan time.Time), - active: true, - config: config, - } -} - -// FuzzConnAfter creates a new FuzzedConnection. Fuzzing starts when the -// duration elapses. -func FuzzConnAfter(conn net.Conn, d time.Duration) net.Conn { - return FuzzConnAfterFromConfig(conn, d, config.DefaultFuzzConnConfig()) -} - -// FuzzConnAfterFromConfig creates a new FuzzedConnection from a config. -// Fuzzing starts when the duration elapses. -func FuzzConnAfterFromConfig( - conn net.Conn, - d time.Duration, - config *config.FuzzConnConfig, -) net.Conn { - return &FuzzedConnection{ - conn: conn, - start: time.After(d), - active: false, - config: config, - } -} - -// Config returns the connection's config. -func (fc *FuzzedConnection) Config() *config.FuzzConnConfig { - return fc.config -} - -// Read implements net.Conn. -func (fc *FuzzedConnection) Read(data []byte) (n int, err error) { - if fc.fuzz() { - return 0, nil - } - return fc.conn.Read(data) -} - -// Write implements net.Conn. -func (fc *FuzzedConnection) Write(data []byte) (n int, err error) { - if fc.fuzz() { - return 0, nil - } - return fc.conn.Write(data) -} - -// Close implements net.Conn. -func (fc *FuzzedConnection) Close() error { return fc.conn.Close() } - -// LocalAddr implements net.Conn. -func (fc *FuzzedConnection) LocalAddr() net.Addr { return fc.conn.LocalAddr() } - -// RemoteAddr implements net.Conn. 
-func (fc *FuzzedConnection) RemoteAddr() net.Addr { return fc.conn.RemoteAddr() } - -// SetDeadline implements net.Conn. -func (fc *FuzzedConnection) SetDeadline(t time.Time) error { return fc.conn.SetDeadline(t) } - -// SetReadDeadline implements net.Conn. -func (fc *FuzzedConnection) SetReadDeadline(t time.Time) error { - return fc.conn.SetReadDeadline(t) -} - -// SetWriteDeadline implements net.Conn. -func (fc *FuzzedConnection) SetWriteDeadline(t time.Time) error { - return fc.conn.SetWriteDeadline(t) -} - -func (fc *FuzzedConnection) randomDuration() time.Duration { - maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / 1000) - return time.Millisecond * time.Duration(tmrand.Int()%maxDelayMillis) // nolint: gas -} - -// implements the fuzz (delay, kill conn) -// and returns whether or not the read/write should be ignored -func (fc *FuzzedConnection) fuzz() bool { - if !fc.shouldFuzz() { - return false - } - - switch fc.config.Mode { - case config.FuzzModeDrop: - // randomly drop the r/w, drop the conn, or sleep - r := tmrand.Float64() - switch { - case r <= fc.config.ProbDropRW: - return true - case r < fc.config.ProbDropRW+fc.config.ProbDropConn: - // XXX: can't this fail because machine precision? - // XXX: do we need an error? - fc.Close() // nolint: errcheck, gas - return true - case r < fc.config.ProbDropRW+fc.config.ProbDropConn+fc.config.ProbSleep: - time.Sleep(fc.randomDuration()) - } - case config.FuzzModeDelay: - // sleep a bit - time.Sleep(fc.randomDuration()) - } - return false -} - -func (fc *FuzzedConnection) shouldFuzz() bool { - if fc.active { - return true - } - - fc.mtx.Lock() - defer fc.mtx.Unlock() - - select { - case <-fc.start: - fc.active = true - return true - default: - return false - } -} diff --git a/p2p/key.go b/p2p/key.go index f4c9413ab..57cdd4de1 100644 --- a/p2p/key.go +++ b/p2p/key.go @@ -1,13 +1,12 @@ package p2p import ( - "bytes" "encoding/hex" - "fmt" "io/ioutil" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" + tmjson "github.com/tendermint/tendermint/libs/json" tmos "github.com/tendermint/tendermint/libs/os" ) @@ -44,8 +43,8 @@ func PubKeyToID(pubKey crypto.PubKey) ID { return ID(hex.EncodeToString(pubKey.Address())) } -// LoadOrGenNodeKey attempts to load the NodeKey from the given filePath. -// If the file does not exist, it generates and saves a new NodeKey. +// LoadOrGenNodeKey attempts to load the NodeKey from the given filePath. If +// the file does not exist, it generates and saves a new NodeKey. func LoadOrGenNodeKey(filePath string) (*NodeKey, error) { if tmos.FileExists(filePath) { nodeKey, err := LoadNodeKey(filePath) @@ -54,59 +53,42 @@ func LoadOrGenNodeKey(filePath string) (*NodeKey, error) { } return nodeKey, nil } - return genNodeKey(filePath) + + privKey := ed25519.GenPrivKey() + nodeKey := &NodeKey{ + PrivKey: privKey, + } + + if err := nodeKey.SaveAs(filePath); err != nil { + return nil, err + } + + return nodeKey, nil } +// LoadNodeKey loads NodeKey located in filePath. 
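The rewritten flow in p2p/key.go is: generate an ed25519 key, persist it with SaveAs, and read it back with LoadNodeKey, now via tmjson instead of amino. A small usage sketch of that round trip; the file path and the use of NodeKey.ID for comparison are illustrative, not taken from this changeset:

// Sketch: round-tripping a node key through the reworked API.
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/tendermint/tendermint/p2p"
)

func main() {
	path := filepath.Join(os.TempDir(), "example_node_key.json") // hypothetical location

	// First call generates a fresh ed25519 key and saves it via SaveAs.
	nk1, err := p2p.LoadOrGenNodeKey(path)
	if err != nil {
		log.Fatal(err)
	}

	// Second call loads the same key back from disk.
	nk2, err := p2p.LoadNodeKey(path)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(nk1.ID() == nk2.ID()) // true: the key survives the round trip
}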
func LoadNodeKey(filePath string) (*NodeKey, error) { jsonBytes, err := ioutil.ReadFile(filePath) if err != nil { return nil, err } nodeKey := new(NodeKey) - err = cdc.UnmarshalJSON(jsonBytes, nodeKey) + err = tmjson.Unmarshal(jsonBytes, nodeKey) if err != nil { - return nil, fmt.Errorf("error reading NodeKey from %v: %v", filePath, err) + return nil, err } return nodeKey, nil } -func genNodeKey(filePath string) (*NodeKey, error) { - privKey := ed25519.GenPrivKey() - nodeKey := &NodeKey{ - PrivKey: privKey, - } - - jsonBytes, err := cdc.MarshalJSON(nodeKey) +// SaveAs persists the NodeKey to filePath. +func (nodeKey *NodeKey) SaveAs(filePath string) error { + jsonBytes, err := tmjson.Marshal(nodeKey) if err != nil { - return nil, err + return err } err = ioutil.WriteFile(filePath, jsonBytes, 0600) if err != nil { - return nil, err - } - return nodeKey, nil -} - -//------------------------------------------------------------------------------ - -// MakePoWTarget returns the big-endian encoding of 2^(targetBits - difficulty) - 1. -// It can be used as a Proof of Work target. -// NOTE: targetBits must be a multiple of 8 and difficulty must be less than targetBits. -func MakePoWTarget(difficulty, targetBits uint) []byte { - if targetBits%8 != 0 { - panic(fmt.Sprintf("targetBits (%d) not a multiple of 8", targetBits)) - } - if difficulty >= targetBits { - panic(fmt.Sprintf("difficulty (%d) >= targetBits (%d)", difficulty, targetBits)) - } - targetBytes := targetBits / 8 - zeroPrefixLen := (int(difficulty) / 8) - prefix := bytes.Repeat([]byte{0}, zeroPrefixLen) - mod := (difficulty % 8) - if mod > 0 { - nonZeroPrefix := byte(1<<(8-mod) - 1) - prefix = append(prefix, nonZeroPrefix) + return err } - tailLen := int(targetBytes) - len(prefix) - return append(prefix, bytes.Repeat([]byte{0xFF}, tailLen)...) + return nil } diff --git a/p2p/key_test.go b/p2p/key_test.go index 6f8e9b0f8..22cccf743 100644 --- a/p2p/key_test.go +++ b/p2p/key_test.go @@ -1,13 +1,14 @@ package p2p import ( - "bytes" "os" "path/filepath" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/ed25519" tmrand "github.com/tendermint/tendermint/libs/rand" ) @@ -23,29 +24,30 @@ func TestLoadOrGenNodeKey(t *testing.T) { assert.Equal(t, nodeKey, nodeKey2) } -//---------------------------------------------------------- +func TestLoadNodeKey(t *testing.T) { + filePath := filepath.Join(os.TempDir(), tmrand.Str(12)+"_peer_id.json") + + _, err := LoadNodeKey(filePath) + assert.True(t, os.IsNotExist(err)) + + _, err = LoadOrGenNodeKey(filePath) + require.NoError(t, err) -func padBytes(bz []byte, targetBytes int) []byte { - return append(bz, bytes.Repeat([]byte{0xFF}, targetBytes-len(bz))...) 
+ nodeKey, err := LoadNodeKey(filePath) + assert.NoError(t, err) + assert.NotNil(t, nodeKey) } -func TestPoWTarget(t *testing.T) { - - targetBytes := 20 - cases := []struct { - difficulty uint - target []byte - }{ - {0, padBytes([]byte{}, targetBytes)}, - {1, padBytes([]byte{127}, targetBytes)}, - {8, padBytes([]byte{0}, targetBytes)}, - {9, padBytes([]byte{0, 127}, targetBytes)}, - {10, padBytes([]byte{0, 63}, targetBytes)}, - {16, padBytes([]byte{0, 0}, targetBytes)}, - {17, padBytes([]byte{0, 0, 127}, targetBytes)}, - } +func TestNodeKeySaveAs(t *testing.T) { + filePath := filepath.Join(os.TempDir(), tmrand.Str(12)+"_peer_id.json") + + assert.NoFileExists(t, filePath) - for _, c := range cases { - assert.Equal(t, MakePoWTarget(c.difficulty, 20*8), c.target) + privKey := ed25519.GenPrivKey() + nodeKey := &NodeKey{ + PrivKey: privKey, } + err := nodeKey.SaveAs(filePath) + assert.NoError(t, err) + assert.FileExists(t, filePath) } diff --git a/p2p/mock/peer.go b/p2p/mock/peer.go index b2db913b8..59f6e0f4a 100644 --- a/p2p/mock/peer.go +++ b/p2p/mock/peer.go @@ -36,11 +36,13 @@ func NewPeer(ip net.IP) *Peer { kv: make(map[string]interface{}), } mp.BaseService = service.NewBaseService(nil, "MockPeer", mp) - mp.Start() + if err := mp.Start(); err != nil { + panic(err) + } return mp } -func (mp *Peer) FlushStop() { mp.Stop() } +func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool { return true } func (mp *Peer) Send(chID byte, msgBytes []byte) bool { return true } func (mp *Peer) NodeInfo() p2p.NodeInfo { diff --git a/p2p/mocks/peer.go b/p2p/mocks/peer.go new file mode 100644 index 000000000..704f965cd --- /dev/null +++ b/p2p/mocks/peer.go @@ -0,0 +1,331 @@ +// Code generated by mockery v2.3.0. DO NOT EDIT. 
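The new p2p/mocks/peer.go below is generated from the Peer interface by mockery (see the //go:generate directive added to p2p/peer.go further down in this diff). A sketch of how such a mock is typically driven with testify's expectation API; the expectations and values are illustrative only:

// Sketch: exercising the generated Peer mock in a test.
package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/mocks"
)

func TestPeerMockSketch(t *testing.T) {
	peer := &mocks.Peer{}

	// Program the mock: each On(...).Return(...) pair fills in one method.
	peer.On("ID").Return(p2p.ID("deadbeef"))
	peer.On("Send", byte(0x01), []byte("msg")).Return(true)

	require.Equal(t, p2p.ID("deadbeef"), peer.ID())
	require.True(t, peer.Send(0x01, []byte("msg")))

	// Fails the test if any programmed expectation went uncalled.
	peer.AssertExpectations(t)
}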
+ +package mocks + +import ( + log "github.com/tendermint/tendermint/libs/log" + conn "github.com/tendermint/tendermint/p2p/conn" + + mock "github.com/stretchr/testify/mock" + + net "net" + + p2p "github.com/tendermint/tendermint/p2p" +) + +// Peer is an autogenerated mock type for the Peer type +type Peer struct { + mock.Mock +} + +// CloseConn provides a mock function with given fields: +func (_m *Peer) CloseConn() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FlushStop provides a mock function with given fields: +func (_m *Peer) FlushStop() { + _m.Called() +} + +// Get provides a mock function with given fields: _a0 +func (_m *Peer) Get(_a0 string) interface{} { + ret := _m.Called(_a0) + + var r0 interface{} + if rf, ok := ret.Get(0).(func(string) interface{}); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + return r0 +} + +// ID provides a mock function with given fields: +func (_m *Peer) ID() p2p.ID { + ret := _m.Called() + + var r0 p2p.ID + if rf, ok := ret.Get(0).(func() p2p.ID); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(p2p.ID) + } + + return r0 +} + +// IsOutbound provides a mock function with given fields: +func (_m *Peer) IsOutbound() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// IsPersistent provides a mock function with given fields: +func (_m *Peer) IsPersistent() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// IsRunning provides a mock function with given fields: +func (_m *Peer) IsRunning() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// NodeInfo provides a mock function with given fields: +func (_m *Peer) NodeInfo() p2p.NodeInfo { + ret := _m.Called() + + var r0 p2p.NodeInfo + if rf, ok := ret.Get(0).(func() p2p.NodeInfo); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(p2p.NodeInfo) + } + } + + return r0 +} + +// OnReset provides a mock function with given fields: +func (_m *Peer) OnReset() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnStart provides a mock function with given fields: +func (_m *Peer) OnStart() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnStop provides a mock function with given fields: +func (_m *Peer) OnStop() { + _m.Called() +} + +// Quit provides a mock function with given fields: +func (_m *Peer) Quit() <-chan struct{} { + ret := _m.Called() + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// RemoteAddr provides a mock function with given fields: +func (_m *Peer) RemoteAddr() net.Addr { + ret := _m.Called() + + var r0 net.Addr + if rf, ok := ret.Get(0).(func() net.Addr); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(net.Addr) + } + } + + return r0 +} + +// RemoteIP provides a mock function with given 
fields: +func (_m *Peer) RemoteIP() net.IP { + ret := _m.Called() + + var r0 net.IP + if rf, ok := ret.Get(0).(func() net.IP); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(net.IP) + } + } + + return r0 +} + +// Reset provides a mock function with given fields: +func (_m *Peer) Reset() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Send provides a mock function with given fields: _a0, _a1 +func (_m *Peer) Send(_a0 byte, _a1 []byte) bool { + ret := _m.Called(_a0, _a1) + + var r0 bool + if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Set provides a mock function with given fields: _a0, _a1 +func (_m *Peer) Set(_a0 string, _a1 interface{}) { + _m.Called(_a0, _a1) +} + +// SetLogger provides a mock function with given fields: _a0 +func (_m *Peer) SetLogger(_a0 log.Logger) { + _m.Called(_a0) +} + +// SocketAddr provides a mock function with given fields: +func (_m *Peer) SocketAddr() *p2p.NetAddress { + ret := _m.Called() + + var r0 *p2p.NetAddress + if rf, ok := ret.Get(0).(func() *p2p.NetAddress); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*p2p.NetAddress) + } + } + + return r0 +} + +// Start provides a mock function with given fields: +func (_m *Peer) Start() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Status provides a mock function with given fields: +func (_m *Peer) Status() conn.ConnectionStatus { + ret := _m.Called() + + var r0 conn.ConnectionStatus + if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(conn.ConnectionStatus) + } + + return r0 +} + +// Stop provides a mock function with given fields: +func (_m *Peer) Stop() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// String provides a mock function with given fields: +func (_m *Peer) String() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// TrySend provides a mock function with given fields: _a0, _a1 +func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool { + ret := _m.Called(_a0, _a1) + + var r0 bool + if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} diff --git a/p2p/netaddress.go b/p2p/netaddress.go index c71f3ce7f..77209217b 100644 --- a/p2p/netaddress.go +++ b/p2p/netaddress.go @@ -6,6 +6,7 @@ package p2p import ( "encoding/hex" + "errors" "flag" "fmt" "net" @@ -13,21 +14,18 @@ import ( "strings" "time" - "github.com/pkg/errors" + tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) +// EmptyNetAddress defines the string representation of an empty NetAddress +const EmptyNetAddress = "" + // NetAddress defines information about a peer on the network // including its ID, IP address, and port. type NetAddress struct { ID ID `json:"id"` IP net.IP `json:"ip"` Port uint16 `json:"port"` - - // TODO: - // Name string `json:"name"` // optional DNS name - - // memoize .String() - str string } // IDAddressString returns id@hostPort. 
It strips the leading
@@ -138,6 +136,55 @@ func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress {
 	}
 }
 
+// NetAddressFromProto converts a Protobuf NetAddress into a native struct.
+func NetAddressFromProto(pb tmp2p.NetAddress) (*NetAddress, error) {
+	ip := net.ParseIP(pb.IP)
+	if ip == nil {
+		return nil, fmt.Errorf("invalid IP address %v", pb.IP)
+	}
+	if pb.Port >= 1<<16 {
+		return nil, fmt.Errorf("invalid port number %v", pb.Port)
+	}
+	return &NetAddress{
+		ID:   ID(pb.ID),
+		IP:   ip,
+		Port: uint16(pb.Port),
+	}, nil
+}
+
+// NetAddressesFromProto converts a slice of Protobuf NetAddresses into a native slice.
+func NetAddressesFromProto(pbs []tmp2p.NetAddress) ([]*NetAddress, error) {
+	nas := make([]*NetAddress, 0, len(pbs))
+	for _, pb := range pbs {
+		na, err := NetAddressFromProto(pb)
+		if err != nil {
+			return nil, err
+		}
+		nas = append(nas, na)
+	}
+	return nas, nil
+}
+
+// NetAddressesToProto converts a slice of NetAddresses into a Protobuf slice.
+func NetAddressesToProto(nas []*NetAddress) []tmp2p.NetAddress {
+	pbs := make([]tmp2p.NetAddress, 0, len(nas))
+	for _, na := range nas {
+		if na != nil {
+			pbs = append(pbs, na.ToProto())
+		}
+	}
+	return pbs
+}
+
+// ToProto converts a NetAddress to Protobuf.
+func (na *NetAddress) ToProto() tmp2p.NetAddress {
+	return tmp2p.NetAddress{
+		ID:   string(na.ID),
+		IP:   na.IP.String(),
+		Port: uint32(na.Port),
+	}
+}
+
 // Equals reports whether na and other are the same addresses,
 // including their ID, IP, and Port.
 func (na *NetAddress) Equals(other interface{}) bool {
@@ -163,16 +210,15 @@ func (na *NetAddress) Same(other interface{}) bool {
 
 // String representation: <ID>@<IP>:<PORT>
 func (na *NetAddress) String() string {
 	if na == nil {
-		return ""
+		return EmptyNetAddress
 	}
-	if na.str == "" {
-		addrStr := na.DialString()
-		if na.ID != "" {
-			addrStr = IDAddressString(na.ID, addrStr)
-		}
-		na.str = addrStr
+
+	addrStr := na.DialString()
+	if na.ID != "" {
+		addrStr = IDAddressString(na.ID, addrStr)
 	}
-	return na.str
+
+	return addrStr
 }
 
 func (na *NetAddress) DialString() string {
@@ -217,7 +263,7 @@ func (na *NetAddress) Routable() bool {
 // address or one that matches the RFC3849 documentation address format.
 func (na *NetAddress) Valid() error {
 	if err := validateID(na.ID); err != nil {
-		return errors.Wrap(err, "invalid ID")
+		return fmt.Errorf("invalid ID: %w", err)
	}
 
 	if na.IP == nil {
@@ -313,21 +359,43 @@ var rfc4862 = net.IPNet{IP: net.ParseIP("FE80::"), Mask: net.CIDRMask(64, 128)}
 var rfc6052 = net.IPNet{IP: net.ParseIP("64:FF9B::"), Mask: net.CIDRMask(96, 128)}
 var rfc6145 = net.IPNet{IP: net.ParseIP("::FFFF:0:0:0"), Mask: net.CIDRMask(96, 128)}
 var zero4 = net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(8, 32)}
+var (
+	// onionCatNet defines the IPv6 address block used to support Tor.
+	// bitcoind encodes a .onion address as a 16 byte number by decoding the
+	// address prior to the .onion (i.e. the key hash) base32 into a ten
+	// byte number. It then stores the first 6 bytes of the address as
+	// 0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43.
+	//
+	// This is the same range used by OnionCat, which is part of the
+	// RFC4193 unique local IPv6 range.
+	//
+	// In summary the format is:
+	// { magic 6 bytes, 10 bytes base32 decode of key hash }
+	onionCatNet = ipNet("fd87:d87e:eb43::", 48, 128)
+)
+
+// ipNet returns a net.IPNet struct given the passed IP address string, number
+// of one bits to include at the start of the mask, and the total number of bits
+// for the mask.
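The converters above validate the IP string and reject ports that would overflow uint16, since the proto form carries Port as uint32. A round-trip sketch under those rules; the address values are illustrative:

// Sketch: NetAddress <-> proto round trip using the helpers above.
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/tendermint/tendermint/p2p"
)

func main() {
	na := p2p.NewNetAddressIPPort(net.ParseIP("10.0.0.1"), 26656)
	na.ID = p2p.ID("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef")

	pb := na.ToProto() // Port widens to uint32 on the wire

	back, err := p2p.NetAddressFromProto(pb) // IP and port re-validated here
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(back.String() == na.String()) // true: same id@ip:port
}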
+func ipNet(ip string, ones, bits int) net.IPNet { + return net.IPNet{IP: net.ParseIP(ip), Mask: net.CIDRMask(ones, bits)} +} func (na *NetAddress) RFC1918() bool { return rfc1918_10.Contains(na.IP) || rfc1918_192.Contains(na.IP) || rfc1918_172.Contains(na.IP) } -func (na *NetAddress) RFC3849() bool { return rfc3849.Contains(na.IP) } -func (na *NetAddress) RFC3927() bool { return rfc3927.Contains(na.IP) } -func (na *NetAddress) RFC3964() bool { return rfc3964.Contains(na.IP) } -func (na *NetAddress) RFC4193() bool { return rfc4193.Contains(na.IP) } -func (na *NetAddress) RFC4380() bool { return rfc4380.Contains(na.IP) } -func (na *NetAddress) RFC4843() bool { return rfc4843.Contains(na.IP) } -func (na *NetAddress) RFC4862() bool { return rfc4862.Contains(na.IP) } -func (na *NetAddress) RFC6052() bool { return rfc6052.Contains(na.IP) } -func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) } +func (na *NetAddress) RFC3849() bool { return rfc3849.Contains(na.IP) } +func (na *NetAddress) RFC3927() bool { return rfc3927.Contains(na.IP) } +func (na *NetAddress) RFC3964() bool { return rfc3964.Contains(na.IP) } +func (na *NetAddress) RFC4193() bool { return rfc4193.Contains(na.IP) } +func (na *NetAddress) RFC4380() bool { return rfc4380.Contains(na.IP) } +func (na *NetAddress) RFC4843() bool { return rfc4843.Contains(na.IP) } +func (na *NetAddress) RFC4862() bool { return rfc4862.Contains(na.IP) } +func (na *NetAddress) RFC6052() bool { return rfc6052.Contains(na.IP) } +func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) } +func (na *NetAddress) OnionCatTor() bool { return onionCatNet.Contains(na.IP) } func removeProtocolIfDefined(addr string) string { if strings.Contains(addr, "://") { diff --git a/p2p/netaddress_test.go b/p2p/netaddress_test.go index 4a9ef333d..65f9fb834 100644 --- a/p2p/netaddress_test.go +++ b/p2p/netaddress_test.go @@ -2,12 +2,35 @@ package p2p import ( "net" + "sync" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func TestNetAddress_String(t *testing.T) { + tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080") + require.Nil(t, err) + + netAddr := NewNetAddress("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", tcpAddr) + + var wg sync.WaitGroup + + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + _ = netAddr.String() + }() + } + + wg.Wait() + + s := netAddr.String() + require.Equal(t, "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", s) +} + func TestNewNetAddress(t *testing.T) { tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080") require.Nil(t, err) diff --git a/p2p/node_info.go b/p2p/node_info.go index 24d5e11e1..8acc23676 100644 --- a/p2p/node_info.go +++ b/p2p/node_info.go @@ -1,11 +1,13 @@ package p2p import ( + "errors" "fmt" "reflect" "github.com/tendermint/tendermint/libs/bytes" tmstrings "github.com/tendermint/tendermint/libs/strings" + tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" "github.com/tendermint/tendermint/version" ) @@ -44,9 +46,9 @@ type nodeInfoTransport interface { // ProtocolVersion contains the protocol versions for the software. 
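The TestNetAddress_String test above hammers String() from ten goroutines because the removed str field was written lazily with no synchronization, which the race detector flags as soon as two readers format the same address concurrently. The commit simply recomputes the string on every call; if memoization were worth keeping, sync.Once would be the race-free shape. A hypothetical sketch (not code from this changeset):

// Sketch: race-free memoization with sync.Once, on a stand-in type.
package main

import (
	"fmt"
	"sync"
)

type addr struct {
	id, hostPort string

	once sync.Once
	str  string
}

func (a *addr) String() string {
	a.once.Do(func() { // computed exactly once, safely, across goroutines
		a.str = a.id + "@" + a.hostPort
	})
	return a.str
}

func main() {
	a := &addr{id: "deadbeef", hostPort: "127.0.0.1:26656"}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); _ = a.String() }()
	}
	wg.Wait()

	fmt.Println(a.String())
}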
type ProtocolVersion struct { - P2P version.Protocol `json:"p2p"` - Block version.Protocol `json:"block"` - App version.Protocol `json:"app"` + P2P uint64 `json:"p2p"` + Block uint64 `json:"block"` + App uint64 `json:"app"` } // defaultProtocolVersion populates the Block and P2P versions using @@ -58,7 +60,7 @@ var defaultProtocolVersion = NewProtocolVersion( ) // NewProtocolVersion returns a fully populated ProtocolVersion. -func NewProtocolVersion(p2p, block, app version.Protocol) ProtocolVersion { +func NewProtocolVersion(p2p, block, app uint64) ProtocolVersion { return ProtocolVersion{ P2P: p2p, Block: block, @@ -220,30 +222,50 @@ func (info DefaultNodeInfo) NetAddress() (*NetAddress, error) { return NewNetAddressString(idAddr) } -//----------------------------------------------------------- -// These methods are for Protobuf Compatibility +func (info DefaultNodeInfo) ToProto() *tmp2p.DefaultNodeInfo { -// Size returns the size of the amino encoding, in bytes. -func (info *DefaultNodeInfo) Size() int { - bs, _ := info.Marshal() - return len(bs) -} + dni := new(tmp2p.DefaultNodeInfo) + dni.ProtocolVersion = tmp2p.ProtocolVersion{ + P2P: info.ProtocolVersion.P2P, + Block: info.ProtocolVersion.Block, + App: info.ProtocolVersion.App, + } + + dni.DefaultNodeID = string(info.DefaultNodeID) + dni.ListenAddr = info.ListenAddr + dni.Network = info.Network + dni.Version = info.Version + dni.Channels = info.Channels + dni.Moniker = info.Moniker + dni.Other = tmp2p.DefaultNodeInfoOther{ + TxIndex: info.Other.TxIndex, + RPCAddress: info.Other.RPCAddress, + } -// Marshal returns the amino encoding. -func (info *DefaultNodeInfo) Marshal() ([]byte, error) { - return cdc.MarshalBinaryBare(info) + return dni } -// MarshalTo calls Marshal and copies to the given buffer. -func (info *DefaultNodeInfo) MarshalTo(data []byte) (int, error) { - bs, err := info.Marshal() - if err != nil { - return -1, err +func DefaultNodeInfoFromToProto(pb *tmp2p.DefaultNodeInfo) (DefaultNodeInfo, error) { + if pb == nil { + return DefaultNodeInfo{}, errors.New("nil node info") + } + dni := DefaultNodeInfo{ + ProtocolVersion: ProtocolVersion{ + P2P: pb.ProtocolVersion.P2P, + Block: pb.ProtocolVersion.Block, + App: pb.ProtocolVersion.App, + }, + DefaultNodeID: ID(pb.DefaultNodeID), + ListenAddr: pb.ListenAddr, + Network: pb.Network, + Version: pb.Version, + Channels: pb.Channels, + Moniker: pb.Moniker, + Other: DefaultNodeInfoOther{ + TxIndex: pb.Other.TxIndex, + RPCAddress: pb.Other.RPCAddress, + }, } - return copy(data, bs), nil -} -// Unmarshal deserializes from amino encoded form. 
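The amino compatibility shims (Size, Marshal, MarshalTo, Unmarshal) give way to the explicit ToProto and DefaultNodeInfoFromToProto conversions shown here. A round-trip sketch with illustrative field values, using only the fields visible in this diff:

// Sketch: DefaultNodeInfo proto round trip.
package main

import (
	"fmt"
	"log"

	"github.com/tendermint/tendermint/p2p"
)

func main() {
	info := p2p.DefaultNodeInfo{
		ProtocolVersion: p2p.NewProtocolVersion(8, 11, 0), // plain uint64s now
		DefaultNodeID:   "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
		ListenAddr:      "tcp://0.0.0.0:26656",
		Network:         "test-chain",
		Version:         "0.34.0",
		Channels:        []byte{0x01},
		Moniker:         "node0",
	}

	pb := info.ToProto()

	back, err := p2p.DefaultNodeInfoFromToProto(pb) // errors only on a nil message
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(back.DefaultNodeID == info.DefaultNodeID) // true
}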
-func (info *DefaultNodeInfo) Unmarshal(bs []byte) error { - return cdc.UnmarshalBinaryBare(bs, info) + return dni, nil } diff --git a/p2p/node_info_test.go b/p2p/node_info_test.go index 8896efe1d..c34e71230 100644 --- a/p2p/node_info_test.go +++ b/p2p/node_info_test.go @@ -1,7 +1,6 @@ package p2p import ( - "fmt" "testing" "github.com/stretchr/testify/assert" @@ -24,8 +23,8 @@ func TestNodeInfoValidate(t *testing.T) { dupChannels = append(dupChannels, testCh) nonASCII := "¢§µ" - emptyTab := fmt.Sprintf("\t") - emptySpace := fmt.Sprintf(" ") + emptyTab := "\t" + emptySpace := " " testCases := []struct { testName string diff --git a/p2p/peer.go b/p2p/peer.go index 7a6d6f868..36db3d728 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -12,6 +12,8 @@ import ( tmconn "github.com/tendermint/tendermint/p2p/conn" ) +//go:generate mockery --case underscore --name Peer + const metricsTickerDuration = 10 * time.Second // Peer is an interface representing a peer connected on a reactor. @@ -198,7 +200,9 @@ func (p *peer) FlushStop() { func (p *peer) OnStop() { p.metricsTicker.Stop() p.BaseService.OnStop() - p.mconn.Stop() // stop everything and close the conn + if err := p.mconn.Stop(); err != nil { // stop everything and close the conn + p.Logger.Debug("Error while stopping peer", "err", err) + } } //--------------------------------------------------- @@ -318,7 +322,7 @@ func (p *peer) CloseConn() error { // CloseConn closes the underlying connection func (pc *peerConn) CloseConn() { - pc.conn.Close() // nolint: errcheck + pc.conn.Close() } // RemoteAddr returns peer's remote network address. diff --git a/p2p/peer_set.go b/p2p/peer_set.go index 87cf61da0..38dff7a9f 100644 --- a/p2p/peer_set.go +++ b/p2p/peer_set.go @@ -2,7 +2,8 @@ package p2p import ( "net" - "sync" + + tmsync "github.com/tendermint/tendermint/libs/sync" ) // IPeerSet has a (immutable) subset of the methods of PeerSet. @@ -19,7 +20,7 @@ type IPeerSet interface { // PeerSet is a special structure for keeping a table of peers. // Iteration over the peers is super fast and thread-safe. 
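The sync.Mutex and sync.RWMutex swaps here (connSet, PeerSet, and the address book below) point the code at tendermint's libs/sync aliases. My understanding is that the package is a thin wrapper so a build tag can substitute deadlock-detecting mutexes; roughly along these lines, where the tag name and the go-deadlock dependency are assumptions, not something shown in this diff:

// Sketch of the wrapper pattern libs/sync appears to follow.

// +build !deadlock

package sync

import "sync"

// A Mutex is a mutual exclusion lock. With the deadlock tag, a
// deadlock-detecting drop-in (e.g. github.com/sasha-s/go-deadlock)
// would be embedded here instead.
type Mutex struct {
	sync.Mutex
}

// An RWMutex is a reader/writer mutual exclusion lock.
type RWMutex struct {
	sync.RWMutex
}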
type PeerSet struct { - mtx sync.Mutex + mtx tmsync.Mutex lookup map[ID]*peerSetItem list []Peer } diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index 3273a59aa..b61b43f10 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -18,7 +18,7 @@ type mockPeer struct { id ID } -func (mp *mockPeer) FlushStop() { mp.Stop() } +func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true } func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool { return true } func (mp *mockPeer) NodeInfo() NodeInfo { return DefaultNodeInfo{} } diff --git a/p2p/peer_test.go b/p2p/peer_test.go index 508cef7fa..f8808f14d 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -26,14 +25,18 @@ func TestPeerBasic(t *testing.T) { // simulate remote peer rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} rp.Start() - defer rp.Stop() + t.Cleanup(rp.Stop) p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), cfg, tmconn.DefaultMConnConfig()) require.Nil(err) err = p.Start() require.Nil(err) - defer p.Stop() + t.Cleanup(func() { + if err := p.Stop(); err != nil { + t.Error(err) + } + }) assert.True(p.IsRunning()) assert.True(p.IsOutbound()) @@ -52,7 +55,7 @@ func TestPeerSend(t *testing.T) { // simulate remote peer rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: config} rp.Start() - defer rp.Stop() + t.Cleanup(rp.Stop) p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config, tmconn.DefaultMConnConfig()) require.Nil(err) @@ -60,7 +63,11 @@ func TestPeerSend(t *testing.T) { err = p.Start() require.Nil(err) - defer p.Stop() + t.Cleanup(func() { + if err := p.Stop(); err != nil { + t.Error(err) + } + }) assert.True(p.CanSend(testCh)) assert.True(p.Send(testCh, []byte("Asylum"))) @@ -114,13 +121,13 @@ func testOutboundPeerConn( var pc peerConn conn, err := testDial(addr, config) if err != nil { - return pc, errors.Wrap(err, "Error creating peer") + return pc, fmt.Errorf("error creating peer: %w", err) } pc, err = testPeerConn(conn, config, true, persistent, ourNodePrivKey, addr) if err != nil { if cerr := conn.Close(); cerr != nil { - return pc, errors.Wrap(err, cerr.Error()) + return pc, fmt.Errorf("%v: %w", cerr.Error(), err) } return pc, err } @@ -128,7 +135,7 @@ func testOutboundPeerConn( // ensure dialed ID matches connection ID if addr.ID != pc.ID() { if cerr := conn.Close(); cerr != nil { - return pc, errors.Wrap(err, cerr.Error()) + return pc, fmt.Errorf("%v: %w", cerr.Error(), err) } return pc, ErrSwitchAuthenticationFailure{addr, pc.ID()} } diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index c9c372638..6726d15aa 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -5,7 +5,7 @@ package pex import ( - "crypto/sha256" + crand "crypto/rand" "encoding/binary" "fmt" "math" @@ -14,10 +14,13 @@ import ( "sync" "time" + "github.com/minio/highwayhash" + "github.com/tendermint/tendermint/crypto" tmmath "github.com/tendermint/tendermint/libs/math" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" + tmsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/p2p" ) @@ -85,7 +88,7 @@ type addrBook struct { service.BaseService // accessed concurrently - mtx sync.Mutex + mtx tmsync.Mutex rand *tmrand.Rand ourAddrs map[string]struct{} privateIDs 
map[p2p.ID]struct{}
@@ -100,10 +103,17 @@ type addrBook struct {
 	filePath          string
 	key               string // random prefix for bucket placement
 	routabilityStrict bool
+	hashKey           []byte
 
 	wg sync.WaitGroup
 }
 
+func newHashKey() []byte {
+	result := make([]byte, highwayhash.Size)
+	crand.Read(result) //nolint:errcheck // ignore error
+	return result
+}
+
 // NewAddrBook creates a new address book.
 // Use Start to begin processing asynchronous address updates.
 func NewAddrBook(filePath string, routabilityStrict bool) AddrBook {
@@ -115,6 +125,7 @@ func NewAddrBook(filePath string, routabilityStrict bool) AddrBook {
 		badPeers:          make(map[p2p.ID]*knownAddress),
 		filePath:          filePath,
 		routabilityStrict: routabilityStrict,
+		hashKey:           newHashKey(),
 	}
 	am.init()
 	am.BaseService = *service.NewBaseService(nil, "AddrBook", am)
@@ -317,7 +328,9 @@ func (a *addrBook) MarkGood(id p2p.ID) {
 	}
 	ka.markGood()
 	if ka.isNew() {
-		a.moveToOld(ka)
+		if err := a.moveToOld(ka); err != nil {
+			a.Logger.Error("Error moving address to old", "err", err)
+		}
 	}
 }
 
@@ -344,16 +357,30 @@ func (a *addrBook) MarkBad(addr *p2p.NetAddress, banTime time.Duration) {
 	}
 }
 
+// ReinstateBadPeers removes bad peers from ban list and places them into a new
+// bucket.
 func (a *addrBook) ReinstateBadPeers() {
 	a.mtx.Lock()
 	defer a.mtx.Unlock()
+
 	for _, ka := range a.badPeers {
-		if !ka.isBanned() {
-			bucket := a.calcNewBucket(ka.Addr, ka.Src)
-			a.addToNewBucket(ka, bucket)
-			delete(a.badPeers, ka.ID())
-			a.Logger.Info("Reinstated address", "addr", ka.Addr)
+		if ka.isBanned() {
+			continue
+		}
+
+		bucket, err := a.calcNewBucket(ka.Addr, ka.Src)
+		if err != nil {
+			a.Logger.Error("Failed to calculate new bucket (bad peer won't be reinstantiated)",
+				"addr", ka.Addr, "err", err)
+			continue
+		}
+
+		if err := a.addToNewBucket(ka, bucket); err != nil {
+			a.Logger.Error("Error adding peer to new bucket", "err", err)
 		}
+		delete(a.badPeers, ka.ID())
+
+		a.Logger.Info("Reinstated address", "addr", ka.Addr)
 	}
 }
 
@@ -495,11 +522,10 @@ func (a *addrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAd
 
 // Adds ka to new bucket. Returns false if it couldn't do it cuz buckets full.
 // NOTE: currently it always returns true.
-func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) {
-	// Sanity check
+func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) error {
+	// Consistency check to ensure we don't add an already known address
 	if ka.isOld() {
-		a.Logger.Error("Failed Sanity Check! Cant add old address to new bucket", "ka", ka, "bucket", bucketIdx)
-		return
+		return errAddrBookOldAddressNewBucket{ka.Addr, bucketIdx}
 	}
 
 	addrStr := ka.Addr.String()
@@ -507,7 +533,7 @@ func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) {
 
 	// Already exists?
 	if _, ok := bucket[addrStr]; ok {
-		return
+		return nil
 	}
 
 	// Enforce max addresses.
@@ -525,6 +551,7 @@
 
 	// Add it to addrLookup
 	a.addrLookup[ka.ID()] = ka
+	return nil
 }
 
 // Adds ka to old bucket. Returns false if it couldn't do it cuz buckets full.
@@ -642,8 +669,10 @@ func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error {
 	ka := a.addrLookup[addr.ID]
 	if ka != nil {
-		// If its already old and the addr is the same, ignore it.
+		// If it's already old and the address IDs are the same, ignore it.
+		// Thereby avoiding issues with a node on the network attempting to change
+		// the IP of a known node ID.
(Which could yield an eclipse attack on the node) + if ka.isOld() && ka.Addr.ID == addr.ID { return nil } // Already in max new buckets. @@ -659,9 +688,11 @@ func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { ka = newKnownAddress(addr, src) } - bucket := a.calcNewBucket(addr, src) - a.addToNewBucket(ka, bucket) - return nil + bucket, err := a.calcNewBucket(addr, src) + if err != nil { + return err + } + return a.addToNewBucket(ka, bucket) } func (a *addrBook) randomPickAddresses(bucketType byte, num int) []*p2p.NetAddress { @@ -722,15 +753,15 @@ func (a *addrBook) expireNew(bucketIdx int) { // Promotes an address from new to old. If the destination bucket is full, // demote the oldest one to a "new" bucket. // TODO: Demote more probabilistically? -func (a *addrBook) moveToOld(ka *knownAddress) { +func (a *addrBook) moveToOld(ka *knownAddress) error { // Sanity check if ka.isOld() { a.Logger.Error(fmt.Sprintf("Cannot promote address that is already old %v", ka)) - return + return nil } if len(ka.Buckets) == 0 { a.Logger.Error(fmt.Sprintf("Cannot promote address that isn't in any new buckets %v", ka)) - return + return nil } // Remove from all (new) buckets. @@ -739,14 +770,22 @@ func (a *addrBook) moveToOld(ka *knownAddress) { ka.BucketType = bucketTypeOld // Try to add it to its oldBucket destination. - oldBucketIdx := a.calcOldBucket(ka.Addr) + oldBucketIdx, err := a.calcOldBucket(ka.Addr) + if err != nil { + return err + } added := a.addToOldBucket(ka, oldBucketIdx) if !added { // No room; move the oldest to a new bucket oldest := a.pickOldest(bucketTypeOld, oldBucketIdx) a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx) - newBucketIdx := a.calcNewBucket(oldest.Addr, oldest.Src) - a.addToNewBucket(oldest, newBucketIdx) + newBucketIdx, err := a.calcNewBucket(oldest.Addr, oldest.Src) + if err != nil { + return err + } + if err := a.addToNewBucket(oldest, newBucketIdx); err != nil { + a.Logger.Error("Error adding peer to old bucket", "err", err) + } // Finally, add our ka to old bucket again. added = a.addToOldBucket(ka, oldBucketIdx) @@ -754,6 +793,7 @@ func (a *addrBook) moveToOld(ka *knownAddress) { a.Logger.Error(fmt.Sprintf("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx)) } } + return nil } func (a *addrBook) removeAddress(addr *p2p.NetAddress) { @@ -785,14 +825,16 @@ func (a *addrBook) addBadPeer(addr *p2p.NetAddress, banTime time.Duration) bool //--------------------------------------------------------------------- // calculate bucket placements -// doublesha256( key + sourcegroup + -// int64(doublesha256(key + group + sourcegroup))%bucket_per_group ) % num_new_buckets -func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) int { +// hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % bucket_per_group) % num_new_buckets +func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) (int, error) { data1 := []byte{} data1 = append(data1, []byte(a.key)...) data1 = append(data1, []byte(a.groupKey(addr))...) data1 = append(data1, []byte(a.groupKey(src))...) - hash1 := doubleSha256(data1) + hash1, err := a.hash(data1) + if err != nil { + return 0, err + } hash64 := binary.BigEndian.Uint64(hash1) hash64 %= newBucketsPerGroup var hashbuf [8]byte @@ -802,17 +844,23 @@ func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) int { data2 = append(data2, a.groupKey(src)...) data2 = append(data2, hashbuf[:]...) 
- hash2 := doubleSha256(data2) - return int(binary.BigEndian.Uint64(hash2) % newBucketCount) + hash2, err := a.hash(data2) + if err != nil { + return 0, err + } + result := int(binary.BigEndian.Uint64(hash2) % newBucketCount) + return result, nil } -// doublesha256( key + group + -// int64(doublesha256(key + addr))%buckets_per_group ) % num_old_buckets -func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) int { +// hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets +func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) (int, error) { data1 := []byte{} data1 = append(data1, []byte(a.key)...) data1 = append(data1, []byte(addr.String())...) - hash1 := doubleSha256(data1) + hash1, err := a.hash(data1) + if err != nil { + return 0, err + } hash64 := binary.BigEndian.Uint64(hash1) hash64 %= oldBucketsPerGroup var hashbuf [8]byte @@ -822,36 +870,45 @@ func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) int { data2 = append(data2, a.groupKey(addr)...) data2 = append(data2, hashbuf[:]...) - hash2 := doubleSha256(data2) - return int(binary.BigEndian.Uint64(hash2) % oldBucketCount) + hash2, err := a.hash(data2) + if err != nil { + return 0, err + } + result := int(binary.BigEndian.Uint64(hash2) % oldBucketCount) + return result, nil } // Return a string representing the network group of this address. -// This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string +// This is the /16 for IPv4 (e.g. 1.2.0.0), the /32 (/36 for he.net) for IPv6, the string // "local" for a local address and the string "unroutable" for an unroutable // address. func (a *addrBook) groupKey(na *p2p.NetAddress) string { - if a.routabilityStrict && na.Local() { + return groupKeyFor(na, a.routabilityStrict) +} + +func groupKeyFor(na *p2p.NetAddress, routabilityStrict bool) string { + if routabilityStrict && na.Local() { return "local" } - if a.routabilityStrict && !na.Routable() { + if routabilityStrict && !na.Routable() { return "unroutable" } if ipv4 := na.IP.To4(); ipv4 != nil { - return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(16, 32)}).String() + return na.IP.Mask(net.CIDRMask(16, 32)).String() } + if na.RFC6145() || na.RFC6052() { // last four bytes are the ip address ip := na.IP[12:16] - return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String() + return ip.Mask(net.CIDRMask(16, 32)).String() } if na.RFC3964() { - ip := na.IP[2:7] - return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String() - + ip := na.IP[2:6] + return ip.Mask(net.CIDRMask(16, 32)).String() } + if na.RFC4380() { // teredo tunnels have the last 4 bytes as the v4 address XOR // 0xff. @@ -859,28 +916,31 @@ func (a *addrBook) groupKey(na *p2p.NetAddress) string { for i, byte := range na.IP[12:16] { ip[i] = byte ^ 0xff } - return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String() + return ip.Mask(net.CIDRMask(16, 32)).String() + } + + if na.OnionCatTor() { + // group is keyed off the first 4 bits of the actual onion key. + return fmt.Sprintf("tor:%d", na.IP[6]&((1<<4)-1)) } // OK, so now we know ourselves to be a IPv6 address. // bitcoind uses /32 for everything, except for Hurricane Electric's // (he.net) IP range, which it uses /36 for. 
bits := 32 - heNet := &net.IPNet{IP: net.ParseIP("2001:470::"), - Mask: net.CIDRMask(32, 128)} + heNet := &net.IPNet{IP: net.ParseIP("2001:470::"), Mask: net.CIDRMask(32, 128)} if heNet.Contains(na.IP) { bits = 36 } - - return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(bits, 128)}).String() + ipv6Mask := net.CIDRMask(bits, 128) + return na.IP.Mask(ipv6Mask).String() } -// doubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes. -func doubleSha256(b []byte) []byte { - hasher := sha256.New() - hasher.Write(b) // nolint:errcheck - sum := hasher.Sum(nil) - hasher.Reset() - hasher.Write(sum) // nolint:errcheck - return hasher.Sum(nil) +func (a *addrBook) hash(b []byte) ([]byte, error) { + hasher, err := highwayhash.New64(a.hashKey) + if err != nil { + return nil, err + } + hasher.Write(b) //nolint:errcheck // ignore error + return hasher.Sum(nil), nil } diff --git a/p2p/pex/addrbook_test.go b/p2p/pex/addrbook_test.go index 739fff185..ad41d5562 100644 --- a/p2p/pex/addrbook_test.go +++ b/p2p/pex/addrbook_test.go @@ -5,6 +5,7 @@ import ( "fmt" "io/ioutil" "math" + "net" "os" "testing" "time" @@ -34,7 +35,8 @@ func TestAddrBookPickAddress(t *testing.T) { randAddrs := randNetAddressPairs(t, 1) addrSrc := randAddrs[0] - book.AddAddress(addrSrc.addr, addrSrc.src) + err := book.AddAddress(addrSrc.addr, addrSrc.src) + require.NoError(t, err) // pick an address when we only have new address addr = book.PickAddress(0) @@ -67,7 +69,8 @@ func TestAddrBookSaveLoad(t *testing.T) { book = NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) - book.Start() + err := book.Start() + require.NoError(t, err) assert.True(t, book.Empty()) @@ -75,7 +78,8 @@ func TestAddrBookSaveLoad(t *testing.T) { randAddrs := randNetAddressPairs(t, 100) for _, addrSrc := range randAddrs { - book.AddAddress(addrSrc.addr, addrSrc.src) + err := book.AddAddress(addrSrc.addr, addrSrc.src) + require.NoError(t, err) } assert.Equal(t, 100, book.Size()) @@ -83,7 +87,8 @@ func TestAddrBookSaveLoad(t *testing.T) { book = NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) - book.Start() + err = book.Start() + require.NoError(t, err) assert.Equal(t, 100, book.Size()) } @@ -99,7 +104,8 @@ func TestAddrBookLookup(t *testing.T) { for _, addrSrc := range randAddrs { addr := addrSrc.addr src := addrSrc.src - book.AddAddress(addr, src) + err := book.AddAddress(addr, src) + require.NoError(t, err) ka := book.HasAddress(addr) assert.True(t, ka, "Expected to find KnownAddress %v but wasn't there.", addr) @@ -115,7 +121,8 @@ func TestAddrBookPromoteToOld(t *testing.T) { book := NewAddrBook(fname, true) book.SetLogger(log.TestingLogger()) for _, addrSrc := range randAddrs { - book.AddAddress(addrSrc.addr, addrSrc.src) + err := book.AddAddress(addrSrc.addr, addrSrc.src) + require.NoError(t, err) } // Attempt all addresses. 
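The bucket math above replaces double-SHA256 with keyed 64-bit HighwayHash, seeded once per process by newHashKey, so an outside observer cannot predict bucket placement. A minimal sketch of the same placement idea, assuming github.com/minio/highwayhash with a 32-byte key (highwayhash.Size) as the diff uses; the group-key string and bucket count are illustrative:

// Sketch: keyed bucket placement in the spirit of calcNewBucket above.
package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"github.com/minio/highwayhash"
)

func bucketFor(hashKey, data []byte, numBuckets uint64) (uint64, error) {
	h, err := highwayhash.New64(hashKey) // errors on a bad key length
	if err != nil {
		return 0, err
	}
	h.Write(data) //nolint:errcheck // hash writes never fail
	return h.Sum64() % numBuckets, nil
}

func main() {
	key := make([]byte, highwayhash.Size) // random per-process key, as in newHashKey
	if _, err := rand.Read(key); err != nil {
		log.Fatal(err)
	}

	idx, err := bucketFor(key, []byte("key+group+sourcegroup"), 256)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bucket:", idx)
}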
@@ -160,9 +167,12 @@ func TestAddrBookHandlesDuplicates(t *testing.T) {
 	differentSrc := randIPv4Address(t)
 	for _, addrSrc := range randAddrs {
-		book.AddAddress(addrSrc.addr, addrSrc.src)
-		book.AddAddress(addrSrc.addr, addrSrc.src)  // duplicate
-		book.AddAddress(addrSrc.addr, differentSrc) // different src
+		err := book.AddAddress(addrSrc.addr, addrSrc.src)
+		require.NoError(t, err)
+		err = book.AddAddress(addrSrc.addr, addrSrc.src) // duplicate
+		require.NoError(t, err)
+		err = book.AddAddress(addrSrc.addr, differentSrc) // different src
+		require.NoError(t, err)
 	}
 
 	assert.Equal(t, 100, book.Size())
@@ -208,7 +218,8 @@ func TestAddrBookRemoveAddress(t *testing.T) {
 	book.SetLogger(log.TestingLogger())
 
 	addr := randIPv4Address(t)
-	book.AddAddress(addr, addr)
+	err := book.AddAddress(addr, addr)
+	require.NoError(t, err)
 	assert.Equal(t, 1, book.Size())
 
 	book.RemoveAddress(addr)
@@ -259,7 +270,8 @@ func TestAddrBookGetSelection(t *testing.T) {
 
 	// 2) add one address
 	addr := randIPv4Address(t)
-	book.AddAddress(addr, addr)
+	err := book.AddAddress(addr, addr)
+	require.NoError(t, err)
 
 	assert.Equal(t, 1, len(book.GetSelection()))
 	assert.Equal(t, addr, book.GetSelection()[0])
@@ -267,7 +279,8 @@ func TestAddrBookGetSelection(t *testing.T) {
 	// 3) add a bunch of addresses
 	randAddrs := randNetAddressPairs(t, 100)
 	for _, addrSrc := range randAddrs {
-		book.AddAddress(addrSrc.addr, addrSrc.src)
+		err := book.AddAddress(addrSrc.addr, addrSrc.src)
+		require.NoError(t, err)
 	}
 
 	// check there is no duplicates
@@ -300,7 +313,8 @@ func TestAddrBookGetSelectionWithBias(t *testing.T) {
 
 	// 2) add one address
 	addr := randIPv4Address(t)
-	book.AddAddress(addr, addr)
+	err := book.AddAddress(addr, addr)
+	require.NoError(t, err)
 
 	selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
 	assert.Equal(t, 1, len(selection))
@@ -309,7 +323,8 @@ func TestAddrBookGetSelectionWithBias(t *testing.T) {
 	// 3) add a bunch of addresses
 	randAddrs := randNetAddressPairs(t, 100)
 	for _, addrSrc := range randAddrs {
-		book.AddAddress(addrSrc.addr, addrSrc.src)
+		err := book.AddAddress(addrSrc.addr, addrSrc.src)
+		require.NoError(t, err)
 	}
 
 	// check there is no duplicates
@@ -375,7 +390,8 @@ func TestAddrBookHasAddress(t *testing.T) {
 	book := NewAddrBook(fname, true)
 	book.SetLogger(log.TestingLogger())
 	addr := randIPv4Address(t)
-	book.AddAddress(addr, addr)
+	err := book.AddAddress(addr, addr)
+	require.NoError(t, err)
 
 	assert.True(t, book.HasAddress(addr))
 
@@ -441,7 +457,8 @@ func TestAddrBookEmpty(t *testing.T) {
 	require.True(t, book.Empty())
 
 	// Check that book with address is not empty
-	book.AddAddress(randIPv4Address(t), randIPv4Address(t))
+	err := book.AddAddress(randIPv4Address(t), randIPv4Address(t))
+	require.NoError(t, err)
 	require.False(t, book.Empty())
 }
 
@@ -572,6 +589,128 @@ func TestMultipleAddrBookAddressSelection(t *testing.T) {
 	}
 }
 
+func TestAddrBookAddDoesNotOverwriteOldIP(t *testing.T) {
+	fname := createTempFileName("addrbook_test")
+	defer deleteTempFile(fname)
+
+	// This test adds a peer to the address book and marks it good.
+	// It then attempts to override the peer's IP by adding a peer with the same ID
+	// but a different IP. We distinguish the IPs by "RealIP" and "OverrideAttemptIP".
+	peerID := "678503e6c8f50db7279c7da3cb9b072aac4bc0d5"
+	peerRealIP := "1.1.1.1:26656"
+	peerOverrideAttemptIP := "2.2.2.2:26656"
+	SrcAddr := "b0dd378c3fbc4c156cd6d302a799f0d2e4227201@159.89.121.174:26656"
+
+	// There is a chance that AddAddress will ignore the new peer it's given.
+ // So we repeat trying to override the peer several times, + // to ensure we aren't in a case that got probabilistically ignored + numOverrideAttempts := 10 + + peerRealAddr, err := p2p.NewNetAddressString(peerID + "@" + peerRealIP) + require.Nil(t, err) + + peerOverrideAttemptAddr, err := p2p.NewNetAddressString(peerID + "@" + peerOverrideAttemptIP) + require.Nil(t, err) + + src, err := p2p.NewNetAddressString(SrcAddr) + require.Nil(t, err) + + book := NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + err = book.AddAddress(peerRealAddr, src) + require.Nil(t, err) + book.MarkAttempt(peerRealAddr) + book.MarkGood(peerRealAddr.ID) + + // Double check that adding a peer again doesn't error + err = book.AddAddress(peerRealAddr, src) + require.Nil(t, err) + + // Try changing ip but keeping the same node id. (change 1.1.1.1 to 2.2.2.2) + // This should just be ignored, and not error. + for i := 0; i < numOverrideAttempts; i++ { + err = book.AddAddress(peerOverrideAttemptAddr, src) + require.Nil(t, err) + } + // Now check that the IP was not overridden. + // This is done by sampling several peers from addr book + // and ensuring they all have the correct IP. + // In the expected functionality, this test should only have 1 Peer, hence will pass. + for i := 0; i < numOverrideAttempts; i++ { + selection := book.GetSelection() + for _, addr := range selection { + require.Equal(t, addr.IP, peerRealAddr.IP) + } + } +} + +func TestAddrBookGroupKey(t *testing.T) { + // non-strict routability + testCases := []struct { + name string + ip string + expKey string + }{ + // IPv4 normal. + {"ipv4 normal class a", "12.1.2.3", "12.1.0.0"}, + {"ipv4 normal class b", "173.1.2.3", "173.1.0.0"}, + {"ipv4 normal class c", "196.1.2.3", "196.1.0.0"}, + + // IPv6/IPv4 translations. + {"ipv6 rfc3964 with ipv4 encap", "2002:0c01:0203::", "12.1.0.0"}, + {"ipv6 rfc4380 toredo ipv4", "2001:0:1234::f3fe:fdfc", "12.1.0.0"}, + {"ipv6 rfc6052 well-known prefix with ipv4", "64:ff9b::0c01:0203", "12.1.0.0"}, + {"ipv6 rfc6145 translated ipv4", "::ffff:0:0c01:0203", "12.1.0.0"}, + + // Tor. + {"ipv6 tor onioncat", "fd87:d87e:eb43:1234::5678", "tor:2"}, + {"ipv6 tor onioncat 2", "fd87:d87e:eb43:1245::6789", "tor:2"}, + {"ipv6 tor onioncat 3", "fd87:d87e:eb43:1345::6789", "tor:3"}, + + // IPv6 normal. + {"ipv6 normal", "2602:100::1", "2602:100::"}, + {"ipv6 normal 2", "2602:0100::1234", "2602:100::"}, + {"ipv6 hurricane electric", "2001:470:1f10:a1::2", "2001:470:1000::"}, + {"ipv6 hurricane electric 2", "2001:0470:1f10:a1::2", "2001:470:1000::"}, + } + + for i, tc := range testCases { + nip := net.ParseIP(tc.ip) + key := groupKeyFor(p2p.NewNetAddressIPPort(nip, 26656), false) + assert.Equal(t, tc.expKey, key, "#%d", i) + } + + // strict routability + testCases = []struct { + name string + ip string + expKey string + }{ + // Local addresses. + {"ipv4 localhost", "127.0.0.1", "local"}, + {"ipv6 localhost", "::1", "local"}, + {"ipv4 zero", "0.0.0.0", "local"}, + {"ipv4 first octet zero", "0.1.2.3", "local"}, + + // Unroutable addresses. 
+ {"ipv4 invalid bcast", "255.255.255.255", "unroutable"}, + {"ipv4 rfc1918 10/8", "10.1.2.3", "unroutable"}, + {"ipv4 rfc1918 172.16/12", "172.16.1.2", "unroutable"}, + {"ipv4 rfc1918 192.168/16", "192.168.1.2", "unroutable"}, + {"ipv6 rfc3849 2001:db8::/32", "2001:db8::1234", "unroutable"}, + {"ipv4 rfc3927 169.254/16", "169.254.1.2", "unroutable"}, + {"ipv6 rfc4193 fc00::/7", "fc00::1234", "unroutable"}, + {"ipv6 rfc4843 2001:10::/28", "2001:10::1234", "unroutable"}, + {"ipv6 rfc4862 fe80::/64", "fe80::1234", "unroutable"}, + } + + for i, tc := range testCases { + nip := net.ParseIP(tc.ip) + key := groupKeyFor(p2p.NewNetAddressIPPort(nip, 26656), true) + assert.Equal(t, tc.expKey, key, "#%d", i) + } +} + func assertMOldAndNNewAddrsInSelection(t *testing.T, m, n int, addrs []*p2p.NetAddress, book *addrBook) { nOld, nNew := countOldAndNewAddrsInSelection(addrs, book) assert.Equal(t, m, nOld, "old addresses") @@ -607,13 +746,15 @@ func createAddrBookWithMOldAndNNewAddrs(t *testing.T, nOld, nNew int) (book *add randAddrs := randNetAddressPairs(t, nOld) for _, addr := range randAddrs { - book.AddAddress(addr.addr, addr.src) + err := book.AddAddress(addr.addr, addr.src) + require.NoError(t, err) book.MarkGood(addr.addr.ID) } randAddrs = randNetAddressPairs(t, nNew) for _, addr := range randAddrs { - book.AddAddress(addr.addr, addr.src) + err := book.AddAddress(addr.addr, addr.src) + require.NoError(t, err) } return diff --git a/p2p/pex/codec.go b/p2p/pex/codec.go deleted file mode 100644 index 79ab56380..000000000 --- a/p2p/pex/codec.go +++ /dev/null @@ -1,11 +0,0 @@ -package pex - -import ( - amino "github.com/tendermint/go-amino" -) - -var cdc *amino.Codec = amino.NewCodec() - -func init() { - RegisterMessages(cdc) -} diff --git a/p2p/pex/errors.go b/p2p/pex/errors.go index 8f51d4217..e60166d06 100644 --- a/p2p/pex/errors.go +++ b/p2p/pex/errors.go @@ -15,6 +15,17 @@ func (err ErrAddrBookNonRoutable) Error() string { return fmt.Sprintf("Cannot add non-routable address %v", err.Addr) } +type errAddrBookOldAddressNewBucket struct { + Addr *p2p.NetAddress + BucketID int +} + +func (err errAddrBookOldAddressNewBucket) Error() string { + return fmt.Sprintf("failed consistency check!"+ + " Cannot add pre-existing address %v into new bucket %v", + err.Addr, err.BucketID) +} + type ErrAddrBookSelf struct { Addr *p2p.NetAddress } diff --git a/p2p/pex/file.go b/p2p/pex/file.go index 3579db893..6f912bc70 100644 --- a/p2p/pex/file.go +++ b/p2p/pex/file.go @@ -55,7 +55,7 @@ func (a *addrBook) loadFromFile(filePath string) bool { if err != nil { panic(fmt.Sprintf("Error opening file %s: %v", filePath, err)) } - defer r.Close() // nolint: errcheck + defer r.Close() aJSON := &addrBookJSON{} dec := json.NewDecoder(r) err = dec.Decode(aJSON) diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index d06814195..fc701bfdd 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -1,22 +1,20 @@ package pex import ( + "errors" "fmt" - "reflect" "sync" "time" - "github.com/pkg/errors" - - "github.com/tendermint/go-amino" + "github.com/gogo/protobuf/proto" "github.com/tendermint/tendermint/libs/cmap" tmmath "github.com/tendermint/tendermint/libs/math" - "github.com/tendermint/tendermint/libs/rand" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p/conn" + tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) type Peer = p2p.Peer @@ -173,16 +171,19 @@ func (r 
*Reactor) OnStart() error { // OnStop implements BaseService func (r *Reactor) OnStop() { - r.book.Stop() + if err := r.book.Stop(); err != nil { + r.Logger.Error("Error stopping address book", "err", err) + } } // GetChannels implements Reactor func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { return []*conn.ChannelDescriptor{ { - ID: PexChannel, - Priority: 1, - SendQueueCapacity: 10, + ID: PexChannel, + Priority: 1, + SendQueueCapacity: 10, + RecvMessageCapacity: maxMsgSize, }, } } @@ -235,6 +236,8 @@ func (r *Reactor) logErrAddrBook(err error) { } // Receive implements Reactor by handling incoming PEX messages. +// XXX: do not call any methods that can block or incur heavy processing. +// https://github.com/tendermint/tendermint/issues/2888 func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { msg, err := decodeMsg(msgBytes) if err != nil { @@ -245,7 +248,7 @@ func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { r.Logger.Debug("Received message", "src", src, "chId", chID, "msg", msg) switch msg := msg.(type) { - case *pexRequestMessage: + case *tmp2p.PexRequest: // NOTE: this is a prime candidate for amplification attacks, // so it's important we @@ -282,17 +285,25 @@ func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { r.SendAddrs(src, r.book.GetSelection()) } - case *pexAddrsMessage: + case *tmp2p.PexAddrs: // If we asked for addresses, add them to the book - if err := r.ReceiveAddrs(msg.Addrs, src); err != nil { + addrs, err := p2p.NetAddressesFromProto(msg.Addrs) + if err != nil { + r.Switch.StopPeerForError(src, err) + r.book.MarkBad(src.SocketAddr(), defaultBanTime) + return + } + err = r.ReceiveAddrs(addrs, src) + if err != nil { r.Switch.StopPeerForError(src, err) if err == ErrUnsolicitedList { r.book.MarkBad(src.SocketAddr(), defaultBanTime) } return } + default: - r.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) + r.Logger.Error(fmt.Sprintf("Unknown message type %T", msg)) } } @@ -339,7 +350,7 @@ func (r *Reactor) RequestAddrs(p Peer) { } r.Logger.Debug("Request addrs", "from", p) r.requestsSent.Set(id, struct{}{}) - p.Send(PexChannel, cdc.MustMarshalBinaryBare(&pexRequestMessage{})) + p.Send(PexChannel, mustEncode(&tmp2p.PexRequest{})) } // ReceiveAddrs adds the given addrs to the addrbook if theres an open @@ -398,7 +409,7 @@ func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { // SendAddrs sends addrs to the peer. func (r *Reactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) { - p.Send(PexChannel, cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: netAddrs})) + p.Send(PexChannel, mustEncode(&tmp2p.PexAddrs{Addrs: p2p.NetAddressesToProto(netAddrs)})) } // SetEnsurePeersPeriod sets period to ensure peers connected. @@ -409,7 +420,7 @@ func (r *Reactor) SetEnsurePeersPeriod(d time.Duration) { // Ensures that sufficient peers are connected. 
(continuous) func (r *Reactor) ensurePeersRoutine() { var ( - seed = rand.NewRand() + seed = tmrand.NewRand() jitter = seed.Int63n(r.ensurePeersPeriod.Nanoseconds()) ) @@ -545,8 +556,8 @@ func (r *Reactor) dialPeer(addr *p2p.NetAddress) error { // exponential backoff if it's not our first attempt to dial given address if attempts > 0 { - jitterSeconds := time.Duration(tmrand.Float64() * float64(time.Second)) // 1s == (1e9 ns) - backoffDuration := jitterSeconds + ((1 << uint(attempts)) * time.Second) + jitter := time.Duration(tmrand.Float64() * float64(time.Second)) // 1s == (1e9 ns) + backoffDuration := jitter + ((1 << uint(attempts)) * time.Second) backoffDuration = r.maxBackoffDurationForPeer(addr, backoffDuration) sinceLastDialed := time.Since(lastDialed) if sinceLastDialed < backoffDuration { @@ -568,7 +579,7 @@ func (r *Reactor) dialPeer(addr *p2p.NetAddress) error { default: r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()}) } - return errors.Wrapf(err, "dialing failed (attempts: %d)", attempts+1) + return fmt.Errorf("dialing failed (attempts: %d): %w", attempts+1, err) } // cleanup any history @@ -603,7 +614,7 @@ func (r *Reactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, err e case p2p.ErrNetAddressLookup: r.Logger.Error("Connecting to seed failed", "err", e) default: - return 0, nil, errors.Wrap(e, "seed node configuration has error") + return 0, nil, fmt.Errorf("seed node configuration has error: %w", e) } } return numOnline, netAddrs, nil @@ -760,41 +771,39 @@ func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) { //----------------------------------------------------------------------------- // Messages -// Message is a primary type for PEX messages. Underneath, it could contain -// either pexRequestMessage, or pexAddrsMessage messages. -type Message interface{} - -func RegisterMessages(cdc *amino.Codec) { - cdc.RegisterInterface((*Message)(nil), nil) - cdc.RegisterConcrete(&pexRequestMessage{}, "tendermint/p2p/PexRequestMessage", nil) - cdc.RegisterConcrete(&pexAddrsMessage{}, "tendermint/p2p/PexAddrsMessage", nil) -} - -func decodeMsg(bz []byte) (msg Message, err error) { - if len(bz) > maxMsgSize { - return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize) +// mustEncode proto encodes a tmp2p.Message +func mustEncode(pb proto.Message) []byte { + msg := tmp2p.Message{} + switch pb := pb.(type) { + case *tmp2p.PexRequest: + msg.Sum = &tmp2p.Message_PexRequest{PexRequest: pb} + case *tmp2p.PexAddrs: + msg.Sum = &tmp2p.Message_PexAddrs{PexAddrs: pb} + default: + panic(fmt.Sprintf("Unknown message type %T", pb)) } - err = cdc.UnmarshalBinaryBare(bz, &msg) - return -} -/* -A pexRequestMessage requests additional peer addresses. -*/ -type pexRequestMessage struct { + bz, err := msg.Marshal() + if err != nil { + panic(fmt.Errorf("unable to marshal %T: %w", pb, err)) + } + return bz } -func (m *pexRequestMessage) String() string { - return "[pexRequest]" -} +func decodeMsg(bz []byte) (proto.Message, error) { + pb := &tmp2p.Message{} -/* -A message with announced peer addresses. 
-*/ -type pexAddrsMessage struct { - Addrs []*p2p.NetAddress -} + err := pb.Unmarshal(bz) + if err != nil { + return nil, err + } -func (m *pexAddrsMessage) String() string { - return fmt.Sprintf("[pexAddrs %v]", m.Addrs) + switch msg := pb.Sum.(type) { + case *tmp2p.Message_PexRequest: + return msg.PexRequest, nil + case *tmp2p.Message_PexAddrs: + return msg.PexAddrs, nil + default: + return nil, fmt.Errorf("unknown message: %T", msg) + } } diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index 04f4149eb..4ed1254ef 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -1,6 +1,7 @@ package pex import ( + "encoding/hex" "fmt" "io/ioutil" "os" @@ -8,6 +9,7 @@ import ( "testing" "time" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -15,6 +17,7 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p/mock" + tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) var ( @@ -72,7 +75,7 @@ func TestPEXReactorRunning(t *testing.T) { // directory to store address books dir, err := ioutil.TempDir("", "pex_reactor") require.Nil(t, err) - defer os.RemoveAll(dir) // nolint: errcheck + defer os.RemoveAll(dir) books := make([]AddrBook, N) logger := log.TestingLogger() @@ -97,7 +100,8 @@ func TestPEXReactorRunning(t *testing.T) { addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) { addr := switches[otherSwitchIndex].NetAddress() - books[switchIndex].AddAddress(addr, addr) + err := books[switchIndex].AddAddress(addr, addr) + require.NoError(t, err) } addOtherNodeAddrToAddrBook(0, 1) @@ -113,7 +117,8 @@ func TestPEXReactorRunning(t *testing.T) { // stop them for _, s := range switches { - s.Stop() + err := s.Stop() + require.NoError(t, err) } } @@ -127,12 +132,11 @@ func TestPEXReactorReceive(t *testing.T) { r.RequestAddrs(peer) size := book.Size() - addrs := []*p2p.NetAddress{peer.SocketAddr()} - msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs}) + msg := mustEncode(&tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}}) r.Receive(PexChannel, peer, msg) assert.Equal(t, size+1, book.Size()) - msg = cdc.MustMarshalBinaryBare(&pexRequestMessage{}) + msg = mustEncode(&tmp2p.PexRequest{}) r.Receive(PexChannel, peer, msg) // should not panic. } @@ -147,11 +151,12 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) { peerAddr := peer.SocketAddr() p2p.AddPeerToSwitchPeerSet(sw, peer) assert.True(t, sw.Peers().Has(peer.ID())) - book.AddAddress(peerAddr, peerAddr) + err := book.AddAddress(peerAddr, peerAddr) + require.NoError(t, err) require.True(t, book.HasAddress(peerAddr)) id := string(peer.ID()) - msg := cdc.MustMarshalBinaryBare(&pexRequestMessage{}) + msg := mustEncode(&tmp2p.PexRequest{}) // first time creates the entry r.Receive(PexChannel, peer, msg) @@ -188,8 +193,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) { assert.True(t, r.requestsSent.Has(id)) assert.True(t, sw.Peers().Has(peer.ID())) - addrs := []*p2p.NetAddress{peer.SocketAddr()} - msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs}) + msg := mustEncode(&tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}}) // receive some addrs. 
should clear the request r.Receive(PexChannel, peer, msg) @@ -206,12 +210,12 @@ func TestCheckSeeds(t *testing.T) { // directory to store address books dir, err := ioutil.TempDir("", "pex_reactor") require.Nil(t, err) - defer os.RemoveAll(dir) // nolint: errcheck + defer os.RemoveAll(dir) // 1. test creating peer with no seeds works peerSwitch := testCreateDefaultPeer(dir, 0) require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() + peerSwitch.Stop() // nolint:errcheck // ignore for tests // 2. create seed seed := testCreateSeed(dir, 1, []*p2p.NetAddress{}, []*p2p.NetAddress{}) @@ -219,7 +223,7 @@ func TestCheckSeeds(t *testing.T) { // 3. test create peer with online seed works peerSwitch = testCreatePeerWithSeed(dir, 2, seed) require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() + peerSwitch.Stop() // nolint:errcheck // ignore for tests // 4. test create peer with all seeds having unresolvable DNS fails badPeerConfig := &ReactorConfig{ @@ -228,7 +232,7 @@ func TestCheckSeeds(t *testing.T) { } peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) require.Error(t, peerSwitch.Start()) - peerSwitch.Stop() + peerSwitch.Stop() // nolint:errcheck // ignore for tests // 5. test create peer with one good seed address succeeds badPeerConfig = &ReactorConfig{ @@ -238,24 +242,24 @@ func TestCheckSeeds(t *testing.T) { } peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() + peerSwitch.Stop() // nolint:errcheck // ignore for tests } func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { // directory to store address books dir, err := ioutil.TempDir("", "pex_reactor") require.Nil(t, err) - defer os.RemoveAll(dir) // nolint: errcheck + defer os.RemoveAll(dir) // 1. create seed seed := testCreateSeed(dir, 0, []*p2p.NetAddress{}, []*p2p.NetAddress{}) require.Nil(t, seed.Start()) - defer seed.Stop() + defer seed.Stop() // nolint:errcheck // ignore for tests // 2. create usual peer with only seed configured. peer := testCreatePeerWithSeed(dir, 1, seed) require.Nil(t, peer.Start()) - defer peer.Stop() + defer peer.Stop() // nolint:errcheck // ignore for tests // 3. check that the peer connects to seed immediately assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1) @@ -265,23 +269,23 @@ func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) { // directory to store address books dir, err := ioutil.TempDir("", "pex_reactor") require.Nil(t, err) - defer os.RemoveAll(dir) // nolint: errcheck + defer os.RemoveAll(dir) // 1. create peer peerSwitch := testCreateDefaultPeer(dir, 1) require.Nil(t, peerSwitch.Start()) - defer peerSwitch.Stop() + defer peerSwitch.Stop() // nolint:errcheck // ignore for tests // 2. Create seed which knows about the peer peerAddr := peerSwitch.NetAddress() seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr}) require.Nil(t, seed.Start()) - defer seed.Stop() + defer seed.Stop() // nolint:errcheck // ignore for tests // 3. create another peer with only seed configured. secondPeer := testCreatePeerWithSeed(dir, 3, seed) require.Nil(t, secondPeer.Start()) - defer secondPeer.Stop() + defer secondPeer.Stop() // nolint:errcheck // ignore for tests // 4. 
check that the second peer connects to seed immediately assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 3*time.Second, 1) @@ -294,7 +298,7 @@ func TestPEXReactorSeedMode(t *testing.T) { // directory to store address books dir, err := ioutil.TempDir("", "pex_reactor") require.Nil(t, err) - defer os.RemoveAll(dir) // nolint: errcheck + defer os.RemoveAll(dir) pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond} pexR, book := createReactor(pexRConfig) @@ -304,13 +308,13 @@ func TestPEXReactorSeedMode(t *testing.T) { sw.SetAddrBook(book) err = sw.Start() require.NoError(t, err) - defer sw.Stop() + defer sw.Stop() // nolint:errcheck // ignore for tests assert.Zero(t, sw.Peers().Size()) peerSwitch := testCreateDefaultPeer(dir, 1) require.NoError(t, peerSwitch.Start()) - defer peerSwitch.Stop() + defer peerSwitch.Stop() // nolint:errcheck // ignore for tests // 1. Test crawlPeers dials the peer pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) @@ -333,7 +337,7 @@ func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { // directory to store address books dir, err := ioutil.TempDir("", "pex_reactor") require.Nil(t, err) - defer os.RemoveAll(dir) // nolint: errcheck + defer os.RemoveAll(dir) pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 1 * time.Millisecond} pexR, book := createReactor(pexRConfig) @@ -343,13 +347,13 @@ func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { sw.SetAddrBook(book) err = sw.Start() require.NoError(t, err) - defer sw.Stop() + defer sw.Stop() // nolint:errcheck // ignore for tests assert.Zero(t, sw.Peers().Size()) peerSwitch := testCreateDefaultPeer(dir, 1) require.NoError(t, peerSwitch.Start()) - defer peerSwitch.Stop() + defer peerSwitch.Stop() // nolint:errcheck // ignore for tests err = sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()}) require.NoError(t, err) @@ -371,7 +375,7 @@ func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) { // directory to store address books dir, err := ioutil.TempDir("", "pex_reactor") require.Nil(t, err) - defer os.RemoveAll(dir) // nolint: errcheck + defer os.RemoveAll(dir) pexR, book := createReactor(&ReactorConfig{SeedMode: true}) defer teardownReactor(book) @@ -407,7 +411,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) { // directory to store address books dir, err := ioutil.TempDir("", "pex_reactor") require.Nil(t, err) - defer os.RemoveAll(dir) // nolint: errcheck + defer os.RemoveAll(dir) books := make([]AddrBook, N) logger := log.TestingLogger() @@ -461,12 +465,14 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) { // it should be safe to do this. 
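assertPeersWithTimeout above, like the polling loops introduced later in this diff, follows one pattern: poll a condition at a short interval instead of sleeping for a fixed time, and give up after a deadline. A generic sketch of the pattern; the helper name and signature are illustrative, not part of the codebase.

package example

import "time"

// waitFor polls cond every interval until it returns true or the
// timeout elapses, reporting whether the condition was ever met.
func waitFor(cond func() bool, interval, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(interval)
	}
	return false
}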
peers := switches[0].Peers().List() for _, peer := range peers { - peer.Stop() + err := peer.Stop() + require.NoError(t, err) } // stop the switches for _, s := range switches { - s.Stop() + err := s.Stop() + require.NoError(t, err) } } @@ -481,8 +487,7 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { pexR.RequestAddrs(peer) size := book.Size() - addrs := []*p2p.NetAddress{peer.SocketAddr()} - msg := cdc.MustMarshalBinaryBare(&pexAddrsMessage{Addrs: addrs}) + msg := mustEncode(&tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{peer.SocketAddr().ToProto()}}) pexR.Receive(PexChannel, peer, msg) assert.Equal(t, size, book.Size()) @@ -503,12 +508,14 @@ func TestPEXReactorDialPeer(t *testing.T) { assert.Equal(t, 0, pexR.AttemptsToDial(addr)) // 1st unsuccessful attempt - pexR.dialPeer(addr) + err := pexR.dialPeer(addr) + require.Error(t, err) assert.Equal(t, 1, pexR.AttemptsToDial(addr)) // 2nd unsuccessful attempt - pexR.dialPeer(addr) + err = pexR.dialPeer(addr) + require.Error(t, err) // must be skipped because it is too early assert.Equal(t, 1, pexR.AttemptsToDial(addr)) @@ -517,7 +524,8 @@ func TestPEXReactorDialPeer(t *testing.T) { time.Sleep(3 * time.Second) // 3rd attempt - pexR.dialPeer(addr) + err = pexR.dialPeer(addr) + require.Error(t, err) assert.Equal(t, 2, pexR.AttemptsToDial(addr)) } @@ -611,7 +619,7 @@ func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) book := NewAddrBook(filepath.Join(dir, "addrbookSeed.json"), false) book.SetLogger(log.TestingLogger()) for j := 0; j < len(knownAddrs); j++ { - book.AddAddress(knownAddrs[j], srcAddrs[j]) + book.AddAddress(knownAddrs[j], srcAddrs[j]) // nolint:errcheck // ignore for tests book.MarkGood(knownAddrs[j].ID) } sw.SetAddrBook(book) @@ -667,3 +675,29 @@ func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { } return sw } + +func TestPexVectors(t *testing.T) { + + addr := tmp2p.NetAddress{ + ID: "1", + IP: "127.0.0.1", + Port: 9090, + } + + testCases := []struct { + testName string + msg proto.Message + expBytes string + }{ + {"PexRequest", &tmp2p.PexRequest{}, "0a00"}, + {"PexAddrs", &tmp2p.PexAddrs{Addrs: []tmp2p.NetAddress{addr}}, "12130a110a013112093132372e302e302e31188247"}, + } + + for _, tc := range testCases { + tc := tc + + bz := mustEncode(tc.msg) + + require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) + } +} diff --git a/p2p/switch.go b/p2p/switch.go index 3f9325808..f4f335b32 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -6,8 +6,6 @@ import ( "sync" "time" - "github.com/pkg/errors" - "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cmap" "github.com/tendermint/tendermint/libs/rand" @@ -48,6 +46,7 @@ func MConnConfig(cfg *config.P2PConfig) conn.MConnConfig { // to store peer addresses. 
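The new TestPexVectors pins the exact protobuf wire bytes so that refactors cannot silently change the encoding. Assuming the tendermint module is on the import path, a vector such as "0a00" can be regenerated as below: an empty PexRequest in field 1 of the Message oneof encodes as tag byte 0x0a followed by length byte 0x00.

package main

import (
	"encoding/hex"
	"fmt"

	tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
)

// Regenerates the "PexRequest" test vector from above.
func main() {
	msg := tmp2p.Message{Sum: &tmp2p.Message_PexRequest{PexRequest: &tmp2p.PexRequest{}}}
	bz, err := msg.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(bz)) // 0a00
}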
type AddrBook interface { AddAddress(addr *NetAddress, src *NetAddress) error + AddPrivateIDs([]string) AddOurAddress(*NetAddress) OurAddress(*NetAddress) bool MarkGood(ID) @@ -226,7 +225,7 @@ func (sw *Switch) OnStart() error { for _, reactor := range sw.reactors { err := reactor.Start() if err != nil { - return errors.Wrapf(err, "failed to start %v", reactor) + return fmt.Errorf("failed to start %v: %w", reactor, err) } } @@ -246,7 +245,9 @@ func (sw *Switch) OnStop() { // Stop reactors sw.Logger.Debug("Switch: Stopping reactors") for _, reactor := range sw.reactors { - reactor.Stop() + if err := reactor.Stop(); err != nil { + sw.Logger.Error("error while stopping reactor", "reactor", reactor, "error", err) + } } } @@ -321,6 +322,10 @@ func (sw *Switch) Peers() IPeerSet { // If the peer is persistent, it will attempt to reconnect. // TODO: make record depending on reason. func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { + if !peer.IsRunning() { + return + } + sw.Logger.Error("Stopping peer for error", "peer", peer, "err", reason) sw.stopAndRemovePeer(peer, reason) @@ -350,7 +355,9 @@ func (sw *Switch) StopPeerGracefully(peer Peer) { func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { sw.transport.Cleanup(peer) - peer.Stop() + if err := peer.Stop(); err != nil { + sw.Logger.Error("error while stopping peer", "error", err) // TODO: should return error to be handled accordingly + } for _, reactor := range sw.reactors { reactor.RemovePeer(peer, reason) @@ -443,7 +450,7 @@ type privateAddr interface { } func isPrivateAddr(err error) bool { - te, ok := errors.Cause(err).(privateAddr) + te, ok := err.(privateAddr) return ok && te.PrivateAddr() } @@ -577,13 +584,28 @@ func (sw *Switch) AddUnconditionalPeerIDs(ids []string) error { for i, id := range ids { err := validateID(ID(id)) if err != nil { - return errors.Wrapf(err, "wrong ID #%d", i) + return fmt.Errorf("wrong ID #%d: %w", i, err) } sw.unconditionalPeerIDs[ID(id)] = struct{}{} } return nil } +func (sw *Switch) AddPrivatePeerIDs(ids []string) error { + validIDs := make([]string, 0, len(ids)) + for i, id := range ids { + err := validateID(ID(id)) + if err != nil { + return fmt.Errorf("wrong ID #%d: %w", i, err) + } + validIDs = append(validIDs, id) + } + + sw.addrBook.AddPrivateIDs(validIDs) + + return nil +} + func (sw *Switch) IsPeerPersistent(na *NetAddress) bool { for _, pa := range sw.persistentPeersAddrs { if pa.Equals(na) { diff --git a/p2p/switch_test.go b/p2p/switch_test.go index 50e0adb3a..538ccfcc3 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -11,7 +11,6 @@ import ( "net/http/httptest" "regexp" "strconv" - "sync" "sync/atomic" "testing" "time" @@ -23,6 +22,7 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/libs/log" + tmsync "github.com/tendermint/tendermint/libs/sync" "github.com/tendermint/tendermint/p2p/conn" ) @@ -45,7 +45,7 @@ type PeerMessage struct { type TestReactor struct { BaseReactor - mtx sync.Mutex + mtx tmsync.Mutex channels []*conn.ChannelDescriptor logMessages bool msgsCounter int @@ -75,7 +75,7 @@ func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) { if tr.logMessages { tr.mtx.Lock() defer tr.mtx.Unlock() - //fmt.Printf("Received: %X, %X\n", chID, msgBytes) + // fmt.Printf("Received: %X, %X\n", chID, msgBytes) tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter}) tr.msgsCounter++ } @@ -98,9 +98,9 @@ func
MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switc } func initSwitchFunc(i int, sw *Switch) *Switch { - sw.SetAddrBook(&addrBookMock{ - addrs: make(map[string]struct{}), - ourAddrs: make(map[string]struct{})}) + sw.SetAddrBook(&AddrBookMock{ + Addrs: make(map[string]struct{}), + OurAddrs: make(map[string]struct{})}) // Make two reactors of two channels each sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ @@ -117,8 +117,16 @@ func initSwitchFunc(i int, sw *Switch) *Switch { func TestSwitches(t *testing.T) { s1, s2 := MakeSwitchPair(t, initSwitchFunc) - defer s1.Stop() - defer s2.Stop() + t.Cleanup(func() { + if err := s1.Stop(); err != nil { + t.Error(err) + } + }) + t.Cleanup(func() { + if err := s2.Stop(); err != nil { + t.Error(err) + } + }) if s1.Peers().Size() != 1 { t.Errorf("expected exactly 1 peer in s1, got %v", s1.Peers().Size()) @@ -219,12 +227,18 @@ func TestSwitchPeerFilter(t *testing.T) { SwitchPeerFilters(filters...), ) ) - defer sw.Stop() + err := sw.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := sw.Stop(); err != nil { + t.Error(err) + } + }) // simulate remote peer rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} rp.Start() - defer rp.Stop() + t.Cleanup(rp.Stop) p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ chDescs: sw.chDescs, @@ -264,7 +278,13 @@ func TestSwitchPeerFilterTimeout(t *testing.T) { SwitchPeerFilters(filters...), ) ) - defer sw.Stop() + err := sw.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := sw.Stop(); err != nil { + t.Log(err) + } + }) // simulate remote peer rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} @@ -289,8 +309,13 @@ func TestSwitchPeerFilterTimeout(t *testing.T) { func TestSwitchPeerFilterDuplicate(t *testing.T) { sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) - sw.Start() - defer sw.Stop() + err := sw.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := sw.Stop(); err != nil { + t.Error(err) + } + }) // simulate remote peer rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} @@ -336,7 +361,11 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { if err != nil { t.Error(err) } - defer sw.Stop() + t.Cleanup(func() { + if err := sw.Stop(); err != nil { + t.Error(err) + } + }) // simulate remote peer rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} @@ -357,7 +386,8 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { require.NotNil(sw.Peers().Get(rp.ID())) // simulate failure by closing connection - p.(*peer).CloseConn() + err = p.(*peer).CloseConn() + require.NoError(err) assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond) assert.False(p.IsRunning()) @@ -369,7 +399,7 @@ func TestSwitchStopPeerForError(t *testing.T) { scrapeMetrics := func() string { resp, err := http.Get(s.URL) - assert.NoError(t, err) + require.NoError(t, err) defer resp.Body.Close() buf, _ := ioutil.ReadAll(resp.Body) return string(buf) @@ -404,7 +434,11 @@ func TestSwitchStopPeerForError(t *testing.T) { // stop sw2. this should cause the p to fail, // which results in calling StopPeerForError internally - sw2.Stop() + t.Cleanup(func() { + if err := sw2.Stop(); err != nil { + t.Error(err) + } + }) // now call StopPeerForError explicitly, eg. 
from a reactor sw1.StopPeerForError(p, fmt.Errorf("some err")) @@ -417,7 +451,11 @@ func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) { sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) err := sw.Start() require.NoError(t, err) - defer sw.Stop() + t.Cleanup(func() { + if err := sw.Stop(); err != nil { + t.Error(err) + } + }) // 1. simulate failure by closing connection rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} @@ -432,7 +470,8 @@ func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) { require.NotNil(t, sw.Peers().Get(rp.ID())) p := sw.Peers().List()[0] - p.(*peer).CloseConn() + err = p.(*peer).CloseConn() + require.NoError(t, err) waitUntilSwitchHasAtLeastNPeers(sw, 1) assert.False(t, p.IsRunning()) // old peer instance @@ -462,7 +501,11 @@ func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) { sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) err := sw.Start() require.NoError(t, err) - defer sw.Stop() + t.Cleanup(func() { + if err := sw.Stop(); err != nil { + t.Error(err) + } + }) // 1. simulate failure by closing the connection rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} @@ -491,7 +534,11 @@ func TestSwitchDialPeersAsync(t *testing.T) { sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) err := sw.Start() require.NoError(t, err) - defer sw.Stop() + t.Cleanup(func() { + if err := sw.Stop(); err != nil { + t.Error(err) + } + }) rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} rp.Start() @@ -517,7 +564,12 @@ func TestSwitchFullConnectivity(t *testing.T) { switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches) defer func() { for _, sw := range switches { - sw.Stop() + sw := sw + t.Cleanup(func() { + if err := sw.Stop(); err != nil { + t.Error(err) + } + }) } }() @@ -546,10 +598,15 @@ func TestSwitchAcceptRoutine(t *testing.T) { // make switch sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) - sw.AddUnconditionalPeerIDs(unconditionalPeerIDs) - err := sw.Start() + err := sw.AddUnconditionalPeerIDs(unconditionalPeerIDs) + require.NoError(t, err) + err = sw.Start() require.NoError(t, err) - defer sw.Stop() + t.Cleanup(func() { + if err := sw.Stop(); err != nil { + t.Error(err) + } + }) // 0. 
check there are no peers assert.Equal(t, 0, sw.Peers().Size()) @@ -583,7 +640,8 @@ func TestSwitchAcceptRoutine(t *testing.T) { require.NoError(t, err) // check conn is closed one := make([]byte, 1) - conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) + err = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) + require.NoError(t, err) _, err = conn.Read(one) assert.Equal(t, io.EOF, err) assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size()) @@ -637,23 +695,26 @@ func TestSwitchAcceptRoutineErrorCases(t *testing.T) { sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}}) assert.NotPanics(t, func() { err := sw.Start() - assert.NoError(t, err) - sw.Stop() + require.NoError(t, err) + err = sw.Stop() + require.NoError(t, err) }) sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}}) assert.NotPanics(t, func() { err := sw.Start() - assert.NoError(t, err) - sw.Stop() + require.NoError(t, err) + err = sw.Stop() + require.NoError(t, err) }) // TODO(melekes) check we remove our address from addrBook sw = NewSwitch(cfg, errorTransport{ErrTransportClosed{}}) assert.NotPanics(t, func() { err := sw.Start() - assert.NoError(t, err) - sw.Stop() + require.NoError(t, err) + err = sw.Stop() + require.NoError(t, err) }) } @@ -698,7 +759,11 @@ func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { }) err := sw.Start() require.NoError(t, err) - defer sw.Stop() + t.Cleanup(func() { + if err := sw.Stop(); err != nil { + t.Error(err) + } + }) // add peer rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} @@ -706,11 +771,15 @@ func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { defer rp.Stop() _, err = rp.Dial(sw.NetAddress()) require.NoError(t, err) - // wait till the switch adds rp to the peer set - time.Sleep(50 * time.Millisecond) - // stop peer asynchronously - go sw.StopPeerForError(sw.Peers().Get(rp.ID()), "test") + // wait till the switch adds rp to the peer set, then stop the peer asynchronously + for { + time.Sleep(20 * time.Millisecond) + if peer := sw.Peers().Get(rp.ID()); peer != nil { + go sw.StopPeerForError(peer, "test") + break + } + } // simulate peer reconnecting to us _, err = rp.Dial(sw.NetAddress()) @@ -735,8 +804,18 @@ func BenchmarkSwitchBroadcast(b *testing.B) { }, false)) return sw }) - defer s1.Stop() - defer s2.Stop() + + b.Cleanup(func() { + if err := s1.Stop(); err != nil { + b.Error(err) + } + }) + + b.Cleanup(func() { + if err := s2.Stop(); err != nil { + b.Error(err) + } + }) // Allow time for goroutines to boot up time.Sleep(1 * time.Second) @@ -760,29 +839,3 @@ func BenchmarkSwitchBroadcast(b *testing.B) { b.Logf("success: %v, failure: %v", numSuccess, numFailure) } - -type addrBookMock struct { - addrs map[string]struct{} - ourAddrs map[string]struct{} -} - -var _ AddrBook = (*addrBookMock)(nil) - -func (book *addrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error { - book.addrs[addr.String()] = struct{}{} - return nil -} -func (book *addrBookMock) AddOurAddress(addr *NetAddress) { book.ourAddrs[addr.String()] = struct{}{} } -func (book *addrBookMock) OurAddress(addr *NetAddress) bool { - _, ok := book.ourAddrs[addr.String()] - return ok -} -func (book *addrBookMock) MarkGood(ID) {} -func (book *addrBookMock) HasAddress(addr *NetAddress) bool { - _, ok := book.addrs[addr.String()] - return ok -} -func (book *addrBookMock) RemoveAddress(addr *NetAddress) { - delete(book.addrs, addr.String()) -} -func (book *addrBookMock) Save() {} diff --git 
a/p2p/test_util.go b/p2p/test_util.go index 045dc4c7e..88bbb4d8b 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -5,8 +5,6 @@ import ( "net" "time" - "github.com/pkg/errors" - "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/libs/log" @@ -31,7 +29,7 @@ func (ni mockNodeInfo) Validate() error { return nil } func (ni mockNodeInfo) CompatibleWith(other NodeInfo) error { return nil } func AddPeerToSwitchPeerSet(sw *Switch, peer Peer) { - sw.peers.Add(peer) + sw.peers.Add(peer) //nolint:errcheck // ignore error } func CreateRandomPeer(outbound bool) Peer { @@ -236,16 +234,10 @@ func testPeerConn( ) (pc peerConn, err error) { conn := rawConn - // Fuzz connection - if cfg.TestFuzz { - // so we have time to do peer handshakes and get set up - conn = FuzzConnAfterFromConfig(conn, 10*time.Second, cfg.TestFuzzConfig) - } - // Encrypt connection conn, err = upgradeSecretConn(conn, cfg.HandshakeTimeout, ourNodePrivKey) if err != nil { - return pc, errors.Wrap(err, "Error creating peer") + return pc, fmt.Errorf("error creating peer: %w", err) } // Only the information we already have @@ -282,3 +274,35 @@ func getFreePort() int { } return port } + +type AddrBookMock struct { + Addrs map[string]struct{} + OurAddrs map[string]struct{} + PrivateAddrs map[string]struct{} +} + +var _ AddrBook = (*AddrBookMock)(nil) + +func (book *AddrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error { + book.Addrs[addr.String()] = struct{}{} + return nil +} +func (book *AddrBookMock) AddOurAddress(addr *NetAddress) { book.OurAddrs[addr.String()] = struct{}{} } +func (book *AddrBookMock) OurAddress(addr *NetAddress) bool { + _, ok := book.OurAddrs[addr.String()] + return ok +} +func (book *AddrBookMock) MarkGood(ID) {} +func (book *AddrBookMock) HasAddress(addr *NetAddress) bool { + _, ok := book.Addrs[addr.String()] + return ok +} +func (book *AddrBookMock) RemoveAddress(addr *NetAddress) { + delete(book.Addrs, addr.String()) +} +func (book *AddrBookMock) Save() {} +func (book *AddrBookMock) AddPrivateIDs(addrs []string) { + for _, addr := range addrs { + book.PrivateAddrs[addr] = struct{}{} + } +} diff --git a/p2p/transport.go b/p2p/transport.go index 6b749c61f..e597ac0a1 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -6,11 +6,12 @@ import ( "net" "time" - "github.com/pkg/errors" "golang.org/x/net/netutil" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/libs/protoio" "github.com/tendermint/tendermint/p2p/conn" + tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) const ( @@ -288,7 +289,7 @@ func (mt *MultiplexTransport) acceptPeers() { if r := recover(); r != nil { err := ErrRejected{ conn: c, - err: errors.Errorf("recovered from panic: %v", r), + err: fmt.Errorf("recovered from panic: %v", r), isAuthFailure: true, } select { @@ -525,20 +526,18 @@ func handshake( var ( errc = make(chan error, 2) - peerNodeInfo DefaultNodeInfo - ourNodeInfo = nodeInfo.(DefaultNodeInfo) + pbpeerNodeInfo tmp2p.DefaultNodeInfo + peerNodeInfo DefaultNodeInfo + ourNodeInfo = nodeInfo.(DefaultNodeInfo) ) go func(errc chan<- error, c net.Conn) { - _, err := cdc.MarshalBinaryLengthPrefixedWriter(c, ourNodeInfo) + _, err := protoio.NewDelimitedWriter(c).WriteMsg(ourNodeInfo.ToProto()) errc <- err }(errc, c) go func(errc chan<- error, c net.Conn) { - _, err := cdc.UnmarshalBinaryLengthPrefixedReader( - c, - &peerNodeInfo, - int64(MaxNodeInfoSize()), - ) + protoReader := protoio.NewDelimitedReader(c, 
MaxNodeInfoSize()) + err := protoReader.ReadMsg(&pbpeerNodeInfo) errc <- err }(errc, c) @@ -549,6 +548,11 @@ func handshake( } } + peerNodeInfo, err := DefaultNodeInfoFromToProto(&pbpeerNodeInfo) + if err != nil { + return nil, err + } + return peerNodeInfo, c.SetDeadline(time.Time{}) } diff --git a/p2p/transport_test.go b/p2p/transport_test.go index 2fc69ce05..9b81dcd63 100644 --- a/p2p/transport_test.go +++ b/p2p/transport_test.go @@ -5,12 +5,15 @@ import ( "math/rand" "net" "reflect" + "runtime" "strings" "testing" "time" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/libs/protoio" "github.com/tendermint/tendermint/p2p/conn" + tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) var defaultNodeName = "host_peer" @@ -78,10 +81,10 @@ func TestTransportMultiplexConnFilter(t *testing.T) { _, err = mt.Accept(peerConfig{}) if err, ok := err.(ErrRejected); ok { if !err.IsFiltered() { - t.Errorf("expected peer to be filtered") + t.Errorf("expected peer to be filtered, got %v", err) } } else { - t.Errorf("expected ErrRejected") + t.Errorf("expected ErrRejected, got %v", err) } } @@ -97,7 +100,7 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) { MultiplexTransportFilterTimeout(5 * time.Millisecond)(mt) MultiplexTransportConnFilters( func(_ ConnSet, _ net.Conn, _ []net.IP) error { - time.Sleep(10 * time.Millisecond) + time.Sleep(1 * time.Second) return nil }, )(mt) @@ -112,7 +115,6 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) { } errc := make(chan error) - go func() { addr := NewNetAddress(id, mt.listener.Addr()) @@ -131,18 +133,21 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) { _, err = mt.Accept(peerConfig{}) if _, ok := err.(ErrFilterTimeout); !ok { - t.Errorf("expected ErrFilterTimeout") + t.Errorf("expected ErrFilterTimeout, got %v", err) } } func TestTransportMultiplexMaxIncomingConnections(t *testing.T) { + pv := ed25519.GenPrivKey() + id := PubKeyToID(pv.PubKey()) mt := newMultiplexTransport( - emptyNodeInfo(), + testNodeInfo( + id, "transport", + ), NodeKey{ - PrivKey: ed25519.GenPrivKey(), + PrivKey: pv, }, ) - id := mt.nodeKey.ID() MultiplexTransportMaxIncomingConnections(0)(mt) @@ -150,32 +155,34 @@ func TestTransportMultiplexMaxIncomingConnections(t *testing.T) { if err != nil { t.Fatal(err) } - + const maxIncomingConns = 2 + MultiplexTransportMaxIncomingConnections(maxIncomingConns)(mt) if err := mt.Listen(*addr); err != nil { t.Fatal(err) } - errc := make(chan error) + laddr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr()) - go func() { - addr := NewNetAddress(id, mt.listener.Addr()) + // Connect more peers than max + for i := 0; i <= maxIncomingConns; i++ { + errc := make(chan error) + go testDialer(*laddr, errc) - _, err := addr.Dial() - if err != nil { - errc <- err - return + err = <-errc + if i < maxIncomingConns { + if err != nil { + t.Errorf("dialer connection failed: %v", err) + } + _, err = mt.Accept(peerConfig{}) + if err != nil { + t.Errorf("connection failed: %v", err) + } + } else if err == nil || !strings.Contains(err.Error(), "i/o timeout") { + // mt actually blocks forever on trying to accept a new peer into a full channel so + // expect the dialer to encounter a timeout error. Calling mt.Accept will block until + // mt is closed. 
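The handshake above now exchanges DefaultNodeInfo as varint-length-delimited protobuf via the libs/protoio helpers instead of amino. A round-trip sketch over an in-memory buffer, reusing the writer/reader calls shown above and assuming the tendermint module; the 1024-byte limit is arbitrary for the example, where the real handshake caps reads at MaxNodeInfoSize().

package main

import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/libs/protoio"
	tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
)

func main() {
	var buf bytes.Buffer

	// Write a varint length prefix followed by the message bytes.
	ni := tmp2p.DefaultNodeInfo{Moniker: "node0"}
	if _, err := protoio.NewDelimitedWriter(&buf).WriteMsg(&ni); err != nil {
		panic(err)
	}

	// Read it back, refusing anything larger than the given limit.
	var got tmp2p.DefaultNodeInfo
	if err := protoio.NewDelimitedReader(&buf, 1024).ReadMsg(&got); err != nil {
		panic(err)
	}
	fmt.Println(got.Moniker) // node0
}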
+ t.Errorf("expected i/o timeout error, got %v", err) } - - close(errc) - }() - - if err := <-errc; err != nil { - t.Errorf("connection failed: %v", err) - } - - _, err = mt.Accept(peerConfig{}) - if err == nil || !strings.Contains(err.Error(), "connection reset by peer") { - t.Errorf("expected connection reset by peer error, got %v", err) } } @@ -263,6 +270,7 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { errc = make(chan error) fastc = make(chan struct{}) slowc = make(chan struct{}) + slowdonec = make(chan struct{}) ) // Simulate slow Peer. @@ -276,29 +284,34 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { } close(slowc) + defer func() { + close(slowdonec) + }() + + // Make sure we switch to fast peer goroutine. + runtime.Gosched() select { case <-fastc: // Fast peer connected. - case <-time.After(50 * time.Millisecond): + case <-time.After(200 * time.Millisecond): // We error if the fast peer didn't succeed. errc <- fmt.Errorf("fast peer timed out") } - sc, err := upgradeSecretConn(c, 20*time.Millisecond, ed25519.GenPrivKey()) + sc, err := upgradeSecretConn(c, 200*time.Millisecond, ed25519.GenPrivKey()) if err != nil { errc <- err return } - _, err = handshake(sc, 20*time.Millisecond, + _, err = handshake(sc, 200*time.Millisecond, testNodeInfo( PubKeyToID(ed25519.GenPrivKey().PubKey()), "slow_peer", )) if err != nil { errc <- err - return } }() @@ -322,12 +335,13 @@ func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { return } - close(errc) close(fastc) + <-slowdonec + close(errc) }() if err := <-errc; err != nil { - t.Errorf("connection failed: %v", err) + t.Logf("connection failed: %v", err) } p, err := mt.Accept(peerConfig{}) @@ -374,10 +388,10 @@ func TestTransportMultiplexValidateNodeInfo(t *testing.T) { _, err := mt.Accept(peerConfig{}) if err, ok := err.(ErrRejected); ok { if !err.IsNodeInfoInvalid() { - t.Errorf("expected NodeInfo to be invalid") + t.Errorf("expected NodeInfo to be invalid, got %v", err) } } else { - t.Errorf("expected ErrRejected") + t.Errorf("expected ErrRejected, got %v", err) } } @@ -413,10 +427,10 @@ func TestTransportMultiplexRejectMissmatchID(t *testing.T) { _, err := mt.Accept(peerConfig{}) if err, ok := err.(ErrRejected); ok { if !err.IsAuthFailure() { - t.Errorf("expected auth failure") + t.Errorf("expected auth failure, got %v", err) } } else { - t.Errorf("expected ErrRejected") + t.Errorf("expected ErrRejected, got %v", err) } } @@ -441,10 +455,10 @@ func TestTransportMultiplexDialRejectWrongID(t *testing.T) { t.Logf("connection failed: %v", err) if err, ok := err.(ErrRejected); ok { if !err.IsAuthFailure() { - t.Errorf("expected auth failure") + t.Errorf("expected auth failure, got %v", err) } } else { - t.Errorf("expected ErrRejected") + t.Errorf("expected ErrRejected, got %v", err) } } } @@ -478,10 +492,10 @@ func TestTransportMultiplexRejectIncompatible(t *testing.T) { _, err := mt.Accept(peerConfig{}) if err, ok := err.(ErrRejected); ok { if !err.IsIncompatible() { - t.Errorf("expected to reject incompatible") + t.Errorf("expected to reject incompatible, got %v", err) } } else { - t.Errorf("expected ErrRejected") + t.Errorf("expected ErrRejected, got %v", err) } } @@ -508,7 +522,7 @@ func TestTransportMultiplexRejectSelf(t *testing.T) { t.Errorf("expected to reject self, got: %v", err) } } else { - t.Errorf("expected ErrRejected") + t.Errorf("expected ErrRejected, got %v", err) } } else { t.Errorf("expected connection failure") @@ -520,7 +534,7 @@ func TestTransportMultiplexRejectSelf(t *testing.T) { 
t.Errorf("expected to reject self, got: %v", err) } } else { - t.Errorf("expected ErrRejected") + t.Errorf("expected ErrRejected, got %v", nil) } } @@ -568,19 +582,24 @@ func TestTransportHandshake(t *testing.T) { } go func(c net.Conn) { - _, err := cdc.MarshalBinaryLengthPrefixedWriter(c, peerNodeInfo.(DefaultNodeInfo)) + _, err := protoio.NewDelimitedWriter(c).WriteMsg(peerNodeInfo.(DefaultNodeInfo).ToProto()) if err != nil { t.Error(err) } }(c) go func(c net.Conn) { - var ni DefaultNodeInfo - - _, err := cdc.UnmarshalBinaryLengthPrefixedReader( - c, - &ni, - int64(MaxNodeInfoSize()), + var ( + // ni DefaultNodeInfo + pbni tmp2p.DefaultNodeInfo ) + + protoReader := protoio.NewDelimitedReader(c, MaxNodeInfoSize()) + err := protoReader.ReadMsg(&pbni) + if err != nil { + t.Error(err) + } + + _, err = DefaultNodeInfoFromToProto(&pbni) if err != nil { t.Error(err) } @@ -626,6 +645,9 @@ func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport { t.Fatal(err) } + // give the listener some time to get ready + time.Sleep(20 * time.Millisecond) + return mt } diff --git a/p2p/trust/metric.go b/p2p/trust/metric.go index b54415ed8..dd2d75d43 100644 --- a/p2p/trust/metric.go +++ b/p2p/trust/metric.go @@ -5,10 +5,10 @@ package trust import ( "math" - "sync" "time" "github.com/tendermint/tendermint/libs/service" + tmsync "github.com/tendermint/tendermint/libs/sync" ) //--------------------------------------------------------------------------------------- @@ -36,7 +36,7 @@ type Metric struct { service.BaseService // Mutex that protects the metric from concurrent access - mtx sync.Mutex + mtx tmsync.Mutex // Determines the percentage given to current behavior proportionalWeight float64 diff --git a/p2p/trust/metric_test.go b/p2p/trust/metric_test.go index 50b528972..65caf38a2 100644 --- a/p2p/trust/metric_test.go +++ b/p2p/trust/metric_test.go @@ -5,11 +5,13 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestTrustMetricScores(t *testing.T) { tm := NewMetric() - tm.Start() + err := tm.Start() + require.NoError(t, err) // Perfect score tm.GoodEvents(1) @@ -20,7 +22,8 @@ func TestTrustMetricScores(t *testing.T) { tm.BadEvents(10) score = tm.TrustScore() assert.NotEqual(t, 100, score) - tm.Stop() + err = tm.Stop() + require.NoError(t, err) } func TestTrustMetricConfig(t *testing.T) { @@ -32,7 +35,8 @@ func TestTrustMetricConfig(t *testing.T) { } tm := NewMetricWithConfig(config) - tm.Start() + err := tm.Start() + require.NoError(t, err) // The max time intervals should be the TrackingWindow / IntervalLen assert.Equal(t, int(config.TrackingWindow/config.IntervalLength), tm.maxIntervals) @@ -41,18 +45,21 @@ func TestTrustMetricConfig(t *testing.T) { // These weights should still be the default values assert.Equal(t, dc.ProportionalWeight, tm.proportionalWeight) assert.Equal(t, dc.IntegralWeight, tm.integralWeight) - tm.Stop() + err = tm.Stop() + require.NoError(t, err) tm.Wait() config.ProportionalWeight = 0.3 config.IntegralWeight = 0.7 tm = NewMetricWithConfig(config) - tm.Start() + err = tm.Start() + require.NoError(t, err) // These weights should be equal to our custom values assert.Equal(t, config.ProportionalWeight, tm.proportionalWeight) assert.Equal(t, config.IntegralWeight, tm.integralWeight) - tm.Stop() + err = tm.Stop() + require.NoError(t, err) tm.Wait() } @@ -72,7 +79,8 @@ func _TestTrustMetricStopPause(t *testing.T) { tt := NewTestTicker() tm := NewMetric() tm.SetTicker(tt) - tm.Start() + err := tm.Start() + require.NoError(t, err) 
// Allow some time intervals to pass and pause tt.NextTick() tt.NextTick() @@ -91,7 +99,8 @@ func _TestTrustMetricStopPause(t *testing.T) { // Allow some time intervals to pass and stop tt.NextTick() tt.NextTick() - tm.Stop() + err = tm.Stop() + require.NoError(t, err) tm.Wait() second := tm.Copy().numIntervals diff --git a/p2p/trust/store.go b/p2p/trust/store.go index 166b26b1c..e7233c915 100644 --- a/p2p/trust/store.go +++ b/p2p/trust/store.go @@ -6,12 +6,12 @@ package trust import ( "encoding/json" "fmt" - "sync" "time" dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/libs/service" + tmsync "github.com/tendermint/tendermint/libs/sync" ) const defaultStorePeriodicSaveInterval = 1 * time.Minute @@ -26,7 +26,7 @@ type MetricStore struct { peerMetrics map[string]*Metric // Mutex that protects the map and history data file - mtx sync.Mutex + mtx tmsync.Mutex // The db where peer trust metric history data will be stored db dbm.DB @@ -72,7 +72,9 @@ func (tms *MetricStore) OnStop() { // Stop all trust metric go-routines for _, tm := range tms.peerMetrics { - tm.Stop() + if err := tm.Stop(); err != nil { + tms.Logger.Error("unable to stop metric store", "error", err) + } } // Make the final trust history data save @@ -108,7 +110,9 @@ func (tms *MetricStore) GetPeerTrustMetric(key string) *Metric { if !ok { // If the metric is not available, we will create it tm = NewMetricWithConfig(tms.config) - tm.Start() + if err := tm.Start(); err != nil { + tms.Logger.Error("unable to start metric store", "error", err) + } // The metric needs to be in the map tms.peerMetrics[key] = tm } @@ -168,7 +172,9 @@ func (tms *MetricStore) loadFromDB() bool { for key, p := range peers { tm := NewMetricWithConfig(tms.config) - tm.Start() + if err := tm.Start(); err != nil { + tms.Logger.Error("unable to start metric", "error", err) + } tm.Init(p) // Load the peer trust metric into the store tms.peerMetrics[key] = tm @@ -193,7 +199,9 @@ func (tms *MetricStore) saveToDB() { tms.Logger.Error("Failed to encode the TrustHistory", "err", err) return } - tms.db.SetSync(trustMetricKey, bytes) + if err := tms.db.SetSync(trustMetricKey, bytes); err != nil { + tms.Logger.Error("failed to flush data to disk", "error", err) + } } // Periodically saves the trust history data to the DB diff --git a/p2p/trust/store_test.go b/p2p/trust/store_test.go index 1cd83916c..df0f14a04 100644 --- a/p2p/trust/store_test.go +++ b/p2p/trust/store_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/libs/log" @@ -17,12 +18,11 @@ import ( func TestTrustMetricStoreSaveLoad(t *testing.T) { dir, err := ioutil.TempDir("", "trust_test") - if err != nil { - panic(err) - } + require.NoError(t, err) defer os.Remove(dir) - historyDB := dbm.NewDB("trusthistory", "goleveldb", dir) + historyDB, err := dbm.NewDB("trusthistory", "goleveldb", dir) + require.NoError(t, err) // 0 peers saved store := NewTrustMetricStore(historyDB, DefaultConfig()) @@ -31,7 +31,8 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) { // Load the data from the file store = NewTrustMetricStore(historyDB, DefaultConfig()) store.SetLogger(log.TestingLogger()) - store.Start() + err = store.Start() + require.NoError(t, err) // Make sure we still have 0 entries assert.Zero(t, store.Size()) @@ -48,7 +49,8 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) { tm := NewMetric() tm.SetTicker(tt[i]) - tm.Start() + err = tm.Start() + 
require.NoError(t, err) store.AddPeerTrustMetric(key, tm) tm.BadEvents(10) @@ -62,12 +64,14 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) { tt[i].NextTick() } // Stop all the trust metrics and save - store.Stop() + err = store.Stop() + require.NoError(t, err) // Load the data from the DB store = NewTrustMetricStore(historyDB, DefaultConfig()) store.SetLogger(log.TestingLogger()) - store.Start() + err = store.Start() + require.NoError(t, err) // Check that we still have 100 peers with imperfect trust values assert.Equal(t, 100, store.Size()) @@ -75,11 +79,13 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) { assert.NotEqual(t, 1.0, tm.TrustValue()) } - store.Stop() + err = store.Stop() + require.NoError(t, err) } func TestTrustMetricStoreConfig(t *testing.T) { - historyDB := dbm.NewDB("", "memdb", "") + historyDB, err := dbm.NewDB("", "memdb", "") + require.NoError(t, err) config := MetricConfig{ ProportionalWeight: 0.5, @@ -89,7 +95,8 @@ func TestTrustMetricStoreConfig(t *testing.T) { // Create a store with custom config store := NewTrustMetricStore(historyDB, config) store.SetLogger(log.TestingLogger()) - store.Start() + err = store.Start() + require.NoError(t, err) // Have the store make us a metric with the config tm := store.GetPeerTrustMetric("TestKey") @@ -97,15 +104,18 @@ func TestTrustMetricStoreConfig(t *testing.T) { // Check that the options made it to the metric assert.Equal(t, 0.5, tm.proportionalWeight) assert.Equal(t, 0.5, tm.integralWeight) - store.Stop() + err = store.Stop() + require.NoError(t, err) } func TestTrustMetricStoreLookup(t *testing.T) { - historyDB := dbm.NewDB("", "memdb", "") + historyDB, err := dbm.NewDB("", "memdb", "") + require.NoError(t, err) store := NewTrustMetricStore(historyDB, DefaultConfig()) store.SetLogger(log.TestingLogger()) - store.Start() + err = store.Start() + require.NoError(t, err) // Create 100 peers in the trust metric store for i := 0; i < 100; i++ { @@ -117,15 +127,18 @@ func TestTrustMetricStoreLookup(t *testing.T) { assert.NotNil(t, ktm, "Expected to find TrustMetric %s but wasn't there.", key) } - store.Stop() + err = store.Stop() + require.NoError(t, err) } func TestTrustMetricStorePeerScore(t *testing.T) { - historyDB := dbm.NewDB("", "memdb", "") + historyDB, err := dbm.NewDB("", "memdb", "") + require.NoError(t, err) store := NewTrustMetricStore(historyDB, DefaultConfig()) store.SetLogger(log.TestingLogger()) - store.Start() + err = store.Start() + require.NoError(t, err) key := "TestKey" tm := store.GetPeerTrustMetric(key) @@ -149,5 +162,6 @@ func TestTrustMetricStorePeerScore(t *testing.T) { // We will remember our experiences with this peer tm = store.GetPeerTrustMetric(key) assert.NotEqual(t, 100, tm.TrustScore()) - store.Stop() + err = store.Stop() + require.NoError(t, err) } diff --git a/p2p/upnp/upnp.go b/p2p/upnp/upnp.go index 6b006dbf1..c00530aca 100644 --- a/p2p/upnp/upnp.go +++ b/p2p/upnp/upnp.go @@ -46,7 +46,7 @@ func Discover() (nat NAT, err error) { return } socket := conn.(*net.UDPConn) - defer socket.Close() // nolint: errcheck + defer socket.Close() if err := socket.SetDeadline(time.Now().Add(3 * time.Second)); err != nil { return nil, err @@ -206,10 +206,10 @@ func getServiceURL(rootURL string) (url, urnDomain string, err error) { if err != nil { return } - defer r.Body.Close() // nolint: errcheck + defer r.Body.Close() if r.StatusCode >= 400 { - err = errors.New(string(r.StatusCode)) + err = errors.New(string(rune(r.StatusCode))) return } var root Root @@ -269,7 +269,7 @@ func soapRequest(url, 
function, message, domain string) (r *http.Response, err e } req.Header.Set("Content-Type", "text/xml ; charset=\"utf-8\"") req.Header.Set("User-Agent", "Darwin/10.0.0, UPnP/1.0, MiniUPnPc/1.3") - //req.Header.Set("Transfer-Encoding", "chunked") + // req.Header.Set("Transfer-Encoding", "chunked") req.Header.Set("SOAPAction", "\"urn:"+domain+":service:WANIPConnection:1#"+function+"\"") req.Header.Set("Connection", "Close") req.Header.Set("Cache-Control", "no-cache") @@ -306,7 +306,7 @@ func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) { var response *http.Response response, err = soapRequest(n.serviceURL, "GetExternalIPAddress", message, n.urnDomain) if response != nil { - defer response.Body.Close() // nolint: errcheck + defer response.Body.Close() } if err != nil { return @@ -365,7 +365,7 @@ func (n *upnpNAT) AddPortMapping( var response *http.Response response, err = soapRequest(n.serviceURL, "AddPortMapping", message, n.urnDomain) if response != nil { - defer response.Body.Close() // nolint: errcheck + defer response.Body.Close() } if err != nil { return @@ -391,7 +391,7 @@ func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort var response *http.Response response, err = soapRequest(n.serviceURL, "DeletePortMapping", message, n.urnDomain) if response != nil { - defer response.Body.Close() // nolint: errcheck + defer response.Body.Close() } if err != nil { return diff --git a/privval/codec.go b/privval/codec.go deleted file mode 100644 index d1f2eafa2..000000000 --- a/privval/codec.go +++ /dev/null @@ -1,14 +0,0 @@ -package privval - -import ( - amino "github.com/tendermint/go-amino" - - cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" -) - -var cdc = amino.NewCodec() - -func init() { - cryptoamino.RegisterAmino(cdc) - RegisterRemoteSignerMsg(cdc) -} diff --git a/privval/doc.go b/privval/doc.go index 668e5ebc4..7695ffe9d 100644 --- a/privval/doc.go +++ b/privval/doc.go @@ -19,5 +19,11 @@ SignerDialerEndpoint SignerDialerEndpoint is a simple wrapper around a net.Conn. It's used by both IPCVal and TCPVal. +SignerClient + +SignerClient handles remote validator connections that provide signing services. +In production, it's recommended to wrap it with RetrySignerClient to avoid +termination in case of temporary errors. + */ package privval diff --git a/privval/errors.go b/privval/errors.go index 9f151f11d..297d5dca2 100644 --- a/privval/errors.go +++ b/privval/errors.go @@ -1,9 +1,11 @@ package privval import ( + "errors" "fmt" ) +// EndpointTimeoutError occurs when endpoint times out. type EndpointTimeoutError struct{} // Implement the net.Error interface. @@ -13,15 +15,15 @@ func (e EndpointTimeoutError) Temporary() bool { return true } // Socket errors. var ( - ErrUnexpectedResponse = fmt.Errorf("received unexpected response") - ErrNoConnection = fmt.Errorf("endpoint is not connected") ErrConnectionTimeout = EndpointTimeoutError{} - - ErrReadTimeout = fmt.Errorf("endpoint read timed out") - ErrWriteTimeout = fmt.Errorf("endpoint write timed out") + ErrNoConnection = errors.New("endpoint is not connected") + ErrReadTimeout = errors.New("endpoint read timed out") + ErrUnexpectedResponse = errors.New("empty response") + ErrWriteTimeout = errors.New("endpoint write timed out") ) -// RemoteSignerError allows (remote) validators to include meaningful error descriptions in their reply. +// RemoteSignerError allows (remote) validators to include meaningful error +// descriptions in their reply. 
type RemoteSignerError struct { // TODO(ismail): create an enum of known errors Code int diff --git a/privval/file.go b/privval/file.go index 5f07ac525..4482e05f2 100644 --- a/privval/file.go +++ b/privval/file.go @@ -7,11 +7,17 @@ import ( "io/ioutil" "time" + "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" tmbytes "github.com/tendermint/tendermint/libs/bytes" + tmjson "github.com/tendermint/tendermint/libs/json" tmos "github.com/tendermint/tendermint/libs/os" + "github.com/tendermint/tendermint/libs/protoio" "github.com/tendermint/tendermint/libs/tempfile" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" ) @@ -25,14 +31,14 @@ const ( ) // A vote is either stepPrevote or stepPrecommit. -func voteToStep(vote *types.Vote) int8 { +func voteToStep(vote *tmproto.Vote) int8 { switch vote.Type { - case types.PrevoteType: + case tmproto.PrevoteType: return stepPrevote - case types.PrecommitType: + case tmproto.PrecommitType: return stepPrecommit default: - panic("Unknown vote type") + panic(fmt.Sprintf("Unknown vote type: %v", vote.Type)) } } @@ -54,7 +60,7 @@ func (pvKey FilePVKey) Save() { panic("cannot save PrivValidator key: filePath not set") } - jsonBytes, err := cdc.MarshalJSONIndent(pvKey, "", " ") + jsonBytes, err := tmjson.MarshalIndent(pvKey, "", " ") if err != nil { panic(err) } @@ -70,7 +76,7 @@ func (pvKey FilePVKey) Save() { // FilePVLastSignState stores the mutable part of PrivValidator. type FilePVLastSignState struct { Height int64 `json:"height"` - Round int `json:"round"` + Round int32 `json:"round"` Step int8 `json:"step"` Signature []byte `json:"signature,omitempty"` SignBytes tmbytes.HexBytes `json:"signbytes,omitempty"` @@ -85,7 +91,7 @@ type FilePVLastSignState struct { // it returns true if the HRS matches the arguments and the SignBytes are not empty (indicating // we have already signed for this HRS, and can reuse the existing signature). // It panics if the HRS matches the arguments, there's a SignBytes, but no Signature. -func (lss *FilePVLastSignState) CheckHRS(height int64, round int, step int8) (bool, error) { +func (lss *FilePVLastSignState) CheckHRS(height int64, round int32, step int8) (bool, error) { if lss.Height > height { return false, fmt.Errorf("height regression. Got %v, last height %v", height, lss.Height) @@ -125,7 +131,7 @@ func (lss *FilePVLastSignState) Save() { if outFile == "" { panic("cannot save FilePVLastSignState: filePath not set") } - jsonBytes, err := cdc.MarshalJSONIndent(lss, "", " ") + jsonBytes, err := tmjson.MarshalIndent(lss, "", " ") if err != nil { panic(err) } @@ -147,11 +153,8 @@ type FilePV struct { LastSignState FilePVLastSignState } -// GenFilePV generates a new validator with randomly generated private key -// and sets the filePaths, but does not call Save(). -func GenFilePV(keyFilePath, stateFilePath string) *FilePV { - privKey := ed25519.GenPrivKey() - +// NewFilePV generates a new validator from the given key and paths. +func NewFilePV(privKey crypto.PrivKey, keyFilePath, stateFilePath string) *FilePV { return &FilePV{ Key: FilePVKey{ Address: privKey.PubKey().Address(), @@ -166,6 +169,19 @@ func GenFilePV(keyFilePath, stateFilePath string) *FilePV { } } +// GenFilePV generates a new validator with randomly generated private key +// and sets the filePaths, but does not call Save(). 
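CheckHRS above carries the double-sign protection: reject any height/round/step that moves backwards, and report whether this exact HRS was already signed. A condensed, self-contained sketch of that decision table; the real method additionally inspects SignBytes and panics if a signature is unexpectedly missing.

package main

import (
	"errors"
	"fmt"
)

// checkHRS condenses FilePVLastSignState.CheckHRS: disallow regressions
// and report whether we already hold a signature for exactly this HRS.
func checkHRS(lastH, h int64, lastR, r int32, lastS, s int8, haveSig bool) (bool, error) {
	switch {
	case h < lastH:
		return false, errors.New("height regression")
	case h > lastH:
		return false, nil // new height, nothing signed yet
	case r < lastR:
		return false, errors.New("round regression")
	case r > lastR:
		return false, nil
	case s < lastS:
		return false, errors.New("step regression")
	case s > lastS:
		return false, nil
	default:
		return haveSig, nil // same HRS: reuse the stored signature
	}
}

func main() {
	ok, err := checkHRS(10, 10, 0, 0, 2, 2, true)
	fmt.Println(ok, err) // true <nil>: safe to reuse, no re-signing
}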
+func GenFilePV(keyFilePath, stateFilePath, keyType string) (*FilePV, error) { + switch keyType { + case types.ABCIPubKeyTypeSecp256k1: + return NewFilePV(secp256k1.GenPrivKey(), keyFilePath, stateFilePath), nil + case "", types.ABCIPubKeyTypeEd25519: + return NewFilePV(ed25519.GenPrivKey(), keyFilePath, stateFilePath), nil + default: + return nil, fmt.Errorf("key type: %s is not supported", keyType) + } +} + // LoadFilePV loads a FilePV from the filePaths. The FilePV handles double // signing prevention by persisting data to the stateFilePath. If either file path // does not exist, the program will exit. @@ -186,7 +202,7 @@ func loadFilePV(keyFilePath, stateFilePath string, loadState bool) *FilePV { tmos.Exit(err.Error()) } pvKey := FilePVKey{} - err = cdc.UnmarshalJSON(keyJSONBytes, &pvKey) + err = tmjson.Unmarshal(keyJSONBytes, &pvKey) if err != nil { tmos.Exit(fmt.Sprintf("Error reading PrivValidator key from %v: %v\n", keyFilePath, err)) } @@ -197,12 +213,13 @@ func loadFilePV(keyFilePath, stateFilePath string, loadState bool) *FilePV { pvKey.filePath = keyFilePath pvState := FilePVLastSignState{} + if loadState { stateJSONBytes, err := ioutil.ReadFile(stateFilePath) if err != nil { tmos.Exit(err.Error()) } - err = cdc.UnmarshalJSON(stateJSONBytes, &pvState) + err = tmjson.Unmarshal(stateJSONBytes, &pvState) if err != nil { tmos.Exit(fmt.Sprintf("Error reading PrivValidator state from %v: %v\n", stateFilePath, err)) } @@ -218,15 +235,18 @@ func loadFilePV(keyFilePath, stateFilePath string, loadState bool) *FilePV { // LoadOrGenFilePV loads a FilePV from the given filePaths // or else generates a new one and saves it to the filePaths. -func LoadOrGenFilePV(keyFilePath, stateFilePath string) *FilePV { - var pv *FilePV +func LoadOrGenFilePV(keyFilePath, stateFilePath string) (*FilePV, error) { + var ( + pv *FilePV + err error + ) if tmos.FileExists(keyFilePath) { pv = LoadFilePV(keyFilePath, stateFilePath) } else { - pv = GenFilePV(keyFilePath, stateFilePath) + pv, err = GenFilePV(keyFilePath, stateFilePath, "") pv.Save() } - return pv + return pv, err } // GetAddress returns the address of the validator. @@ -243,7 +263,7 @@ func (pv *FilePV) GetPubKey() (crypto.PubKey, error) { // SignVote signs a canonical representation of the vote, along with the // chainID. Implements PrivValidator. -func (pv *FilePV) SignVote(chainID string, vote *types.Vote) error { +func (pv *FilePV) SignVote(chainID string, vote *tmproto.Vote) error { if err := pv.signVote(chainID, vote); err != nil { return fmt.Errorf("error signing vote: %v", err) } @@ -252,7 +272,7 @@ func (pv *FilePV) SignVote(chainID string, vote *types.Vote) error { // SignProposal signs a canonical representation of the proposal, along with // the chainID. Implements PrivValidator. -func (pv *FilePV) SignProposal(chainID string, proposal *types.Proposal) error { +func (pv *FilePV) SignProposal(chainID string, proposal *tmproto.Proposal) error { if err := pv.signProposal(chainID, proposal); err != nil { return fmt.Errorf("error signing proposal: %v", err) } @@ -293,7 +313,7 @@ func (pv *FilePV) String() string { // signVote checks if the vote is good to sign and sets the vote signature. // It may need to set the timestamp as well if the vote is otherwise the same as // a previously signed vote (ie. we crashed after signing but before the vote hit the WAL). 
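GenFilePV and LoadOrGenFilePV now return an error, and the key type can be chosen explicitly. A usage sketch, assuming this repository's privval and types packages; the file paths are illustrative.

pv, err := privval.GenFilePV(
	"config/priv_validator_key.json",
	"data/priv_validator_state.json",
	types.ABCIPubKeyTypeSecp256k1, // or "" / types.ABCIPubKeyTypeEd25519 for the default
)
if err != nil {
	panic(err) // e.g. "key type: <type> is not supported"
}
pv.Save()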
-func (pv *FilePV) signVote(chainID string, vote *types.Vote) error { +func (pv *FilePV) signVote(chainID string, vote *tmproto.Vote) error { height, round, step := vote.Height, vote.Round, voteToStep(vote) lss := pv.LastSignState @@ -303,7 +323,7 @@ func (pv *FilePV) signVote(chainID string, vote *types.Vote) error { return err } - signBytes := vote.SignBytes(chainID) + signBytes := types.VoteSignBytes(chainID, vote) // We might crash before writing to the wal, // causing us to try to re-sign for the same HRS. @@ -335,7 +355,7 @@ func (pv *FilePV) signVote(chainID string, vote *types.Vote) error { // signProposal checks if the proposal is good to sign and sets the proposal signature. // It may need to set the timestamp as well if the proposal is otherwise the same as // a previously signed proposal ie. we crashed after signing but before the proposal hit the WAL). -func (pv *FilePV) signProposal(chainID string, proposal *types.Proposal) error { +func (pv *FilePV) signProposal(chainID string, proposal *tmproto.Proposal) error { height, round, step := proposal.Height, proposal.Round, stepPropose lss := pv.LastSignState @@ -345,7 +365,7 @@ func (pv *FilePV) signProposal(chainID string, proposal *types.Proposal) error { return err } - signBytes := proposal.SignBytes(chainID) + signBytes := types.ProposalSignBytes(chainID, proposal) // We might crash before writing to the wal, // causing us to try to re-sign for the same HRS. @@ -375,7 +395,7 @@ func (pv *FilePV) signProposal(chainID string, proposal *types.Proposal) error { } // Persist height/round/step and signature -func (pv *FilePV) saveSigned(height int64, round int, step int8, +func (pv *FilePV) saveSigned(height int64, round int32, step int8, signBytes []byte, sig []byte) { pv.LastSignState.Height = height @@ -391,34 +411,31 @@ func (pv *FilePV) saveSigned(height int64, round int, step int8, // returns the timestamp from the lastSignBytes. // returns true if the only difference in the votes is their timestamp. func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { - var lastVote, newVote types.CanonicalVote - if err := cdc.UnmarshalBinaryLengthPrefixed(lastSignBytes, &lastVote); err != nil { + var lastVote, newVote tmproto.CanonicalVote + if err := protoio.UnmarshalDelimited(lastSignBytes, &lastVote); err != nil { panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err)) } - if err := cdc.UnmarshalBinaryLengthPrefixed(newSignBytes, &newVote); err != nil { + if err := protoio.UnmarshalDelimited(newSignBytes, &newVote); err != nil { panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err)) } lastTime := lastVote.Timestamp - // set the times to the same value and check equality now := tmtime.Now() lastVote.Timestamp = now newVote.Timestamp = now - lastVoteBytes, _ := cdc.MarshalJSON(lastVote) - newVoteBytes, _ := cdc.MarshalJSON(newVote) - return lastTime, bytes.Equal(newVoteBytes, lastVoteBytes) + return lastTime, proto.Equal(&newVote, &lastVote) } // returns the timestamp from the lastSignBytes. 
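checkVotesOnlyDifferByTimestamp above now normalizes both timestamps and lets proto.Equal compare the messages field by field, instead of re-marshalling and comparing bytes. A sketch of the idea, assuming CanonicalVote's Timestamp is generated as a plain time.Time (gogoproto stdtime):

package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/proto"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

func main() {
	a := tmproto.CanonicalVote{Height: 5, ChainID: "test-chain"}
	b := a // same vote...
	a.Timestamp = time.Now()
	b.Timestamp = a.Timestamp.Add(time.Second) // ...different timestamp

	fmt.Println(proto.Equal(&a, &b)) // false: timestamps differ

	// Normalize the timestamps, as the check above does.
	b.Timestamp = a.Timestamp
	fmt.Println(proto.Equal(&a, &b)) // true: only the timestamp differed
}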
// returns true if the only difference in the proposals is their timestamp func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { - var lastProposal, newProposal types.CanonicalProposal - if err := cdc.UnmarshalBinaryLengthPrefixed(lastSignBytes, &lastProposal); err != nil { + var lastProposal, newProposal tmproto.CanonicalProposal + if err := protoio.UnmarshalDelimited(lastSignBytes, &lastProposal); err != nil { panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err)) } - if err := cdc.UnmarshalBinaryLengthPrefixed(newSignBytes, &newProposal); err != nil { + if err := protoio.UnmarshalDelimited(newSignBytes, &newProposal); err != nil { panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err)) } @@ -427,8 +444,6 @@ func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (ti now := tmtime.Now() lastProposal.Timestamp = now newProposal.Timestamp = now - lastProposalBytes, _ := cdc.MarshalBinaryLengthPrefixed(lastProposal) - newProposalBytes, _ := cdc.MarshalBinaryLengthPrefixed(newProposal) - return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes) + return lastTime, proto.Equal(&newProposal, &lastProposal) } diff --git a/privval/file_deprecated.go b/privval/file_deprecated.go deleted file mode 100644 index c30c273d7..000000000 --- a/privval/file_deprecated.go +++ /dev/null @@ -1,81 +0,0 @@ -package privval - -import ( - "io/ioutil" - "os" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/types" -) - -// OldFilePV is the old version of the FilePV, pre v0.28.0. -// Deprecated: Use FilePV instead. -type OldFilePV struct { - Address types.Address `json:"address"` - PubKey crypto.PubKey `json:"pub_key"` - LastHeight int64 `json:"last_height"` - LastRound int `json:"last_round"` - LastStep int8 `json:"last_step"` - LastSignature []byte `json:"last_signature,omitempty"` - LastSignBytes bytes.HexBytes `json:"last_signbytes,omitempty"` - PrivKey crypto.PrivKey `json:"priv_key"` - - filePath string -} - -// LoadOldFilePV loads an OldFilePV from the filePath. -func LoadOldFilePV(filePath string) (*OldFilePV, error) { - pvJSONBytes, err := ioutil.ReadFile(filePath) - if err != nil { - return nil, err - } - pv := &OldFilePV{} - err = cdc.UnmarshalJSON(pvJSONBytes, &pv) - if err != nil { - return nil, err - } - - // overwrite pubkey and address for convenience - pv.PubKey = pv.PrivKey.PubKey() - pv.Address = pv.PubKey.Address() - - pv.filePath = filePath - return pv, nil -} - -// Upgrade convets the OldFilePV to the new FilePV, separating the immutable and mutable components, -// and persisting them to the keyFilePath and stateFilePath, respectively. -// It renames the original file by adding ".bak". 
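One design note on the checkVotesOnlyDifferByTimestamp/checkProposalsOnlyDifferByTimestamp changes above: instead of re-marshalling both values with amino and comparing bytes, the structs are now compared directly with proto.Equal after normalizing the timestamps. A self-contained sketch of that technique (onlyTimestampsDiffer is a hypothetical helper, not code from this diff):

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
)

// onlyTimestampsDiffer reports whether two canonical votes are identical
// once their timestamps are forced to the same value. The votes are
// passed by value, so the caller's copies keep their real timestamps.
func onlyTimestampsDiffer(prev, cur tmproto.CanonicalVote) bool {
	now := tmtime.Now()
	prev.Timestamp = now
	cur.Timestamp = now
	return proto.Equal(&cur, &prev)
}

func main() {
	a := tmproto.CanonicalVote{Height: 1, Round: 2}
	b := a
	b.Timestamp = b.Timestamp.Add(1)        // same vote, different timestamp
	fmt.Println(onlyTimestampsDiffer(a, b)) // true
}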
-func (oldFilePV *OldFilePV) Upgrade(keyFilePath, stateFilePath string) *FilePV { - privKey := oldFilePV.PrivKey - pvKey := FilePVKey{ - PrivKey: privKey, - PubKey: privKey.PubKey(), - Address: privKey.PubKey().Address(), - filePath: keyFilePath, - } - - pvState := FilePVLastSignState{ - Height: oldFilePV.LastHeight, - Round: oldFilePV.LastRound, - Step: oldFilePV.LastStep, - Signature: oldFilePV.LastSignature, - SignBytes: oldFilePV.LastSignBytes, - filePath: stateFilePath, - } - - // Save the new PV files - pv := &FilePV{ - Key: pvKey, - LastSignState: pvState, - } - pv.Save() - - // Rename the old PV file - err := os.Rename(oldFilePV.filePath, oldFilePV.filePath+".bak") - if err != nil { - panic(err) - } - return pv -} diff --git a/privval/file_deprecated_test.go b/privval/file_deprecated_test.go deleted file mode 100644 index f850c23f1..000000000 --- a/privval/file_deprecated_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package privval_test - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/privval" -) - -const lastSignBytes = "750802110500000000000000220B08B398F3E00510F48DA6402A480A20F" + - "C258973076512999C3E6839A22E9FBDB1B77CF993E8A9955412A41A59D4" + - "CAD312240A20C971B286ACB8AAA6FCA0365EB0A660B189EDC08B46B5AF2" + - "995DEFA51A28D215B10013211746573742D636861696E2D533245415533" - -const oldPrivvalContent = `{ - "address": "1D8089FAFDFAE4A637F3D616E17B92905FA2D91D", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "r3Yg2AhDZ745CNTpavsGU+mRZ8WpRXqoJuyqjN8mJq0=" - }, - "last_height": "5", - "last_round": "0", - "last_step": 3, - "last_signature": "CTr7b9ZQlrJJf+12rPl5t/YSCUc/KqV7jQogCfFJA24e7hof69X6OMT7eFLVQHyodPjD/QTA298XHV5ejxInDQ==", - "last_signbytes": "` + lastSignBytes + `", - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "7MwvTGEWWjsYwjn2IpRb+GYsWi9nnFsw8jPLLY1UtP6vdiDYCENnvjkI1Olq+wZT6ZFnxalFeqgm7KqM3yYmrQ==" - } -}` - -func TestLoadAndUpgrade(t *testing.T) { - - oldFilePath := initTmpOldFile(t) - defer os.Remove(oldFilePath) - newStateFile, err := ioutil.TempFile("", "priv_validator_state*.json") - defer os.Remove(newStateFile.Name()) - require.NoError(t, err) - newKeyFile, err := ioutil.TempFile("", "priv_validator_key*.json") - defer os.Remove(newKeyFile.Name()) - require.NoError(t, err) - - oldPV, err := privval.LoadOldFilePV(oldFilePath) - assert.NoError(t, err) - newPV := oldPV.Upgrade(newKeyFile.Name(), newStateFile.Name()) - - assertEqualPV(t, oldPV, newPV) - assert.NoError(t, err) - upgradedPV := privval.LoadFilePV(newKeyFile.Name(), newStateFile.Name()) - assertEqualPV(t, oldPV, upgradedPV) - oldPV, err = privval.LoadOldFilePV(oldFilePath + ".bak") - require.NoError(t, err) - assertEqualPV(t, oldPV, upgradedPV) -} - -func assertEqualPV(t *testing.T, oldPV *privval.OldFilePV, newPV *privval.FilePV) { - assert.Equal(t, oldPV.Address, newPV.Key.Address) - assert.Equal(t, oldPV.Address, newPV.GetAddress()) - assert.Equal(t, oldPV.PubKey, newPV.Key.PubKey) - npv, err := newPV.GetPubKey() - require.NoError(t, err) - assert.Equal(t, oldPV.PubKey, npv) - assert.Equal(t, oldPV.PrivKey, newPV.Key.PrivKey) - - assert.Equal(t, oldPV.LastHeight, newPV.LastSignState.Height) - assert.Equal(t, oldPV.LastRound, newPV.LastSignState.Round) - assert.Equal(t, oldPV.LastSignature, newPV.LastSignState.Signature) - assert.Equal(t, oldPV.LastSignBytes, newPV.LastSignState.SignBytes) - assert.Equal(t, oldPV.LastStep, newPV.LastSignState.Step) -} - -func 
initTmpOldFile(t *testing.T) string { - tmpFile, err := ioutil.TempFile("", "priv_validator_*.json") - require.NoError(t, err) - t.Logf("created test file %s", tmpFile.Name()) - _, err = tmpFile.WriteString(oldPrivvalContent) - require.NoError(t, err) - - return tmpFile.Name() -} diff --git a/privval/file_test.go b/privval/file_test.go index 343131e1a..1d9c65fa3 100644 --- a/privval/file_test.go +++ b/privval/file_test.go @@ -12,6 +12,10 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/tmhash" + tmjson "github.com/tendermint/tendermint/libs/json" + tmrand "github.com/tendermint/tendermint/libs/rand" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" ) @@ -24,7 +28,8 @@ func TestGenLoadValidator(t *testing.T) { tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") require.Nil(t, err) - privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) + privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") + require.NoError(t, err) height := int64(100) privVal.LastSignState.Height = height @@ -42,18 +47,20 @@ func TestResetValidator(t *testing.T) { tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") require.Nil(t, err) - privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) + privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") + require.NoError(t, err) emptyState := FilePVLastSignState{filePath: tempStateFile.Name()} // new priv val has empty state assert.Equal(t, privVal.LastSignState, emptyState) // test vote - height, round := int64(10), 1 - voteType := byte(types.PrevoteType) - blockID := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{}} + height, round := int64(10), int32(1) + voteType := tmproto.PrevoteType + randBytes := tmrand.Bytes(tmhash.Size) + blockID := types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}} vote := newVote(privVal.Key.Address, 0, height, round, voteType, blockID) - err = privVal.SignVote("mychainid", vote) + err = privVal.SignVote("mychainid", vote.ToProto()) assert.NoError(t, err, "expected no error signing vote") // priv val after signing is not same as empty @@ -81,9 +88,11 @@ func TestLoadOrGenValidator(t *testing.T) { t.Error(err) } - privVal := LoadOrGenFilePV(tempKeyFilePath, tempStateFilePath) + privVal, err := LoadOrGenFilePV(tempKeyFilePath, tempStateFilePath) + require.NoError(t, err) addr := privVal.GetAddress() - privVal = LoadOrGenFilePV(tempKeyFilePath, tempStateFilePath) + privVal, err = LoadOrGenFilePV(tempKeyFilePath, tempStateFilePath) + require.NoError(t, err) assert.Equal(addr, privVal.GetAddress(), "expected privval addr to be the same") } @@ -93,12 +102,12 @@ func TestUnmarshalValidatorState(t *testing.T) { // create some fixed values serialized := `{ "height": "1", - "round": "1", + "round": 1, "step": 1 }` val := FilePVLastSignState{} - err := cdc.UnmarshalJSON([]byte(serialized), &val) + err := tmjson.Unmarshal([]byte(serialized), &val) require.Nil(err, "%+v", err) // make sure the values match @@ -107,7 +116,7 @@ func TestUnmarshalValidatorState(t *testing.T) { assert.EqualValues(val.Step, 1) // export it and make sure it is the same - out, err := cdc.MarshalJSON(val) + out, err := tmjson.Marshal(val) require.Nil(err, "%+v", err) assert.JSONEq(serialized, string(out)) } @@ -119,10 +128,8 @@ func TestUnmarshalValidatorKey(t 
*testing.T) { privKey := ed25519.GenPrivKey() pubKey := privKey.PubKey() addr := pubKey.Address() - pubArray := [32]byte(pubKey.(ed25519.PubKeyEd25519)) - pubBytes := pubArray[:] - privArray := [64]byte(privKey) - privBytes := privArray[:] + pubBytes := pubKey.Bytes() + privBytes := privKey.Bytes() pubB64 := base64.StdEncoding.EncodeToString(pubBytes) privB64 := base64.StdEncoding.EncodeToString(privBytes) @@ -139,7 +146,7 @@ func TestUnmarshalValidatorKey(t *testing.T) { }`, addr, pubB64, privB64) val := FilePVKey{} - err := cdc.UnmarshalJSON([]byte(serialized), &val) + err := tmjson.Unmarshal([]byte(serialized), &val) require.Nil(err, "%+v", err) // make sure the values match @@ -148,7 +155,7 @@ func TestUnmarshalValidatorKey(t *testing.T) { assert.EqualValues(privKey, val.PrivKey) // export it and make sure it is the same - out, err := cdc.MarshalJSON(val) + out, err := tmjson.Marshal(val) require.Nil(err, "%+v", err) assert.JSONEq(serialized, string(out)) } @@ -161,21 +168,28 @@ func TestSignVote(t *testing.T) { tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") require.Nil(t, err) - privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) + privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") + require.NoError(t, err) - block1 := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{}} - block2 := types.BlockID{Hash: []byte{3, 2, 1}, PartsHeader: types.PartSetHeader{}} + randbytes := tmrand.Bytes(tmhash.Size) + randbytes2 := tmrand.Bytes(tmhash.Size) - height, round := int64(10), 1 - voteType := byte(types.PrevoteType) + block1 := types.BlockID{Hash: randbytes, + PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}} + block2 := types.BlockID{Hash: randbytes2, + PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2}} + + height, round := int64(10), int32(1) + voteType := tmproto.PrevoteType // sign a vote for first time vote := newVote(privVal.Key.Address, 0, height, round, voteType, block1) - err = privVal.SignVote("mychainid", vote) + v := vote.ToProto() + err = privVal.SignVote("mychainid", v) assert.NoError(err, "expected no error signing vote") // try to sign the same vote again; should be fine - err = privVal.SignVote("mychainid", vote) + err = privVal.SignVote("mychainid", v) assert.NoError(err, "expected no error on signing same vote") // now try some bad votes @@ -187,14 +201,15 @@ func TestSignVote(t *testing.T) { } for _, c := range cases { - err = privVal.SignVote("mychainid", c) + cpb := c.ToProto() + err = privVal.SignVote("mychainid", cpb) assert.Error(err, "expected error on signing conflicting vote") } // try signing a vote with a different time stamp sig := vote.Signature vote.Timestamp = vote.Timestamp.Add(time.Duration(1000)) - err = privVal.SignVote("mychainid", vote) + err = privVal.SignVote("mychainid", v) assert.NoError(err) assert.Equal(sig, vote.Signature) } @@ -207,19 +222,26 @@ func TestSignProposal(t *testing.T) { tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") require.Nil(t, err) - privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) + privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") + require.NoError(t, err) + + randbytes := tmrand.Bytes(tmhash.Size) + randbytes2 := tmrand.Bytes(tmhash.Size) - block1 := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{Total: 5, Hash: []byte{1, 2, 3}}} - block2 := types.BlockID{Hash: []byte{3, 2, 1}, PartsHeader: types.PartSetHeader{Total: 10, Hash: []byte{3, 2, 1}}} - height, 
round := int64(10), 1 + block1 := types.BlockID{Hash: randbytes, + PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}} + block2 := types.BlockID{Hash: randbytes2, + PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2}} + height, round := int64(10), int32(1) // sign a proposal for first time proposal := newProposal(height, round, block1) - err = privVal.SignProposal("mychainid", proposal) + pbp := proposal.ToProto() + err = privVal.SignProposal("mychainid", pbp) assert.NoError(err, "expected no error signing proposal") // try to sign the same proposal again; should be fine - err = privVal.SignProposal("mychainid", proposal) + err = privVal.SignProposal("mychainid", pbp) assert.NoError(err, "expected no error on signing same proposal") // now try some bad Proposals @@ -231,14 +253,14 @@ func TestSignProposal(t *testing.T) { } for _, c := range cases { - err = privVal.SignProposal("mychainid", c) + err = privVal.SignProposal("mychainid", c.ToProto()) assert.Error(err, "expected error on signing conflicting proposal") } // try signing a proposal with a different time stamp sig := proposal.Signature proposal.Timestamp = proposal.Timestamp.Add(time.Duration(1000)) - err = privVal.SignProposal("mychainid", proposal) + err = privVal.SignProposal("mychainid", pbp) assert.NoError(err) assert.Equal(sig, proposal.Signature) } @@ -249,71 +271,76 @@ func TestDifferByTimestamp(t *testing.T) { tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") require.Nil(t, err) - privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) - - block1 := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{Total: 5, Hash: []byte{1, 2, 3}}} - height, round := int64(10), 1 + privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") + require.NoError(t, err) + randbytes := tmrand.Bytes(tmhash.Size) + block1 := types.BlockID{Hash: randbytes, PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}} + height, round := int64(10), int32(1) chainID := "mychainid" // test proposal { proposal := newProposal(height, round, block1) - err := privVal.SignProposal(chainID, proposal) + pb := proposal.ToProto() + err := privVal.SignProposal(chainID, pb) assert.NoError(t, err, "expected no error signing proposal") - signBytes := proposal.SignBytes(chainID) + signBytes := types.ProposalSignBytes(chainID, pb) + sig := proposal.Signature timeStamp := proposal.Timestamp // manipulate the timestamp. 
should get changed back - proposal.Timestamp = proposal.Timestamp.Add(time.Millisecond) + pb.Timestamp = pb.Timestamp.Add(time.Millisecond) var emptySig []byte proposal.Signature = emptySig - err = privVal.SignProposal("mychainid", proposal) + err = privVal.SignProposal("mychainid", pb) assert.NoError(t, err, "expected no error on signing same proposal") - assert.Equal(t, timeStamp, proposal.Timestamp) - assert.Equal(t, signBytes, proposal.SignBytes(chainID)) + assert.Equal(t, timeStamp, pb.Timestamp) + assert.Equal(t, signBytes, types.ProposalSignBytes(chainID, pb)) assert.Equal(t, sig, proposal.Signature) } // test vote { - voteType := byte(types.PrevoteType) - blockID := types.BlockID{Hash: []byte{1, 2, 3}, PartsHeader: types.PartSetHeader{}} + voteType := tmproto.PrevoteType + blockID := types.BlockID{Hash: randbytes, PartSetHeader: types.PartSetHeader{}} vote := newVote(privVal.Key.Address, 0, height, round, voteType, blockID) - err := privVal.SignVote("mychainid", vote) + v := vote.ToProto() + err := privVal.SignVote("mychainid", v) assert.NoError(t, err, "expected no error signing vote") - signBytes := vote.SignBytes(chainID) - sig := vote.Signature + signBytes := types.VoteSignBytes(chainID, v) + sig := v.Signature timeStamp := vote.Timestamp // manipulate the timestamp. should get changed back - vote.Timestamp = vote.Timestamp.Add(time.Millisecond) + v.Timestamp = v.Timestamp.Add(time.Millisecond) var emptySig []byte - vote.Signature = emptySig - err = privVal.SignVote("mychainid", vote) + v.Signature = emptySig + err = privVal.SignVote("mychainid", v) assert.NoError(t, err, "expected no error on signing same vote") - assert.Equal(t, timeStamp, vote.Timestamp) - assert.Equal(t, signBytes, vote.SignBytes(chainID)) - assert.Equal(t, sig, vote.Signature) + assert.Equal(t, timeStamp, v.Timestamp) + assert.Equal(t, signBytes, types.VoteSignBytes(chainID, v)) + assert.Equal(t, sig, v.Signature) } } -func newVote(addr types.Address, idx int, height int64, round int, typ byte, blockID types.BlockID) *types.Vote { +func newVote(addr types.Address, idx int32, height int64, round int32, + typ tmproto.SignedMsgType, blockID types.BlockID) *types.Vote { return &types.Vote{ ValidatorAddress: addr, ValidatorIndex: idx, Height: height, Round: round, - Type: types.SignedMsgType(typ), + Type: typ, Timestamp: tmtime.Now(), BlockID: blockID, } } -func newProposal(height int64, round int, blockID types.BlockID) *types.Proposal { +func newProposal(height int64, round int32, blockID types.BlockID) *types.Proposal { return &types.Proposal{ Height: height, Round: round, diff --git a/privval/messages.go b/privval/messages.go deleted file mode 100644 index fa7a0b09d..000000000 --- a/privval/messages.go +++ /dev/null @@ -1,65 +0,0 @@ -package privval - -import ( - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/types" -) - -// SignerMessage is sent between Signer Clients and Servers. 
-type SignerMessage interface{} - -func RegisterRemoteSignerMsg(cdc *amino.Codec) { - cdc.RegisterInterface((*SignerMessage)(nil), nil) - cdc.RegisterConcrete(&PubKeyRequest{}, "tendermint/remotesigner/PubKeyRequest", nil) - cdc.RegisterConcrete(&PubKeyResponse{}, "tendermint/remotesigner/PubKeyResponse", nil) - cdc.RegisterConcrete(&SignVoteRequest{}, "tendermint/remotesigner/SignVoteRequest", nil) - cdc.RegisterConcrete(&SignedVoteResponse{}, "tendermint/remotesigner/SignedVoteResponse", nil) - cdc.RegisterConcrete(&SignProposalRequest{}, "tendermint/remotesigner/SignProposalRequest", nil) - cdc.RegisterConcrete(&SignedProposalResponse{}, "tendermint/remotesigner/SignedProposalResponse", nil) - - cdc.RegisterConcrete(&PingRequest{}, "tendermint/remotesigner/PingRequest", nil) - cdc.RegisterConcrete(&PingResponse{}, "tendermint/remotesigner/PingResponse", nil) -} - -// TODO: Add ChainIDRequest - -// PubKeyRequest requests the consensus public key from the remote signer. -type PubKeyRequest struct{} - -// PubKeyResponse is a response message containing the public key. -type PubKeyResponse struct { - PubKey crypto.PubKey - Error *RemoteSignerError -} - -// SignVoteRequest is a request to sign a vote -type SignVoteRequest struct { - Vote *types.Vote -} - -// SignedVoteResponse is a response containing a signed vote or an error -type SignedVoteResponse struct { - Vote *types.Vote - Error *RemoteSignerError -} - -// SignProposalRequest is a request to sign a proposal -type SignProposalRequest struct { - Proposal *types.Proposal -} - -// SignedProposalResponse is response containing a signed proposal or an error -type SignedProposalResponse struct { - Proposal *types.Proposal - Error *RemoteSignerError -} - -// PingRequest is a request to confirm that the connection is alive. -type PingRequest struct { -} - -// PingResponse is a response to confirm that the connection is alive. 
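The amino registration table deleted here is replaced by the protobuf oneof privvalproto.Message (see privval/msgs.go below): mustWrapMsg does the wrapping, and consumers dispatch with a type switch on Sum. A small unwrapping sketch (describe is a hypothetical helper):

package main

import (
	"fmt"

	privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
)

// describe dispatches on the oneof Sum field, the proto counterpart of
// amino's registered-interface dispatch.
func describe(msg privvalproto.Message) string {
	switch m := msg.Sum.(type) {
	case *privvalproto.Message_PingRequest:
		return "ping"
	case *privvalproto.Message_SignVoteRequest:
		return fmt.Sprintf("sign vote for chain %q", m.SignVoteRequest.ChainId)
	case *privvalproto.Message_SignProposalRequest:
		return fmt.Sprintf("sign proposal for chain %q", m.SignProposalRequest.ChainId)
	default:
		return fmt.Sprintf("other: %T", m)
	}
}

func main() {
	ping := privvalproto.Message{
		Sum: &privvalproto.Message_PingRequest{PingRequest: &privvalproto.PingRequest{}},
	}
	fmt.Println(describe(ping)) // ping
}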
-type PingResponse struct { -} diff --git a/privval/msgs.go b/privval/msgs.go new file mode 100644 index 000000000..bcfed629b --- /dev/null +++ b/privval/msgs.go @@ -0,0 +1,40 @@ +package privval + +import ( + "fmt" + + "github.com/gogo/protobuf/proto" + + privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" +) + +// TODO: Add ChainIDRequest + +func mustWrapMsg(pb proto.Message) privvalproto.Message { + msg := privvalproto.Message{} + + switch pb := pb.(type) { + case *privvalproto.Message: + msg = *pb + case *privvalproto.PubKeyRequest: + msg.Sum = &privvalproto.Message_PubKeyRequest{PubKeyRequest: pb} + case *privvalproto.PubKeyResponse: + msg.Sum = &privvalproto.Message_PubKeyResponse{PubKeyResponse: pb} + case *privvalproto.SignVoteRequest: + msg.Sum = &privvalproto.Message_SignVoteRequest{SignVoteRequest: pb} + case *privvalproto.SignedVoteResponse: + msg.Sum = &privvalproto.Message_SignedVoteResponse{SignedVoteResponse: pb} + case *privvalproto.SignedProposalResponse: + msg.Sum = &privvalproto.Message_SignedProposalResponse{SignedProposalResponse: pb} + case *privvalproto.SignProposalRequest: + msg.Sum = &privvalproto.Message_SignProposalRequest{SignProposalRequest: pb} + case *privvalproto.PingRequest: + msg.Sum = &privvalproto.Message_PingRequest{PingRequest: pb} + case *privvalproto.PingResponse: + msg.Sum = &privvalproto.Message_PingResponse{PingResponse: pb} + default: + panic(fmt.Errorf("unknown message type %T", pb)) + } + + return msg +} diff --git a/privval/msgs_test.go b/privval/msgs_test.go new file mode 100644 index 000000000..bf532bd7b --- /dev/null +++ b/privval/msgs_test.go @@ -0,0 +1,104 @@ +package privval + +import ( + "encoding/hex" + "testing" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/tmhash" + cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" + privproto "github.com/tendermint/tendermint/proto/tendermint/privval" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" +) + +var stamp = time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC) + +func exampleVote() *types.Vote { + return &types.Vote{ + Type: tmproto.SignedMsgType(1), + Height: 3, + Round: 2, + Timestamp: stamp, + BlockID: types.BlockID{ + Hash: tmhash.Sum([]byte("blockID_hash")), + PartSetHeader: types.PartSetHeader{ + Total: 1000000, + Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + }, + }, + ValidatorAddress: crypto.AddressHash([]byte("validator_address")), + ValidatorIndex: 56789, + } +} + +func exampleProposal() *types.Proposal { + + return &types.Proposal{ + Type: tmproto.SignedMsgType(1), + Height: 3, + Round: 2, + Timestamp: stamp, + POLRound: 2, + Signature: []byte("it's a signature"), + BlockID: types.BlockID{ + Hash: tmhash.Sum([]byte("blockID_hash")), + PartSetHeader: types.PartSetHeader{ + Total: 1000000, + Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + }, + }, + } +} + +// nolint:lll // ignore line length for tests +func TestPrivvalVectors(t *testing.T) { + pk := ed25519.GenPrivKeyFromSecret([]byte("it's a secret")).PubKey() + ppk, err := cryptoenc.PubKeyToProto(pk) + require.NoError(t, err) + + // Generate a simple vote + vote := exampleVote() + votepb := vote.ToProto() + + // Generate a simple proposal + proposal := 
exampleProposal() + proposalpb := proposal.ToProto() + + // Create a Reuseable remote error + remoteError := &privproto.RemoteSignerError{Code: 1, Description: "it's a error"} + + testCases := []struct { + testName string + msg proto.Message + expBytes string + }{ + {"ping request", &privproto.PingRequest{}, "3a00"}, + {"ping response", &privproto.PingResponse{}, "4200"}, + {"pubKey request", &privproto.PubKeyRequest{}, "0a00"}, + {"pubKey response", &privproto.PubKeyResponse{PubKey: ppk, Error: nil}, "12240a220a20556a436f1218d30942efe798420f51dc9b6a311b929c578257457d05c5fcf230"}, + {"pubKey response with error", &privproto.PubKeyResponse{PubKey: cryptoproto.PublicKey{}, Error: remoteError}, "12140a0012100801120c697427732061206572726f72"}, + {"Vote Request", &privproto.SignVoteRequest{Vote: votepb}, "1a760a74080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0608f49a8ded0532146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb03"}, + {"Vote Response", &privproto.SignedVoteResponse{Vote: *votepb, Error: nil}, "22760a74080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0608f49a8ded0532146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb03"}, + {"Vote Response with error", &privproto.SignedVoteResponse{Vote: tmproto.Vote{}, Error: remoteError}, "22250a11220212002a0b088092b8c398feffffff0112100801120c697427732061206572726f72"}, + {"Proposal Request", &privproto.SignProposalRequest{Proposal: proposalpb}, "2a700a6e08011003180220022a4a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a320608f49a8ded053a10697427732061207369676e6174757265"}, + {"Proposal Response", &privproto.SignedProposalResponse{Proposal: *proposalpb, Error: nil}, "32700a6e08011003180220022a4a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a320608f49a8ded053a10697427732061207369676e6174757265"}, + {"Proposal Response with error", &privproto.SignedProposalResponse{Proposal: tmproto.Proposal{}, Error: remoteError}, "32250a112a021200320b088092b8c398feffffff0112100801120c697427732061206572726f72"}, + } + + for _, tc := range testCases { + tc := tc + + pm := mustWrapMsg(tc.msg) + bz, err := pm.Marshal() + require.NoError(t, err, tc.testName) + + require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) + } +} diff --git a/privval/retry_signer_client.go b/privval/retry_signer_client.go new file mode 100644 index 000000000..92a7d0655 --- /dev/null +++ b/privval/retry_signer_client.go @@ -0,0 +1,96 @@ +package privval + +import ( + "fmt" + "time" + + "github.com/tendermint/tendermint/crypto" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" +) + +// RetrySignerClient wraps SignerClient adding retry for each operation (except +// Ping) w/ a timeout. +type RetrySignerClient struct { + next *SignerClient + retries int + timeout time.Duration +} + +// NewRetrySignerClient returns RetrySignerClient. If +retries+ is 0, the +// client will be retrying each operation indefinitely. 
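For context on how the retry wrapper is meant to be composed: it decorates the chain-ID-aware SignerClient that appears later in this diff. A wiring sketch (signWithRetries is hypothetical; the endpoint and vote are assumed to already exist):

package privval

import (
	"time"

	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

// signWithRetries wraps a SignerClient in a RetrySignerClient: here 5
// attempts spaced 500ms apart (retries == 0 would retry indefinitely).
// Errors reported by the remote signer itself are never retried.
func signWithRetries(endpoint *SignerListenerEndpoint, vote *tmproto.Vote) error {
	sc, err := NewSignerClient(endpoint, "mychainid")
	if err != nil {
		return err
	}
	pv := NewRetrySignerClient(sc, 5, 500*time.Millisecond)
	return pv.SignVote("mychainid", vote)
}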
+func NewRetrySignerClient(sc *SignerClient, retries int, timeout time.Duration) *RetrySignerClient { + return &RetrySignerClient{sc, retries, timeout} +} + +var _ types.PrivValidator = (*RetrySignerClient)(nil) + +func (sc *RetrySignerClient) Close() error { + return sc.next.Close() +} + +func (sc *RetrySignerClient) IsConnected() bool { + return sc.next.IsConnected() +} + +func (sc *RetrySignerClient) WaitForConnection(maxWait time.Duration) error { + return sc.next.WaitForConnection(maxWait) +} + +//-------------------------------------------------------- +// Implement PrivValidator + +func (sc *RetrySignerClient) Ping() error { + return sc.next.Ping() +} + +func (sc *RetrySignerClient) GetPubKey() (crypto.PubKey, error) { + var ( + pk crypto.PubKey + err error + ) + for i := 0; i < sc.retries || sc.retries == 0; i++ { + pk, err = sc.next.GetPubKey() + if err == nil { + return pk, nil + } + // If remote signer errors, we don't retry. + if _, ok := err.(*RemoteSignerError); ok { + return nil, err + } + time.Sleep(sc.timeout) + } + return nil, fmt.Errorf("exhausted all attempts to get pubkey: %w", err) +} + +func (sc *RetrySignerClient) SignVote(chainID string, vote *tmproto.Vote) error { + var err error + for i := 0; i < sc.retries || sc.retries == 0; i++ { + err = sc.next.SignVote(chainID, vote) + if err == nil { + return nil + } + // If remote signer errors, we don't retry. + if _, ok := err.(*RemoteSignerError); ok { + return err + } + time.Sleep(sc.timeout) + } + return fmt.Errorf("exhausted all attempts to sign vote: %w", err) +} + +func (sc *RetrySignerClient) SignProposal(chainID string, proposal *tmproto.Proposal) error { + var err error + for i := 0; i < sc.retries || sc.retries == 0; i++ { + err = sc.next.SignProposal(chainID, proposal) + if err == nil { + return nil + } + // If remote signer errors, we don't retry. + if _, ok := err.(*RemoteSignerError); ok { + return err + } + time.Sleep(sc.timeout) + } + return fmt.Errorf("exhausted all attempts to sign proposal: %w", err) +} diff --git a/privval/secret_connection.go b/privval/secret_connection.go new file mode 100644 index 000000000..10f7653d7 --- /dev/null +++ b/privval/secret_connection.go @@ -0,0 +1,469 @@ +package privval + +import ( + "bytes" + "crypto/cipher" + crand "crypto/rand" + "crypto/sha256" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "net" + "time" + + gogotypes "github.com/gogo/protobuf/types" + "github.com/gtank/merlin" + pool "github.com/libp2p/go-buffer-pool" + "golang.org/x/crypto/chacha20poly1305" + "golang.org/x/crypto/curve25519" + "golang.org/x/crypto/hkdf" + "golang.org/x/crypto/nacl/box" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/libs/async" + "github.com/tendermint/tendermint/libs/protoio" + tmsync "github.com/tendermint/tendermint/libs/sync" + tmprivval "github.com/tendermint/tendermint/proto/tendermint/privval" +) + +// This code has been duplicated from p2p/conn prior to the P2P refactor. +// It is left here temporarily until we migrate privval to gRPC. 
+// https://github.com/tendermint/tendermint/issues/4698 + +// 4 + 1024 == 1028 total frame size +const ( + dataLenSize = 4 + dataMaxSize = 1024 + totalFrameSize = dataMaxSize + dataLenSize + aeadSizeOverhead = 16 // overhead of poly 1305 authentication tag + aeadKeySize = chacha20poly1305.KeySize + aeadNonceSize = chacha20poly1305.NonceSize +) + +var ( + ErrSmallOrderRemotePubKey = errors.New("detected low order point from remote peer") + + labelEphemeralLowerPublicKey = []byte("EPHEMERAL_LOWER_PUBLIC_KEY") + labelEphemeralUpperPublicKey = []byte("EPHEMERAL_UPPER_PUBLIC_KEY") + labelDHSecret = []byte("DH_SECRET") + labelSecretConnectionMac = []byte("SECRET_CONNECTION_MAC") + + secretConnKeyAndChallengeGen = []byte("TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN") +) + +// SecretConnection implements net.Conn. +// It is an implementation of the STS protocol. +// See https://github.com/tendermint/tendermint/blob/0.1/docs/sts-final.pdf for +// details on the protocol. +// +// Consumers of the SecretConnection are responsible for authenticating +// the remote peer's pubkey against known information, like a nodeID. +// Otherwise they are vulnerable to MITM. +// (TODO(ismail): see also https://github.com/tendermint/tendermint/issues/3010) +type SecretConnection struct { + + // immutable + recvAead cipher.AEAD + sendAead cipher.AEAD + + remPubKey crypto.PubKey + conn io.ReadWriteCloser + + // net.Conn must be thread safe: + // https://golang.org/pkg/net/#Conn. + // Since we have internal mutable state, + // we need mtxs. But recv and send states + // are independent, so we can use two mtxs. + // All .Read are covered by recvMtx, + // all .Write are covered by sendMtx. + recvMtx tmsync.Mutex + recvBuffer []byte + recvNonce *[aeadNonceSize]byte + + sendMtx tmsync.Mutex + sendNonce *[aeadNonceSize]byte +} + +// MakeSecretConnection performs handshake and returns a new authenticated +// SecretConnection. +// Returns nil if there is an error in handshake. +// Caller should call conn.Close() +// See docs/sts-final.pdf for more information. +func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (*SecretConnection, error) { + var ( + locPubKey = locPrivKey.PubKey() + ) + + // Generate ephemeral keys for perfect forward secrecy. + locEphPub, locEphPriv := genEphKeys() + + // Write local ephemeral pubkey and receive one too. + // NOTE: every 32-byte string is accepted as a Curve25519 public key (see + // DJB's Curve25519 paper: http://cr.yp.to/ecdh/curve25519-20060209.pdf) + remEphPub, err := shareEphPubKey(conn, locEphPub) + if err != nil { + return nil, err + } + + // Sort by lexical order. + loEphPub, hiEphPub := sort32(locEphPub, remEphPub) + + transcript := merlin.NewTranscript("TENDERMINT_SECRET_CONNECTION_TRANSCRIPT_HASH") + + transcript.AppendMessage(labelEphemeralLowerPublicKey, loEphPub[:]) + transcript.AppendMessage(labelEphemeralUpperPublicKey, hiEphPub[:]) + + // Check if the local ephemeral public key was the least, lexicographically + // sorted. + locIsLeast := bytes.Equal(locEphPub[:], loEphPub[:]) + + // Compute common diffie hellman secret using X25519. + dhSecret, err := computeDHSecret(remEphPub, locEphPriv) + if err != nil { + return nil, err + } + + transcript.AppendMessage(labelDHSecret, dhSecret[:]) + + // Generate the secret used for receiving, sending, challenge via HKDF-SHA2 + // on the transcript state (which itself also uses HKDF-SHA2 to derive a key + // from the dhSecret). 
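+	// The two HKDF output halves are assigned to recvSecret/sendSecret
+	// according to locIsLeast (see deriveSecrets), so the peers end up with
+	// mirrored send/receive keys without any further negotiation.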
+ recvSecret, sendSecret := deriveSecrets(dhSecret, locIsLeast) + + const challengeSize = 32 + var challenge [challengeSize]byte + challengeSlice := transcript.ExtractBytes(labelSecretConnectionMac, challengeSize) + + copy(challenge[:], challengeSlice[0:challengeSize]) + + sendAead, err := chacha20poly1305.New(sendSecret[:]) + if err != nil { + return nil, errors.New("invalid send SecretConnection Key") + } + recvAead, err := chacha20poly1305.New(recvSecret[:]) + if err != nil { + return nil, errors.New("invalid receive SecretConnection Key") + } + + sc := &SecretConnection{ + conn: conn, + recvBuffer: nil, + recvNonce: new([aeadNonceSize]byte), + sendNonce: new([aeadNonceSize]byte), + recvAead: recvAead, + sendAead: sendAead, + } + + // Sign the challenge bytes for authentication. + locSignature, err := signChallenge(&challenge, locPrivKey) + if err != nil { + return nil, err + } + + // Share (in secret) each other's pubkey & challenge signature + authSigMsg, err := shareAuthSignature(sc, locPubKey, locSignature) + if err != nil { + return nil, err + } + + remPubKey, remSignature := authSigMsg.Key, authSigMsg.Sig + if _, ok := remPubKey.(ed25519.PubKey); !ok { + return nil, fmt.Errorf("expected ed25519 pubkey, got %T", remPubKey) + } + if !remPubKey.VerifySignature(challenge[:], remSignature) { + return nil, errors.New("challenge verification failed") + } + + // We've authorized. + sc.remPubKey = remPubKey + return sc, nil +} + +// RemotePubKey returns authenticated remote pubkey +func (sc *SecretConnection) RemotePubKey() crypto.PubKey { + return sc.remPubKey +} + +// Writes encrypted frames of `totalFrameSize + aeadSizeOverhead`. +// CONTRACT: data smaller than dataMaxSize is written atomically. +func (sc *SecretConnection) Write(data []byte) (n int, err error) { + sc.sendMtx.Lock() + defer sc.sendMtx.Unlock() + + for 0 < len(data) { + if err := func() error { + var sealedFrame = pool.Get(aeadSizeOverhead + totalFrameSize) + var frame = pool.Get(totalFrameSize) + defer func() { + pool.Put(sealedFrame) + pool.Put(frame) + }() + var chunk []byte + if dataMaxSize < len(data) { + chunk = data[:dataMaxSize] + data = data[dataMaxSize:] + } else { + chunk = data + data = nil + } + chunkLength := len(chunk) + binary.LittleEndian.PutUint32(frame, uint32(chunkLength)) + copy(frame[dataLenSize:], chunk) + + // encrypt the frame + sc.sendAead.Seal(sealedFrame[:0], sc.sendNonce[:], frame, nil) + incrNonce(sc.sendNonce) + // end encryption + + _, err = sc.conn.Write(sealedFrame) + if err != nil { + return err + } + n += len(chunk) + return nil + }(); err != nil { + return n, err + } + } + return n, err +} + +// CONTRACT: data smaller than dataMaxSize is read atomically. +func (sc *SecretConnection) Read(data []byte) (n int, err error) { + sc.recvMtx.Lock() + defer sc.recvMtx.Unlock() + + // read off and update the recvBuffer, if non-empty + if 0 < len(sc.recvBuffer) { + n = copy(data, sc.recvBuffer) + sc.recvBuffer = sc.recvBuffer[n:] + return + } + + // read off the conn + var sealedFrame = pool.Get(aeadSizeOverhead + totalFrameSize) + defer pool.Put(sealedFrame) + _, err = io.ReadFull(sc.conn, sealedFrame) + if err != nil { + return + } + + // decrypt the frame. 
+ // reads and updates the sc.recvNonce + var frame = pool.Get(totalFrameSize) + defer pool.Put(frame) + _, err = sc.recvAead.Open(frame[:0], sc.recvNonce[:], sealedFrame, nil) + if err != nil { + return n, fmt.Errorf("failed to decrypt SecretConnection: %w", err) + } + incrNonce(sc.recvNonce) + // end decryption + + // copy checkLength worth into data, + // set recvBuffer to the rest. + var chunkLength = binary.LittleEndian.Uint32(frame) // read the first four bytes + if chunkLength > dataMaxSize { + return 0, errors.New("chunkLength is greater than dataMaxSize") + } + var chunk = frame[dataLenSize : dataLenSize+chunkLength] + n = copy(data, chunk) + if n < len(chunk) { + sc.recvBuffer = make([]byte, len(chunk)-n) + copy(sc.recvBuffer, chunk[n:]) + } + return n, err +} + +// Implements net.Conn +// nolint +func (sc *SecretConnection) Close() error { return sc.conn.Close() } +func (sc *SecretConnection) LocalAddr() net.Addr { return sc.conn.(net.Conn).LocalAddr() } +func (sc *SecretConnection) RemoteAddr() net.Addr { return sc.conn.(net.Conn).RemoteAddr() } +func (sc *SecretConnection) SetDeadline(t time.Time) error { return sc.conn.(net.Conn).SetDeadline(t) } +func (sc *SecretConnection) SetReadDeadline(t time.Time) error { + return sc.conn.(net.Conn).SetReadDeadline(t) +} +func (sc *SecretConnection) SetWriteDeadline(t time.Time) error { + return sc.conn.(net.Conn).SetWriteDeadline(t) +} + +func genEphKeys() (ephPub, ephPriv *[32]byte) { + var err error + // TODO: Probably not a problem but ask Tony: different from the rust implementation (uses x25519-dalek), + // we do not "clamp" the private key scalar: + // see: https://github.com/dalek-cryptography/x25519-dalek/blob/34676d336049df2bba763cc076a75e47ae1f170f/src/x25519.rs#L56-L74 + ephPub, ephPriv, err = box.GenerateKey(crand.Reader) + if err != nil { + panic("Could not generate ephemeral key-pair") + } + return +} + +func shareEphPubKey(conn io.ReadWriter, locEphPub *[32]byte) (remEphPub *[32]byte, err error) { + + // Send our pubkey and receive theirs in tandem. + var trs, _ = async.Parallel( + func(_ int) (val interface{}, abort bool, err error) { + lc := *locEphPub + _, err = protoio.NewDelimitedWriter(conn).WriteMsg(&gogotypes.BytesValue{Value: lc[:]}) + if err != nil { + return nil, true, err // abort + } + return nil, false, nil + }, + func(_ int) (val interface{}, abort bool, err error) { + var bytes gogotypes.BytesValue + err = protoio.NewDelimitedReader(conn, 1024*1024).ReadMsg(&bytes) + if err != nil { + return nil, true, err // abort + } + + var _remEphPub [32]byte + copy(_remEphPub[:], bytes.Value) + return _remEphPub, false, nil + }, + ) + + // If error: + if trs.FirstError() != nil { + err = trs.FirstError() + return + } + + // Otherwise: + var _remEphPub = trs.FirstValue().([32]byte) + return &_remEphPub, nil +} + +func deriveSecrets( + dhSecret *[32]byte, + locIsLeast bool, +) (recvSecret, sendSecret *[aeadKeySize]byte) { + hash := sha256.New + hkdf := hkdf.New(hash, dhSecret[:], nil, secretConnKeyAndChallengeGen) + // get enough data for 2 aead keys, and a 32 byte challenge + res := new([2*aeadKeySize + 32]byte) + _, err := io.ReadFull(hkdf, res[:]) + if err != nil { + panic(err) + } + + recvSecret = new([aeadKeySize]byte) + sendSecret = new([aeadKeySize]byte) + + // bytes 0 through aeadKeySize - 1 are one aead key. + // bytes aeadKeySize through 2*aeadKeySize -1 are another aead key. + // which key corresponds to sending and receiving key depends on whether + // the local key is less than the remote key. 
+ if locIsLeast { + copy(recvSecret[:], res[0:aeadKeySize]) + copy(sendSecret[:], res[aeadKeySize:aeadKeySize*2]) + } else { + copy(sendSecret[:], res[0:aeadKeySize]) + copy(recvSecret[:], res[aeadKeySize:aeadKeySize*2]) + } + + return +} + +// computeDHSecret computes a Diffie-Hellman shared secret key +// from our own local private key and the other's public key. +func computeDHSecret(remPubKey, locPrivKey *[32]byte) (*[32]byte, error) { + shrKey, err := curve25519.X25519(locPrivKey[:], remPubKey[:]) + if err != nil { + return nil, err + } + var shrKeyArray [32]byte + copy(shrKeyArray[:], shrKey) + return &shrKeyArray, nil +} + +func sort32(foo, bar *[32]byte) (lo, hi *[32]byte) { + if bytes.Compare(foo[:], bar[:]) < 0 { + lo = foo + hi = bar + } else { + lo = bar + hi = foo + } + return +} + +func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKey) ([]byte, error) { + signature, err := locPrivKey.Sign(challenge[:]) + if err != nil { + return nil, err + } + return signature, nil +} + +type authSigMessage struct { + Key crypto.PubKey + Sig []byte +} + +func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte) (recvMsg authSigMessage, err error) { + + // Send our info and receive theirs in tandem. + var trs, _ = async.Parallel( + func(_ int) (val interface{}, abort bool, err error) { + pbpk, err := cryptoenc.PubKeyToProto(pubKey) + if err != nil { + return nil, true, err + } + _, err = protoio.NewDelimitedWriter(sc).WriteMsg(&tmprivval.AuthSigMessage{PubKey: pbpk, Sig: signature}) + if err != nil { + return nil, true, err // abort + } + return nil, false, nil + }, + func(_ int) (val interface{}, abort bool, err error) { + var pba tmprivval.AuthSigMessage + err = protoio.NewDelimitedReader(sc, 1024*1024).ReadMsg(&pba) + if err != nil { + return nil, true, err // abort + } + + pk, err := cryptoenc.PubKeyFromProto(pba.PubKey) + if err != nil { + return nil, true, err // abort + } + + _recvMsg := authSigMessage{ + Key: pk, + Sig: pba.Sig, + } + return _recvMsg, false, nil + }, + ) + + // If error: + if trs.FirstError() != nil { + err = trs.FirstError() + return + } + + var _recvMsg = trs.FirstValue().(authSigMessage) + return _recvMsg, nil +} + +//-------------------------------------------------------------------------------- + +// Increment nonce little-endian by 1 with wraparound. +// Due to chacha20poly1305 expecting a 12 byte nonce we do not use the first four +// bytes. We only increment a 64 bit unsigned int in the remaining 8 bytes +// (little-endian in nonce[4:]). +func incrNonce(nonce *[aeadNonceSize]byte) { + counter := binary.LittleEndian.Uint64(nonce[4:]) + if counter == math.MaxUint64 { + // Terminates the session and makes sure the nonce would not be re-used.
+ // See https://github.com/tendermint/tendermint/issues/3531 + panic("can't increase nonce without overflow") + } + counter++ + binary.LittleEndian.PutUint64(nonce[4:], counter) +} diff --git a/privval/signer_client.go b/privval/signer_client.go index 3e69c6c08..aecb0381e 100644 --- a/privval/signer_client.go +++ b/privval/signer_client.go @@ -4,9 +4,10 @@ import ( "fmt" "time" - "github.com/pkg/errors" - "github.com/tendermint/tendermint/crypto" + cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -14,20 +15,21 @@ import ( // Handles remote validator connections that provide signing services type SignerClient struct { endpoint *SignerListenerEndpoint + chainID string } var _ types.PrivValidator = (*SignerClient)(nil) // NewSignerClient returns an instance of SignerClient. // it will start the endpoint (if not already started) -func NewSignerClient(endpoint *SignerListenerEndpoint) (*SignerClient, error) { +func NewSignerClient(endpoint *SignerListenerEndpoint, chainID string) (*SignerClient, error) { if !endpoint.IsRunning() { if err := endpoint.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start listener endpoint") + return nil, fmt.Errorf("failed to start listener endpoint: %w", err) } } - return &SignerClient{endpoint: endpoint}, nil + return &SignerClient{endpoint: endpoint, chainID: chainID}, nil } // Close closes the underlying connection @@ -50,16 +52,14 @@ func (sc *SignerClient) WaitForConnection(maxWait time.Duration) error { // Ping sends a ping request to the remote signer func (sc *SignerClient) Ping() error { - response, err := sc.endpoint.SendRequest(&PingRequest{}) - + response, err := sc.endpoint.SendRequest(mustWrapMsg(&privvalproto.PingRequest{})) if err != nil { sc.endpoint.Logger.Error("SignerClient::Ping", "err", err) return nil } - _, ok := response.(*PingResponse) - if !ok { - sc.endpoint.Logger.Error("SignerClient::Ping", "err", "response != PingResponse") + pb := response.GetPingResponse() + if pb == nil { return err } @@ -69,65 +69,65 @@ func (sc *SignerClient) Ping() error { // GetPubKey retrieves a public key from a remote signer // returns an error if client is not able to provide the key func (sc *SignerClient) GetPubKey() (crypto.PubKey, error) { - response, err := sc.endpoint.SendRequest(&PubKeyRequest{}) + response, err := sc.endpoint.SendRequest(mustWrapMsg(&privvalproto.PubKeyRequest{ChainId: sc.chainID})) if err != nil { - sc.endpoint.Logger.Error("SignerClient::GetPubKey", "err", err) - return nil, errors.Wrap(err, "send") + return nil, fmt.Errorf("send: %w", err) } - pubKeyResp, ok := response.(*PubKeyResponse) - if !ok { - sc.endpoint.Logger.Error("SignerClient::GetPubKey", "err", "response != PubKeyResponse") - return nil, errors.Errorf("unexpected response type %T", response) + resp := response.GetPubKeyResponse() + if resp == nil { + return nil, ErrUnexpectedResponse + } + if resp.Error != nil { + return nil, &RemoteSignerError{Code: int(resp.Error.Code), Description: resp.Error.Description} } - if pubKeyResp.Error != nil { - sc.endpoint.Logger.Error("failed to get private validator's public key", "err", pubKeyResp.Error) - return nil, fmt.Errorf("remote error: %w", pubKeyResp.Error) + pk, err := cryptoenc.PubKeyFromProto(resp.PubKey) + if err != nil { + return nil, err } - return pubKeyResp.PubKey, nil + return pk, nil } // 
SignVote requests a remote signer to sign a vote -func (sc *SignerClient) SignVote(chainID string, vote *types.Vote) error { - response, err := sc.endpoint.SendRequest(&SignVoteRequest{Vote: vote}) +func (sc *SignerClient) SignVote(chainID string, vote *tmproto.Vote) error { + response, err := sc.endpoint.SendRequest(mustWrapMsg(&privvalproto.SignVoteRequest{Vote: vote, ChainId: chainID})) if err != nil { - sc.endpoint.Logger.Error("SignerClient::SignVote", "err", err) return err } - resp, ok := response.(*SignedVoteResponse) - if !ok { - sc.endpoint.Logger.Error("SignerClient::GetPubKey", "err", "response != SignedVoteResponse") + resp := response.GetSignedVoteResponse() + if resp == nil { return ErrUnexpectedResponse } - if resp.Error != nil { - return resp.Error + return &RemoteSignerError{Code: int(resp.Error.Code), Description: resp.Error.Description} } - *vote = *resp.Vote + + *vote = resp.Vote return nil } // SignProposal requests a remote signer to sign a proposal -func (sc *SignerClient) SignProposal(chainID string, proposal *types.Proposal) error { - response, err := sc.endpoint.SendRequest(&SignProposalRequest{Proposal: proposal}) +func (sc *SignerClient) SignProposal(chainID string, proposal *tmproto.Proposal) error { + response, err := sc.endpoint.SendRequest(mustWrapMsg( + &privvalproto.SignProposalRequest{Proposal: proposal, ChainId: chainID}, + )) if err != nil { - sc.endpoint.Logger.Error("SignerClient::SignProposal", "err", err) return err } - resp, ok := response.(*SignedProposalResponse) - if !ok { - sc.endpoint.Logger.Error("SignerClient::SignProposal", "err", "response != SignedProposalResponse") + resp := response.GetSignedProposalResponse() + if resp == nil { return ErrUnexpectedResponse } if resp.Error != nil { - return resp.Error + return &RemoteSignerError{Code: int(resp.Error.Code), Description: resp.Error.Description} } - *proposal = *resp.Proposal + + *proposal = resp.Proposal return nil } diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go index 2156a8cf2..019fd2c96 100644 --- a/privval/signer_client_test.go +++ b/privval/signer_client_test.go @@ -8,7 +8,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/tmhash" tmrand "github.com/tendermint/tendermint/libs/rand" + cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" + privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -29,7 +34,7 @@ func getSignerTestCases(t *testing.T) []signerTestCase { // get a pair of signer listener, signer dialer endpoints sl, sd := getMockEndpoints(t, dtc.addr, dtc.dialer) - sc, err := NewSignerClient(sl) + sc, err := NewSignerClient(sl, chainID) require.NoError(t, err) ss := NewSignerServer(sd, chainID, mockPV) @@ -61,8 +66,17 @@ func TestSignerClose(t *testing.T) { func TestSignerPing(t *testing.T) { for _, tc := range getSignerTestCases(t) { - defer tc.signerServer.Stop() - defer tc.signerClient.Close() + tc := tc + t.Cleanup(func() { + if err := tc.signerServer.Stop(); err != nil { + t.Error(err) + } + }) + t.Cleanup(func() { + if err := tc.signerClient.Close(); err != nil { + t.Error(err) + } + }) err := tc.signerClient.Ping() assert.NoError(t, err) @@ -71,8 +85,17 @@ func TestSignerPing(t *testing.T) { func TestSignerGetPubKey(t *testing.T) { for _, tc := range 
getSignerTestCases(t) { - defer tc.signerServer.Stop() - defer tc.signerClient.Close() + tc := tc + t.Cleanup(func() { + if err := tc.signerServer.Stop(); err != nil { + t.Error(err) + } + }) + t.Cleanup(func() { + if err := tc.signerClient.Close(); err != nil { + t.Error(err) + } + }) pubKey, err := tc.signerClient.GetPubKey() require.NoError(t, err) @@ -94,14 +117,38 @@ func TestSignerGetPubKey(t *testing.T) { func TestSignerProposal(t *testing.T) { for _, tc := range getSignerTestCases(t) { ts := time.Now() - want := &types.Proposal{Timestamp: ts} - have := &types.Proposal{Timestamp: ts} - - defer tc.signerServer.Stop() - defer tc.signerClient.Close() + hash := tmrand.Bytes(tmhash.Size) + have := &types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + Round: 2, + POLRound: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + } + want := &types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + Round: 2, + POLRound: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + } - require.NoError(t, tc.mockPV.SignProposal(tc.chainID, want)) - require.NoError(t, tc.signerClient.SignProposal(tc.chainID, have)) + tc := tc + t.Cleanup(func() { + if err := tc.signerServer.Stop(); err != nil { + t.Error(err) + } + }) + t.Cleanup(func() { + if err := tc.signerClient.Close(); err != nil { + t.Error(err) + } + }) + + require.NoError(t, tc.mockPV.SignProposal(tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignProposal(tc.chainID, have.ToProto())) assert.Equal(t, want.Signature, have.Signature) } @@ -110,14 +157,42 @@ func TestSignerProposal(t *testing.T) { func TestSignerVote(t *testing.T) { for _, tc := range getSignerTestCases(t) { ts := time.Now() - want := &types.Vote{Timestamp: ts, Type: types.PrecommitType} - have := &types.Vote{Timestamp: ts, Type: types.PrecommitType} + hash := tmrand.Bytes(tmhash.Size) + valAddr := tmrand.Bytes(crypto.AddressSize) + want := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, + } - defer tc.signerServer.Stop() - defer tc.signerClient.Close() + have := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, + } - require.NoError(t, tc.mockPV.SignVote(tc.chainID, want)) - require.NoError(t, tc.signerClient.SignVote(tc.chainID, have)) + tc := tc + t.Cleanup(func() { + if err := tc.signerServer.Stop(); err != nil { + t.Error(err) + } + }) + t.Cleanup(func() { + if err := tc.signerClient.Close(); err != nil { + t.Error(err) + } + }) + + require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto())) assert.Equal(t, want.Signature, have.Signature) } @@ -126,16 +201,44 @@ func TestSignerVote(t *testing.T) { func TestSignerVoteResetDeadline(t *testing.T) { for _, tc := range getSignerTestCases(t) { ts := time.Now() - want := &types.Vote{Timestamp: ts, Type: types.PrecommitType} - have := &types.Vote{Timestamp: ts, Type: types.PrecommitType} + hash := tmrand.Bytes(tmhash.Size) + valAddr := tmrand.Bytes(crypto.AddressSize) + want := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + 
Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, + } + + have := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, + } - defer tc.signerServer.Stop() - defer tc.signerClient.Close() + tc := tc + t.Cleanup(func() { + if err := tc.signerServer.Stop(); err != nil { + t.Error(err) + } + }) + t.Cleanup(func() { + if err := tc.signerClient.Close(); err != nil { + t.Error(err) + } + }) time.Sleep(testTimeoutReadWrite2o3) - require.NoError(t, tc.mockPV.SignVote(tc.chainID, want)) - require.NoError(t, tc.signerClient.SignVote(tc.chainID, have)) + require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto())) assert.Equal(t, want.Signature, have.Signature) // TODO(jleni): Clarify what is actually being tested @@ -143,8 +246,8 @@ func TestSignerVoteResetDeadline(t *testing.T) { // This would exceed the deadline if it was not extended by the previous message time.Sleep(testTimeoutReadWrite2o3) - require.NoError(t, tc.mockPV.SignVote(tc.chainID, want)) - require.NoError(t, tc.signerClient.SignVote(tc.chainID, have)) + require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto())) assert.Equal(t, want.Signature, have.Signature) } } @@ -152,11 +255,39 @@ func TestSignerVoteResetDeadline(t *testing.T) { func TestSignerVoteKeepAlive(t *testing.T) { for _, tc := range getSignerTestCases(t) { ts := time.Now() - want := &types.Vote{Timestamp: ts, Type: types.PrecommitType} - have := &types.Vote{Timestamp: ts, Type: types.PrecommitType} + hash := tmrand.Bytes(tmhash.Size) + valAddr := tmrand.Bytes(crypto.AddressSize) + want := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, + } - defer tc.signerServer.Stop() - defer tc.signerClient.Close() + have := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, + } + + tc := tc + t.Cleanup(func() { + if err := tc.signerServer.Stop(); err != nil { + t.Error(err) + } + }) + t.Cleanup(func() { + if err := tc.signerClient.Close(); err != nil { + t.Error(err) + } + }) // Check that even if the client does not request a // signature for a long time. 
The service is still available @@ -167,8 +298,8 @@ func TestSignerVoteKeepAlive(t *testing.T) { time.Sleep(testTimeoutReadWrite * 3) tc.signerServer.Logger.Debug("TEST: Forced Wait DONE---------------------------------------------") - require.NoError(t, tc.mockPV.SignVote(tc.chainID, want)) - require.NoError(t, tc.signerClient.SignVote(tc.chainID, have)) + require.NoError(t, tc.mockPV.SignVote(tc.chainID, want.ToProto())) + require.NoError(t, tc.signerClient.SignVote(tc.chainID, have.ToProto())) assert.Equal(t, want.Signature, have.Signature) } @@ -180,18 +311,37 @@ func TestSignerSignProposalErrors(t *testing.T) { tc.signerServer.privVal = types.NewErroringMockPV() tc.mockPV = types.NewErroringMockPV() - defer tc.signerServer.Stop() - defer tc.signerClient.Close() + tc := tc + t.Cleanup(func() { + if err := tc.signerServer.Stop(); err != nil { + t.Error(err) + } + }) + t.Cleanup(func() { + if err := tc.signerClient.Close(); err != nil { + t.Error(err) + } + }) ts := time.Now() - proposal := &types.Proposal{Timestamp: ts} - err := tc.signerClient.SignProposal(tc.chainID, proposal) + hash := tmrand.Bytes(tmhash.Size) + proposal := &types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + Round: 2, + POLRound: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + Signature: []byte("signature"), + } + + err := tc.signerClient.SignProposal(tc.chainID, proposal.ToProto()) require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) - err = tc.mockPV.SignProposal(tc.chainID, proposal) + err = tc.mockPV.SignProposal(tc.chainID, proposal.ToProto()) require.Error(t, err) - err = tc.signerClient.SignProposal(tc.chainID, proposal) + err = tc.signerClient.SignProposal(tc.chainID, proposal.ToProto()) require.Error(t, err) } } @@ -199,43 +349,61 @@ func TestSignerSignProposalErrors(t *testing.T) { func TestSignerSignVoteErrors(t *testing.T) { for _, tc := range getSignerTestCases(t) { ts := time.Now() - vote := &types.Vote{Timestamp: ts, Type: types.PrecommitType} + hash := tmrand.Bytes(tmhash.Size) + valAddr := tmrand.Bytes(crypto.AddressSize) + vote := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, + ValidatorAddress: valAddr, + ValidatorIndex: 1, + Signature: []byte("signature"), + } // Replace signer service privval with one that always fails tc.signerServer.privVal = types.NewErroringMockPV() tc.mockPV = types.NewErroringMockPV() - defer tc.signerServer.Stop() - defer tc.signerClient.Close() - - err := tc.signerClient.SignVote(tc.chainID, vote) + tc := tc + t.Cleanup(func() { + if err := tc.signerServer.Stop(); err != nil { + t.Error(err) + } + }) + t.Cleanup(func() { + if err := tc.signerClient.Close(); err != nil { + t.Error(err) + } + }) + + err := tc.signerClient.SignVote(tc.chainID, vote.ToProto()) require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) - err = tc.mockPV.SignVote(tc.chainID, vote) + err = tc.mockPV.SignVote(tc.chainID, vote.ToProto()) require.Error(t, err) - err = tc.signerClient.SignVote(tc.chainID, vote) + err = tc.signerClient.SignVote(tc.chainID, vote.ToProto()) require.Error(t, err) } } -func brokenHandler(privVal types.PrivValidator, request SignerMessage, chainID string) (SignerMessage, error) { - var res SignerMessage +func brokenHandler(privVal types.PrivValidator, request privvalproto.Message, + chainID 
string) (privvalproto.Message, error) { + var res privvalproto.Message var err error - switch r := request.(type) { - + switch r := request.Sum.(type) { // This is broken and will answer most requests with a pubkey response - case *PubKeyRequest: - res = &PubKeyResponse{nil, nil} - case *SignVoteRequest: - res = &PubKeyResponse{nil, nil} - case *SignProposalRequest: - res = &PubKeyResponse{nil, nil} - - case *PingRequest: - err, res = nil, &PingResponse{} - + case *privvalproto.Message_PubKeyRequest: + res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: cryptoproto.PublicKey{}, Error: nil}) + case *privvalproto.Message_SignVoteRequest: + res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: cryptoproto.PublicKey{}, Error: nil}) + case *privvalproto.Message_SignProposalRequest: + res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: cryptoproto.PublicKey{}, Error: nil}) + case *privvalproto.Message_PingRequest: + err, res = nil, mustWrapMsg(&privvalproto.PingResponse{}) default: err = fmt.Errorf("unknown msg: %v", r) } @@ -250,13 +418,22 @@ func TestSignerUnexpectedResponse(t *testing.T) { tc.signerServer.SetRequestHandler(brokenHandler) - defer tc.signerServer.Stop() - defer tc.signerClient.Close() + tc := tc + t.Cleanup(func() { + if err := tc.signerServer.Stop(); err != nil { + t.Error(err) + } + }) + t.Cleanup(func() { + if err := tc.signerClient.Close(); err != nil { + t.Error(err) + } + }) ts := time.Now() - want := &types.Vote{Timestamp: ts, Type: types.PrecommitType} + want := &types.Vote{Timestamp: ts, Type: tmproto.PrecommitType} - e := tc.signerClient.SignVote(tc.chainID, want) - assert.EqualError(t, e, "received unexpected response") + e := tc.signerClient.SignVote(tc.chainID, want.ToProto()) + assert.EqualError(t, e, "empty response") } } diff --git a/privval/signer_dialer_endpoint.go b/privval/signer_dialer_endpoint.go index 7336f64be..93d26b043 100644 --- a/privval/signer_dialer_endpoint.go +++ b/privval/signer_dialer_endpoint.go @@ -15,24 +15,26 @@ const ( // SignerServiceEndpointOption sets an optional parameter on the SignerDialerEndpoint. type SignerServiceEndpointOption func(*SignerDialerEndpoint) -// SignerDialerEndpointTimeoutReadWrite sets the read and write timeout for connections -// from external signing processes. +// SignerDialerEndpointTimeoutReadWrite sets the read and write timeout for +// connections from client processes. func SignerDialerEndpointTimeoutReadWrite(timeout time.Duration) SignerServiceEndpointOption { return func(ss *SignerDialerEndpoint) { ss.timeoutReadWrite = timeout } } -// SignerDialerEndpointConnRetries sets the amount of attempted retries to acceptNewConnection. +// SignerDialerEndpointConnRetries sets the number of attempted retries to +// acceptNewConnection. func SignerDialerEndpointConnRetries(retries int) SignerServiceEndpointOption { return func(ss *SignerDialerEndpoint) { ss.maxConnRetries = retries } } -// SignerDialerEndpointRetryWaitInterval sets the retry wait interval to a custom value +// SignerDialerEndpointRetryWaitInterval sets the retry wait interval to a +// custom value. func SignerDialerEndpointRetryWaitInterval(interval time.Duration) SignerServiceEndpointOption { return func(ss *SignerDialerEndpoint) { ss.retryWait = interval } } -// SignerDialerEndpoint dials using its dialer and responds to any -// signature requests using its privVal. +// SignerDialerEndpoint dials using its dialer and responds to any signature +// requests using its privVal.
type SignerDialerEndpoint struct { signerEndpoint @@ -48,6 +50,7 @@ type SignerDialerEndpoint struct { func NewSignerDialerEndpoint( logger log.Logger, dialer SocketDialer, + options ...SignerServiceEndpointOption, ) *SignerDialerEndpoint { sd := &SignerDialerEndpoint{ @@ -59,6 +62,10 @@ func NewSignerDialerEndpoint( sd.BaseService = *service.NewBaseService(logger, "SignerDialerEndpoint", sd) sd.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second + for _, optionFunc := range options { + optionFunc(sd) + } + return sd } diff --git a/privval/signer_endpoint.go b/privval/signer_endpoint.go index 98c64fb89..eb2ed442f 100644 --- a/privval/signer_endpoint.go +++ b/privval/signer_endpoint.go @@ -3,22 +3,22 @@ package privval import ( "fmt" "net" - "sync" "time" - "github.com/pkg/errors" - + "github.com/tendermint/tendermint/libs/protoio" "github.com/tendermint/tendermint/libs/service" + tmsync "github.com/tendermint/tendermint/libs/sync" + privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" ) const ( - defaultTimeoutReadWriteSeconds = 3 + defaultTimeoutReadWriteSeconds = 5 ) type signerEndpoint struct { service.BaseService - connMtx sync.Mutex + connMtx tmsync.Mutex conn net.Conn timeoutReadWrite time.Duration @@ -80,14 +80,13 @@ func (se *signerEndpoint) DropConnection() { } // ReadMessage reads a message from the endpoint -func (se *signerEndpoint) ReadMessage() (msg SignerMessage, err error) { +func (se *signerEndpoint) ReadMessage() (msg privvalproto.Message, err error) { se.connMtx.Lock() defer se.connMtx.Unlock() if !se.isConnected() { - return nil, fmt.Errorf("endpoint is not connected") + return msg, fmt.Errorf("endpoint is not connected: %w", ErrNoConnection) } - // Reset read deadline deadline := time.Now().Add(se.timeoutReadWrite) @@ -95,15 +94,16 @@ func (se *signerEndpoint) ReadMessage() (msg SignerMessage, err error) { if err != nil { return } - const maxRemoteSignerMsgSize = 1024 * 10 - _, err = cdc.UnmarshalBinaryLengthPrefixedReader(se.conn, &msg, maxRemoteSignerMsgSize) + protoReader := protoio.NewDelimitedReader(se.conn, maxRemoteSignerMsgSize) + err = protoReader.ReadMsg(&msg) if _, ok := err.(timeoutError); ok { if err != nil { - err = errors.Wrap(ErrReadTimeout, err.Error()) + err = fmt.Errorf("%v: %w", err, ErrReadTimeout) } else { - err = errors.Wrap(ErrReadTimeout, "Empty error") + err = fmt.Errorf("empty error: %w", ErrReadTimeout) } + se.Logger.Debug("Dropping [read]", "obj", se) se.dropConnection() } @@ -112,14 +112,16 @@ func (se *signerEndpoint) ReadMessage() (msg SignerMessage, err error) { } // WriteMessage writes a message from the endpoint -func (se *signerEndpoint) WriteMessage(msg SignerMessage) (err error) { +func (se *signerEndpoint) WriteMessage(msg privvalproto.Message) (err error) { se.connMtx.Lock() defer se.connMtx.Unlock() if !se.isConnected() { - return errors.Wrap(ErrNoConnection, "endpoint is not connected") + return fmt.Errorf("endpoint is not connected: %w", ErrNoConnection) } + protoWriter := protoio.NewDelimitedWriter(se.conn) + // Reset read deadline deadline := time.Now().Add(se.timeoutReadWrite) err = se.conn.SetWriteDeadline(deadline) @@ -127,12 +129,12 @@ func (se *signerEndpoint) WriteMessage(msg SignerMessage) (err error) { return } - _, err = cdc.MarshalBinaryLengthPrefixedWriter(se.conn, msg) + _, err = protoWriter.WriteMsg(&msg) if _, ok := err.(timeoutError); ok { if err != nil { - err = errors.Wrap(ErrWriteTimeout, err.Error()) + err = fmt.Errorf("%v: %w", err, ErrWriteTimeout) } else { 
- err = errors.Wrap(ErrWriteTimeout, "Empty error") + err = fmt.Errorf("empty error: %w", ErrWriteTimeout) } se.dropConnection() } diff --git a/privval/signer_listener_endpoint.go b/privval/signer_listener_endpoint.go index 70a23181d..fefa68317 100644 --- a/privval/signer_listener_endpoint.go +++ b/privval/signer_listener_endpoint.go @@ -3,18 +3,30 @@ package privval import ( "fmt" "net" - "sync" "time" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" + tmsync "github.com/tendermint/tendermint/libs/sync" + privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" ) -// SignerValidatorEndpointOption sets an optional parameter on the SocketVal. -type SignerValidatorEndpointOption func(*SignerListenerEndpoint) +// SignerListenerEndpointOption sets an optional parameter on the SignerListenerEndpoint. +type SignerListenerEndpointOption func(*SignerListenerEndpoint) -// SignerListenerEndpoint listens for an external process to dial in -// and keeps the connection alive by dropping and reconnecting +// SignerListenerEndpointTimeoutReadWrite sets the read and write timeout for +// connections from external signing processes. +// +// Default: 5s +func SignerListenerEndpointTimeoutReadWrite(timeout time.Duration) SignerListenerEndpointOption { + return func(sl *SignerListenerEndpoint) { sl.signerEndpoint.timeoutReadWrite = timeout } +} + +// SignerListenerEndpoint listens for an external process to dial in and keeps +// the connection alive by dropping and reconnecting. +// +// The process will send pings every ~3s (read/write timeout * 2/3) to keep the +// connection alive. type SignerListenerEndpoint struct { signerEndpoint @@ -24,23 +36,30 @@ type SignerListenerEndpoint struct { timeoutAccept time.Duration pingTimer *time.Ticker + pingInterval time.Duration - instanceMtx sync.Mutex // Ensures instance public methods access, i.e. SendRequest + instanceMtx tmsync.Mutex // Ensures instance public methods access, i.e. SendRequest } // NewSignerListenerEndpoint returns an instance of SignerListenerEndpoint. func NewSignerListenerEndpoint( logger log.Logger, listener net.Listener, + options ...SignerListenerEndpointOption, ) *SignerListenerEndpoint { - sc := &SignerListenerEndpoint{ + sl := &SignerListenerEndpoint{ listener: listener, timeoutAccept: defaultTimeoutAcceptSeconds * time.Second, } - sc.BaseService = *service.NewBaseService(logger, "SignerListenerEndpoint", sc) - sc.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second - return sc + sl.BaseService = *service.NewBaseService(logger, "SignerListenerEndpoint", sl) + sl.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second + + for _, optionFunc := range options { + optionFunc(sl) + } + + return sl } // OnStart implements service.Service. 
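Both constructors now share the same functional-options pattern: variadic option funcs applied after the defaults are set, so existing call sites keep compiling. A minimal sketch of how a caller might wire up the listener endpoint with a custom timeout; the raw TCP listener and the testing logger are illustrative simplifications (production code would wrap the listener in the package's TCPListener from socket_listeners.go for the secret-connection handshake):

package main

import (
	"net"
	"time"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/privval"
)

func main() {
	// Plain TCP listener for illustration only.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}

	// Override the 5s default read/write timeout. OnStart derives the
	// ping interval from this value (2/3 of it), as the next hunk shows.
	sl := privval.NewSignerListenerEndpoint(
		log.TestingLogger(),
		ln,
		privval.SignerListenerEndpointTimeoutReadWrite(3*time.Second),
	)
	if err := sl.Start(); err != nil {
		panic(err)
	}
	defer func() { _ = sl.Stop() }()
}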
@@ -48,7 +67,9 @@ func (sl *SignerListenerEndpoint) OnStart() error { sl.connectRequestCh = make(chan struct{}) sl.connectionAvailableCh = make(chan net.Conn) - sl.pingTimer = time.NewTicker(defaultPingPeriodMilliseconds * time.Millisecond) + // NOTE: ping timeout must be less than read/write timeout + sl.pingInterval = time.Duration(sl.signerEndpoint.timeoutReadWrite.Milliseconds()*2/3) * time.Millisecond + sl.pingTimer = time.NewTicker(sl.pingInterval) go sl.serviceLoop() go sl.pingLoop() @@ -83,7 +104,7 @@ func (sl *SignerListenerEndpoint) WaitForConnection(maxWait time.Duration) error } // SendRequest ensures there is a connection, sends a request and waits for a response -func (sl *SignerListenerEndpoint) SendRequest(request SignerMessage) (SignerMessage, error) { +func (sl *SignerListenerEndpoint) SendRequest(request privvalproto.Message) (*privvalproto.Message, error) { sl.instanceMtx.Lock() defer sl.instanceMtx.Unlock() @@ -102,7 +123,10 @@ func (sl *SignerListenerEndpoint) SendRequest(request SignerMessage) (SignerMess return nil, err } - return res, nil + // Reset pingTimer to avoid sending unnecessary pings. + sl.pingTimer.Reset(sl.pingInterval) + + return &res, nil } func (sl *SignerListenerEndpoint) ensureConnection(maxWait time.Duration) error { @@ -116,6 +140,7 @@ func (sl *SignerListenerEndpoint) ensureConnection(maxWait time.Duration) error } // block until connected or timeout + sl.Logger.Info("SignerListener: Blocking for connection") sl.triggerConnect() err := sl.WaitConnection(sl.connectionAvailableCh, maxWait) if err != nil { @@ -185,7 +210,7 @@ func (sl *SignerListenerEndpoint) pingLoop() { select { case <-sl.pingTimer.C: { - _, err := sl.SendRequest(&PingRequest{}) + _, err := sl.SendRequest(mustWrapMsg(&privvalproto.PingRequest{})) if err != nil { sl.Logger.Error("SignerListener: Ping timeout") sl.triggerReconnect() diff --git a/privval/signer_listener_endpoint_test.go b/privval/signer_listener_endpoint_test.go index fbb511d24..cbd45e6ce 100644 --- a/privval/signer_listener_endpoint_test.go +++ b/privval/signer_listener_endpoint_test.go @@ -73,7 +73,11 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { err = signerServer.Start() require.NoError(t, err) - defer signerServer.Stop() + t.Cleanup(func() { + if err := signerServer.Stop(); err != nil { + t.Error(err) + } + }) select { case attempts := <-attemptCh: @@ -104,12 +108,18 @@ func TestRetryConnToRemoteSigner(t *testing.T) { signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV) startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh) - defer listenerEndpoint.Stop() + t.Cleanup(func() { + if err := listenerEndpoint.Stop(); err != nil { + t.Error(err) + } + }) require.NoError(t, signerServer.Start()) assert.True(t, signerServer.IsRunning()) <-endpointIsOpenCh - signerServer.Stop() + if err := signerServer.Stop(); err != nil { + t.Error(err) + } dialerEndpoint2 := NewSignerDialerEndpoint( logger, @@ -120,7 +130,11 @@ func TestRetryConnToRemoteSigner(t *testing.T) { // let some pings pass require.NoError(t, signerServer2.Start()) assert.True(t, signerServer2.IsRunning()) - defer signerServer2.Stop() + t.Cleanup(func() { + if err := signerServer2.Stop(); err != nil { + t.Error(err) + } + }) // give the client some time to re-establish the conn to the remote signer // should see sth like this in the logs: @@ -131,8 +145,6 @@ func TestRetryConnToRemoteSigner(t *testing.T) { } } -/////////////////////////////////// - func newSignerListenerEndpoint(logger log.Logger, addr string, timeoutReadWrite 
time.Duration) *SignerListenerEndpoint { proto, address := tmnet.ProtocolAndAddress(addr) @@ -156,7 +168,11 @@ func newSignerListenerEndpoint(logger log.Logger, addr string, timeoutReadWrite listener = tcpLn } - return NewSignerListenerEndpoint(logger, listener) + return NewSignerListenerEndpoint( + logger, + listener, + SignerListenerEndpointTimeoutReadWrite(testTimeoutReadWrite), + ) } func startListenerEndpointAsync(t *testing.T, sle *SignerListenerEndpoint, endpointIsOpenCh chan struct{}) { diff --git a/privval/signer_requestHandler.go b/privval/signer_requestHandler.go index c658abdfd..682863b19 100644 --- a/privval/signer_requestHandler.go +++ b/privval/signer_requestHandler.go @@ -4,45 +4,87 @@ import ( "fmt" "github.com/tendermint/tendermint/crypto" + cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" + privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) func DefaultValidationRequestHandler( privVal types.PrivValidator, - req SignerMessage, + req privvalproto.Message, chainID string, -) (SignerMessage, error) { - var res SignerMessage - var err error +) (privvalproto.Message, error) { + var ( + res privvalproto.Message + err error + ) + + switch r := req.Sum.(type) { + case *privvalproto.Message_PubKeyRequest: + if r.PubKeyRequest.GetChainId() != chainID { + res = mustWrapMsg(&privvalproto.PubKeyResponse{ + PubKey: cryptoproto.PublicKey{}, Error: &privvalproto.RemoteSignerError{ + Code: 0, Description: "unable to provide pubkey"}}) + return res, fmt.Errorf("want chainID: %s, got chainID: %s", r.PubKeyRequest.GetChainId(), chainID) + } - switch r := req.(type) { - case *PubKeyRequest: var pubKey crypto.PubKey pubKey, err = privVal.GetPubKey() if err != nil { - res = &PubKeyResponse{nil, &RemoteSignerError{0, err.Error()}} - } else { - res = &PubKeyResponse{pubKey, nil} + return res, err + } + pk, err := cryptoenc.PubKeyToProto(pubKey) + if err != nil { + return res, err } - case *SignVoteRequest: - err = privVal.SignVote(chainID, r.Vote) if err != nil { - res = &SignedVoteResponse{nil, &RemoteSignerError{0, err.Error()}} + res = mustWrapMsg(&privvalproto.PubKeyResponse{ + PubKey: cryptoproto.PublicKey{}, Error: &privvalproto.RemoteSignerError{Code: 0, Description: err.Error()}}) } else { - res = &SignedVoteResponse{r.Vote, nil} + res = mustWrapMsg(&privvalproto.PubKeyResponse{PubKey: pk, Error: nil}) + } + + case *privvalproto.Message_SignVoteRequest: + if r.SignVoteRequest.ChainId != chainID { + res = mustWrapMsg(&privvalproto.SignedVoteResponse{ + Vote: tmproto.Vote{}, Error: &privvalproto.RemoteSignerError{ + Code: 0, Description: "unable to sign vote"}}) + return res, fmt.Errorf("want chainID: %s, got chainID: %s", r.SignVoteRequest.GetChainId(), chainID) } - case *SignProposalRequest: - err = privVal.SignProposal(chainID, r.Proposal) + vote := r.SignVoteRequest.Vote + + err = privVal.SignVote(chainID, vote) if err != nil { - res = &SignedProposalResponse{nil, &RemoteSignerError{0, err.Error()}} + res = mustWrapMsg(&privvalproto.SignedVoteResponse{ + Vote: tmproto.Vote{}, Error: &privvalproto.RemoteSignerError{Code: 0, Description: err.Error()}}) } else { - res = &SignedProposalResponse{r.Proposal, nil} + res = mustWrapMsg(&privvalproto.SignedVoteResponse{Vote: *vote, Error: nil}) + } + + case *privvalproto.Message_SignProposalRequest: + if 
r.SignProposalRequest.GetChainId() != chainID { + res = mustWrapMsg(&privvalproto.SignedProposalResponse{ + Proposal: tmproto.Proposal{}, Error: &privvalproto.RemoteSignerError{ + Code: 0, + Description: "unable to sign proposal"}}) + return res, fmt.Errorf("want chainID: %s, got chainID: %s", r.SignProposalRequest.GetChainId(), chainID) } - case *PingRequest: - err, res = nil, &PingResponse{} + proposal := r.SignProposalRequest.Proposal + + err = privVal.SignProposal(chainID, proposal) + if err != nil { + res = mustWrapMsg(&privvalproto.SignedProposalResponse{ + Proposal: tmproto.Proposal{}, Error: &privvalproto.RemoteSignerError{Code: 0, Description: err.Error()}}) + } else { + res = mustWrapMsg(&privvalproto.SignedProposalResponse{Proposal: *proposal, Error: nil}) + } + case *privvalproto.Message_PingRequest: + err, res = nil, mustWrapMsg(&privvalproto.PingResponse{}) default: err = fmt.Errorf("unknown msg: %v", r) diff --git a/privval/signer_server.go b/privval/signer_server.go index 242423b24..c14524e36 100644 --- a/privval/signer_server.go +++ b/privval/signer_server.go @@ -2,17 +2,18 @@ package privval import ( "io" - "sync" "github.com/tendermint/tendermint/libs/service" + tmsync "github.com/tendermint/tendermint/libs/sync" + privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" "github.com/tendermint/tendermint/types" ) // ValidationRequestHandlerFunc handles different remoteSigner requests type ValidationRequestHandlerFunc func( privVal types.PrivValidator, - requestMessage SignerMessage, - chainID string) (SignerMessage, error) + requestMessage privvalproto.Message, + chainID string) (privvalproto.Message, error) type SignerServer struct { service.BaseService @@ -21,7 +22,7 @@ type SignerServer struct { chainID string privVal types.PrivValidator - handlerMtx sync.Mutex + handlerMtx tmsync.Mutex validationRequestHandler ValidationRequestHandlerFunc } @@ -70,7 +71,7 @@ func (ss *SignerServer) servicePendingRequest() { return } - var res SignerMessage + var res privvalproto.Message { // limit the scope of the lock ss.handlerMtx.Lock() @@ -82,11 +83,9 @@ func (ss *SignerServer) servicePendingRequest() { } } - if res != nil { - err = ss.endpoint.WriteMessage(res) - if err != nil { - ss.Logger.Error("SignerServer: writeMessage", "err", err) - } + err = ss.endpoint.WriteMessage(res) + if err != nil { + ss.Logger.Error("SignerServer: writeMessage", "err", err) } } diff --git a/privval/socket_dialers.go b/privval/socket_dialers.go index f9e5c7879..9be84e02d 100644 --- a/privval/socket_dialers.go +++ b/privval/socket_dialers.go @@ -1,14 +1,12 @@ package privval import ( + "errors" "net" "time" - "github.com/pkg/errors" - "github.com/tendermint/tendermint/crypto" tmnet "github.com/tendermint/tendermint/libs/net" - p2pconn "github.com/tendermint/tendermint/p2p/conn" ) // Socket errors. 
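Every branch of the new request handler funnels its reply through mustWrapMsg, which this diff uses but never defines. A plausible shape of that helper, assuming the response-side oneof wrappers (Message_PubKeyResponse and friends) mirror the request-side wrappers visible in the switch above and the generated blockchain Message wrappers further down in this diff:

package privval

import (
	"fmt"

	"github.com/gogo/protobuf/proto"

	privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
)

// Sketch only: wraps a concrete response into the privval oneof envelope,
// panicking on types the protocol does not know about. The real definition
// lives elsewhere in the package.
func mustWrapMsg(pb proto.Message) privvalproto.Message {
	msg := privvalproto.Message{}
	switch pb := pb.(type) {
	case *privvalproto.PubKeyResponse:
		msg.Sum = &privvalproto.Message_PubKeyResponse{PubKeyResponse: pb}
	case *privvalproto.SignedVoteResponse:
		msg.Sum = &privvalproto.Message_SignedVoteResponse{SignedVoteResponse: pb}
	case *privvalproto.SignedProposalResponse:
		msg.Sum = &privvalproto.Message_SignedProposalResponse{SignedProposalResponse: pb}
	case *privvalproto.PingResponse:
		msg.Sum = &privvalproto.Message_PingResponse{PingResponse: pb}
	default:
		panic(fmt.Errorf("unknown message type %T", pb))
	}
	return msg
}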
@@ -29,7 +27,7 @@ func DialTCPFn(addr string, timeoutReadWrite time.Duration, privKey crypto.PrivK err = conn.SetDeadline(deadline) } if err == nil { - conn, err = p2pconn.MakeSecretConnection(conn, privKey) + conn, err = MakeSecretConnection(conn, privKey) } return conn, err } diff --git a/privval/socket_dialers_test.go b/privval/socket_dialers_test.go index d7b372b85..32c07c591 100644 --- a/privval/socket_dialers_test.go +++ b/privval/socket_dialers_test.go @@ -5,7 +5,6 @@ import ( "testing" "time" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -44,6 +43,6 @@ func TestIsConnTimeoutForWrappedConnTimeouts(t *testing.T) { dialer := DialTCPFn(tcpAddr, time.Millisecond, ed25519.GenPrivKey()) _, err := dialer() assert.Error(t, err) - err = errors.Wrap(ErrConnectionTimeout, err.Error()) + err = fmt.Errorf("%v: %w", err, ErrConnectionTimeout) assert.True(t, IsConnTimeout(err)) } diff --git a/privval/socket_listeners.go b/privval/socket_listeners.go index 0e12e4268..a6d031f8c 100644 --- a/privval/socket_listeners.go +++ b/privval/socket_listeners.go @@ -5,12 +5,10 @@ import ( "time" "github.com/tendermint/tendermint/crypto/ed25519" - p2pconn "github.com/tendermint/tendermint/p2p/conn" ) const ( - defaultTimeoutAcceptSeconds = 3 - defaultPingPeriodMilliseconds = 100 + defaultTimeoutAcceptSeconds = 3 ) // timeoutError can be used to check if an error returned from the netp package @@ -45,7 +43,7 @@ var _ net.Listener = (*TCPListener)(nil) type TCPListener struct { *net.TCPListener - secretConnKey ed25519.PrivKeyEd25519 + secretConnKey ed25519.PrivKey timeoutAccept time.Duration timeoutReadWrite time.Duration @@ -53,7 +51,7 @@ type TCPListener struct { // NewTCPListener returns a listener that accepts authenticated encrypted connections // using the given secretConnKey and the default timeout values. -func NewTCPListener(ln net.Listener, secretConnKey ed25519.PrivKeyEd25519) *TCPListener { +func NewTCPListener(ln net.Listener, secretConnKey ed25519.PrivKey) *TCPListener { return &TCPListener{ TCPListener: ln.(*net.TCPListener), secretConnKey: secretConnKey, @@ -77,7 +75,7 @@ func (ln *TCPListener) Accept() (net.Conn, error) { // Wrap the conn in our timeout and encryption wrappers timeoutConn := newTimeoutConn(tc, ln.timeoutReadWrite) - secretConn, err := p2pconn.MakeSecretConnection(timeoutConn, ln.secretConnKey) + secretConn, err := MakeSecretConnection(timeoutConn, ln.secretConnKey) if err != nil { return nil, err } diff --git a/privval/socket_listeners_test.go b/privval/socket_listeners_test.go index 3c4cb8588..5e95ec10c 100644 --- a/privval/socket_listeners_test.go +++ b/privval/socket_listeners_test.go @@ -13,7 +13,7 @@ import ( //------------------------------------------- // helper funcs -func newPrivKey() ed25519.PrivKeyEd25519 { +func newPrivKey() ed25519.PrivKey { return ed25519.GenPrivKey() } diff --git a/privval/utils.go b/privval/utils.go index 64f4f8c2f..0b8cced34 100644 --- a/privval/utils.go +++ b/privval/utils.go @@ -1,11 +1,10 @@ package privval import ( + "errors" "fmt" "net" - "github.com/pkg/errors" - "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" @@ -15,10 +14,11 @@ import ( // report that a connection timeout occurred. This detects both fundamental // network timeouts, as well as ErrConnTimeout errors. 
func IsConnTimeout(err error) bool { - switch errors.Cause(err).(type) { - case EndpointTimeoutError: + _, ok := errors.Unwrap(err).(timeoutError) + switch { + case errors.As(err, &EndpointTimeoutError{}): return true - case timeoutError: + case ok: return true default: return false diff --git a/privval/utils_test.go b/privval/utils_test.go index 5648efec5..468b6d12f 100644 --- a/privval/utils_test.go +++ b/privval/utils_test.go @@ -1,13 +1,14 @@ package privval import ( + "errors" + "fmt" "testing" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) func TestIsConnTimeoutForNonTimeoutErrors(t *testing.T) { - assert.False(t, IsConnTimeout(errors.Wrap(ErrDialRetryMax, "max retries exceeded"))) + assert.False(t, IsConnTimeout(fmt.Errorf("max retries exceeded: %w", ErrDialRetryMax))) assert.False(t, IsConnTimeout(errors.New("completely irrelevant error"))) } diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto new file mode 100644 index 000000000..e4a1dd1bc --- /dev/null +++ b/proto/tendermint/abci/types.proto @@ -0,0 +1,391 @@ +syntax = "proto3"; +package tendermint.abci; + +option go_package = "github.com/tendermint/tendermint/abci/types"; + +// For more information on gogo.proto, see: +// https://github.com/gogo/protobuf/blob/master/extensions.md +import "tendermint/crypto/proof.proto"; +import "tendermint/types/types.proto"; +import "tendermint/crypto/keys.proto"; +import "tendermint/types/params.proto"; +import "google/protobuf/timestamp.proto"; +import "gogoproto/gogo.proto"; + +// This file is copied from http://github.com/tendermint/abci +// NOTE: When using custom types, mind the warnings. +// https://github.com/gogo/protobuf/blob/master/custom_types.md#warnings-and-issues + +//---------------------------------------- +// Request types + +message Request { + oneof value { + RequestEcho echo = 1; + RequestFlush flush = 2; + RequestInfo info = 3; + RequestInitChain init_chain = 4; + RequestQuery query = 5; + RequestBeginBlock begin_block = 6; + RequestCheckTx check_tx = 7; + RequestDeliverTx deliver_tx = 8; + RequestEndBlock end_block = 9; + RequestCommit commit = 10; + RequestListSnapshots list_snapshots = 11; + RequestOfferSnapshot offer_snapshot = 12; + RequestLoadSnapshotChunk load_snapshot_chunk = 13; + RequestApplySnapshotChunk apply_snapshot_chunk = 14; + } +} + +message RequestEcho { + string message = 1; +} + +message RequestFlush {} + +message RequestInfo { + string version = 1; + uint64 block_version = 2; + uint64 p2p_version = 3; +} + +message RequestInitChain { + google.protobuf.Timestamp time = 1 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + string chain_id = 2; + ConsensusParams consensus_params = 3; + repeated ValidatorUpdate validators = 4 [(gogoproto.nullable) = false]; + bytes app_state_bytes = 5; + int64 initial_height = 6; +} + +message RequestQuery { + bytes data = 1; + string path = 2; + int64 height = 3; + bool prove = 4; +} + +message RequestBeginBlock { + bytes hash = 1; + tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; + LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false]; + repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false]; +} + +enum CheckTxType { + NEW = 0 [(gogoproto.enumvalue_customname) = "New"]; + RECHECK = 1 [(gogoproto.enumvalue_customname) = "Recheck"]; +} + +message RequestCheckTx { + bytes tx = 1; + CheckTxType type = 2; +} + +message RequestDeliverTx { + bytes tx = 1; +} + +message RequestEndBlock { + int64 height = 1; +} + 
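The gogoproto options threaded through these messages change the generated Go in ways worth spelling out: (gogoproto.nullable) = false turns pointer fields into value fields, and (gogoproto.stdtime) = true swaps the protobuf Timestamp for time.Time. A hypothetical sketch of what RequestInitChain above would generate to (the real file is produced by protoc-gen-gogo, not written by hand, and the stub types below exist only so the sketch compiles on its own):

package abcisketch

import "time"

// Stubs for illustration; the real types are generated from this .proto.
type ConsensusParams struct{}
type ValidatorUpdate struct{}

// Hypothetical rendering of RequestInitChain under gogo's defaults.
type RequestInitChain struct {
	// stdtime + nullable=false: a value-typed time.Time, not *Timestamp.
	Time time.Time

	ChainId string

	// An unannotated message field stays a pointer, so it can be nil
	// when the application does not override the defaults.
	ConsensusParams *ConsensusParams

	// nullable=false on a repeated field: a slice of values rather than
	// a slice of pointers.
	Validators []ValidatorUpdate

	AppStateBytes []byte
	InitialHeight int64
}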
+message RequestCommit {} + +// lists available snapshots +message RequestListSnapshots { +} + +// offers a snapshot to the application +message RequestOfferSnapshot { + Snapshot snapshot = 1; // snapshot offered by peers + bytes app_hash = 2; // light client-verified app hash for snapshot height +} + +// loads a snapshot chunk +message RequestLoadSnapshotChunk { + uint64 height = 1; + uint32 format = 2; + uint32 chunk = 3; +} + +// Applies a snapshot chunk +message RequestApplySnapshotChunk { + uint32 index = 1; + bytes chunk = 2; + string sender = 3; +} + +//---------------------------------------- +// Response types + +message Response { + oneof value { + ResponseException exception = 1; + ResponseEcho echo = 2; + ResponseFlush flush = 3; + ResponseInfo info = 4; + ResponseInitChain init_chain = 5; + ResponseQuery query = 6; + ResponseBeginBlock begin_block = 7; + ResponseCheckTx check_tx = 8; + ResponseDeliverTx deliver_tx = 9; + ResponseEndBlock end_block = 10; + ResponseCommit commit = 11; + ResponseListSnapshots list_snapshots = 12; + ResponseOfferSnapshot offer_snapshot = 13; + ResponseLoadSnapshotChunk load_snapshot_chunk = 14; + ResponseApplySnapshotChunk apply_snapshot_chunk = 15; + } +} + +// nondeterministic +message ResponseException { + string error = 1; +} + +message ResponseEcho { + string message = 1; +} + +message ResponseFlush {} + +message ResponseInfo { + string data = 1; + + // this is the software version of the application. TODO: remove? + string version = 2; + uint64 app_version = 3; + + int64 last_block_height = 4; + bytes last_block_app_hash = 5; +} + +message ResponseInitChain { + ConsensusParams consensus_params = 1; + repeated ValidatorUpdate validators = 2 [(gogoproto.nullable) = false]; + bytes app_hash = 3; +} + +message ResponseQuery { + uint32 code = 1; + // bytes data = 2; // use "value" instead. 
+ string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 index = 5; + bytes key = 6; + bytes value = 7; + tendermint.crypto.ProofOps proof_ops = 8; + int64 height = 9; + string codespace = 10; +} + +message ResponseBeginBlock { + repeated Event events = 1 + [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; +} + +message ResponseCheckTx { + uint32 code = 1; + bytes data = 2; + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 gas_wanted = 5 [json_name = "gas_wanted"]; + int64 gas_used = 6 [json_name = "gas_used"]; + repeated Event events = 7 + [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; + string codespace = 8; +} + +message ResponseDeliverTx { + uint32 code = 1; + bytes data = 2; + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 gas_wanted = 5 [json_name = "gas_wanted"]; + int64 gas_used = 6 [json_name = "gas_used"]; + repeated Event events = 7 + [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; + string codespace = 8; +} + +message ResponseEndBlock { + repeated ValidatorUpdate validator_updates = 1 + [(gogoproto.nullable) = false]; + ConsensusParams consensus_param_updates = 2; + repeated Event events = 3 + [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; +} + +message ResponseCommit { + // reserve 1 + bytes data = 2; + int64 retain_height = 3; +} + +message ResponseListSnapshots { + repeated Snapshot snapshots = 1; +} + +message ResponseOfferSnapshot { + Result result = 1; + + enum Result { + UNKNOWN = 0; // Unknown result, abort all snapshot restoration + ACCEPT = 1; // Snapshot accepted, apply chunks + ABORT = 2; // Abort all snapshot restoration + REJECT = 3; // Reject this specific snapshot, try others + REJECT_FORMAT = 4; // Reject all snapshots of this format, try others + REJECT_SENDER = 5; // Reject all snapshots from the sender(s), try others + } +} + +message ResponseLoadSnapshotChunk { + bytes chunk = 1; +} + +message ResponseApplySnapshotChunk { + Result result = 1; + repeated uint32 refetch_chunks = 2; // Chunks to refetch and reapply + repeated string reject_senders = 3; // Chunk senders to reject and ban + + enum Result { + UNKNOWN = 0; // Unknown result, abort all snapshot restoration + ACCEPT = 1; // Chunk successfully accepted + ABORT = 2; // Abort all snapshot restoration + RETRY = 3; // Retry chunk (combine with refetch and reject) + RETRY_SNAPSHOT = 4; // Retry snapshot (combine with refetch and reject) + REJECT_SNAPSHOT = 5; // Reject this snapshot, try others + } +} + +//---------------------------------------- +// Misc. + +// ConsensusParams contains all consensus-relevant parameters +// that can be adjusted by the abci app +message ConsensusParams { + BlockParams block = 1; + tendermint.types.EvidenceParams evidence = 2; + tendermint.types.ValidatorParams validator = 3; + tendermint.types.VersionParams version = 4; +} + +// BlockParams contains limits on the block size. +message BlockParams { + // Note: must be greater than 0 + int64 max_bytes = 1; + // Note: must be greater or equal to -1 + int64 max_gas = 2; +} + +message LastCommitInfo { + int32 round = 1; + repeated VoteInfo votes = 2 [(gogoproto.nullable) = false]; +} + +// Event allows application developers to attach additional information to +// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. +// Later, transactions may be queried using these events. 
+message Event { + string type = 1; + repeated EventAttribute attributes = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes,omitempty" + ]; +} + +// EventAttribute is a single key-value pair, associated with an event. +message EventAttribute { + bytes key = 1; + bytes value = 2; + bool index = 3; // nondeterministic +} + +// TxResult contains results of executing the transaction. +// +// One usage is indexing transaction results. +message TxResult { + int64 height = 1; + uint32 index = 2; + bytes tx = 3; + ResponseDeliverTx result = 4 [(gogoproto.nullable) = false]; +} + +//---------------------------------------- +// Blockchain Types + +// Validator +message Validator { + bytes address = 1; // The first 20 bytes of SHA256(public key) + // PubKey pub_key = 2 [(gogoproto.nullable)=false]; + int64 power = 3; // The voting power +} + +// ValidatorUpdate +message ValidatorUpdate { + tendermint.crypto.PublicKey pub_key = 1 [(gogoproto.nullable) = false]; + int64 power = 2; +} + +// VoteInfo +message VoteInfo { + Validator validator = 1 [(gogoproto.nullable) = false]; + bool signed_last_block = 2; +} + +enum EvidenceType { + UNKNOWN = 0; + DUPLICATE_VOTE = 1; + LIGHT_CLIENT_ATTACK = 2; +} + +message Evidence { + EvidenceType type = 1; + // The offending validator + Validator validator = 2 [(gogoproto.nullable) = false]; + // The height when the offense occurred + int64 height = 3; + // The corresponding time where the offense occurred + google.protobuf.Timestamp time = 4 [ + (gogoproto.nullable) = false, + (gogoproto.stdtime) = true + ]; + // Total voting power of the validator set in case the ABCI application does + // not store historical validators. + // https://github.com/tendermint/tendermint/issues/4581 + int64 total_voting_power = 5; +} + +//---------------------------------------- +// State Sync Types + +message Snapshot { + uint64 height = 1; // The height at which the snapshot was taken + uint32 format = 2; // The application-specific snapshot format + uint32 chunks = 3; // Number of chunks in the snapshot + bytes hash = 4; // Arbitrary snapshot hash, equal only if identical + bytes metadata = 5; // Arbitrary application metadata +} + +//---------------------------------------- +// Service Definition + +service ABCIApplication { + rpc Echo(RequestEcho) returns (ResponseEcho); + rpc Flush(RequestFlush) returns (ResponseFlush); + rpc Info(RequestInfo) returns (ResponseInfo); + rpc DeliverTx(RequestDeliverTx) returns (ResponseDeliverTx); + rpc CheckTx(RequestCheckTx) returns (ResponseCheckTx); + rpc Query(RequestQuery) returns (ResponseQuery); + rpc Commit(RequestCommit) returns (ResponseCommit); + rpc InitChain(RequestInitChain) returns (ResponseInitChain); + rpc BeginBlock(RequestBeginBlock) returns (ResponseBeginBlock); + rpc EndBlock(RequestEndBlock) returns (ResponseEndBlock); + rpc ListSnapshots(RequestListSnapshots) returns (ResponseListSnapshots); + rpc OfferSnapshot(RequestOfferSnapshot) returns (ResponseOfferSnapshot); + rpc LoadSnapshotChunk(RequestLoadSnapshotChunk) returns (ResponseLoadSnapshotChunk); + rpc ApplySnapshotChunk(RequestApplySnapshotChunk) returns (ResponseApplySnapshotChunk); +} diff --git a/proto/tendermint/blockchain/types.pb.go b/proto/tendermint/blockchain/types.pb.go new file mode 100644 index 000000000..cc6481a5e --- /dev/null +++ b/proto/tendermint/blockchain/types.pb.go @@ -0,0 +1,1537 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: tendermint/blockchain/types.proto + +package blockchain + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + types "github.com/tendermint/tendermint/proto/tendermint/types" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// BlockRequest requests a block for a specific height +type BlockRequest struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *BlockRequest) Reset() { *m = BlockRequest{} } +func (m *BlockRequest) String() string { return proto.CompactTextString(m) } +func (*BlockRequest) ProtoMessage() {} +func (*BlockRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2927480384e78499, []int{0} +} +func (m *BlockRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockRequest.Merge(m, src) +} +func (m *BlockRequest) XXX_Size() int { + return m.Size() +} +func (m *BlockRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BlockRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockRequest proto.InternalMessageInfo + +func (m *BlockRequest) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +// NoBlockResponse informs the node that the peer does not have block at the requested height +type NoBlockResponse struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *NoBlockResponse) Reset() { *m = NoBlockResponse{} } +func (m *NoBlockResponse) String() string { return proto.CompactTextString(m) } +func (*NoBlockResponse) ProtoMessage() {} +func (*NoBlockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2927480384e78499, []int{1} +} +func (m *NoBlockResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NoBlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NoBlockResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NoBlockResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NoBlockResponse.Merge(m, src) +} +func (m *NoBlockResponse) XXX_Size() int { + return m.Size() +} +func (m *NoBlockResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NoBlockResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NoBlockResponse proto.InternalMessageInfo + +func (m *NoBlockResponse) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +// BlockResponse returns block to the requested +type BlockResponse struct { + Block *types.Block `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` +} + +func (m *BlockResponse) Reset() { *m = 
BlockResponse{} } +func (m *BlockResponse) String() string { return proto.CompactTextString(m) } +func (*BlockResponse) ProtoMessage() {} +func (*BlockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2927480384e78499, []int{2} +} +func (m *BlockResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockResponse.Merge(m, src) +} +func (m *BlockResponse) XXX_Size() int { + return m.Size() +} +func (m *BlockResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BlockResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockResponse proto.InternalMessageInfo + +func (m *BlockResponse) GetBlock() *types.Block { + if m != nil { + return m.Block + } + return nil +} + +// StatusRequest requests the status of a peer. +type StatusRequest struct { +} + +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2927480384e78499, []int{3} +} +func (m *StatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusRequest.Merge(m, src) +} +func (m *StatusRequest) XXX_Size() int { + return m.Size() +} +func (m *StatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusRequest proto.InternalMessageInfo + +// StatusResponse is a peer response to inform their status. 
+type StatusResponse struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Base int64 `protobuf:"varint,2,opt,name=base,proto3" json:"base,omitempty"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (m *StatusResponse) String() string { return proto.CompactTextString(m) } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2927480384e78499, []int{4} +} +func (m *StatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusResponse.Merge(m, src) +} +func (m *StatusResponse) XXX_Size() int { + return m.Size() +} +func (m *StatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusResponse proto.InternalMessageInfo + +func (m *StatusResponse) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *StatusResponse) GetBase() int64 { + if m != nil { + return m.Base + } + return 0 +} + +type Message struct { + // Types that are valid to be assigned to Sum: + // *Message_BlockRequest + // *Message_NoBlockResponse + // *Message_BlockResponse + // *Message_StatusRequest + // *Message_StatusResponse + Sum isMessage_Sum `protobuf_oneof:"sum"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_2927480384e78499, []int{5} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +type isMessage_Sum interface { + isMessage_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Message_BlockRequest struct { + BlockRequest *BlockRequest `protobuf:"bytes,1,opt,name=block_request,json=blockRequest,proto3,oneof" json:"block_request,omitempty"` +} +type Message_NoBlockResponse struct { + NoBlockResponse *NoBlockResponse `protobuf:"bytes,2,opt,name=no_block_response,json=noBlockResponse,proto3,oneof" json:"no_block_response,omitempty"` +} +type Message_BlockResponse struct { + BlockResponse *BlockResponse `protobuf:"bytes,3,opt,name=block_response,json=blockResponse,proto3,oneof" json:"block_response,omitempty"` +} +type Message_StatusRequest struct { + StatusRequest *StatusRequest `protobuf:"bytes,4,opt,name=status_request,json=statusRequest,proto3,oneof" json:"status_request,omitempty"` +} +type Message_StatusResponse struct { + StatusResponse *StatusResponse 
`protobuf:"bytes,5,opt,name=status_response,json=statusResponse,proto3,oneof" json:"status_response,omitempty"` +} + +func (*Message_BlockRequest) isMessage_Sum() {} +func (*Message_NoBlockResponse) isMessage_Sum() {} +func (*Message_BlockResponse) isMessage_Sum() {} +func (*Message_StatusRequest) isMessage_Sum() {} +func (*Message_StatusResponse) isMessage_Sum() {} + +func (m *Message) GetSum() isMessage_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *Message) GetBlockRequest() *BlockRequest { + if x, ok := m.GetSum().(*Message_BlockRequest); ok { + return x.BlockRequest + } + return nil +} + +func (m *Message) GetNoBlockResponse() *NoBlockResponse { + if x, ok := m.GetSum().(*Message_NoBlockResponse); ok { + return x.NoBlockResponse + } + return nil +} + +func (m *Message) GetBlockResponse() *BlockResponse { + if x, ok := m.GetSum().(*Message_BlockResponse); ok { + return x.BlockResponse + } + return nil +} + +func (m *Message) GetStatusRequest() *StatusRequest { + if x, ok := m.GetSum().(*Message_StatusRequest); ok { + return x.StatusRequest + } + return nil +} + +func (m *Message) GetStatusResponse() *StatusResponse { + if x, ok := m.GetSum().(*Message_StatusResponse); ok { + return x.StatusResponse + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Message) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Message_BlockRequest)(nil), + (*Message_NoBlockResponse)(nil), + (*Message_BlockResponse)(nil), + (*Message_StatusRequest)(nil), + (*Message_StatusResponse)(nil), + } +} + +func init() { + proto.RegisterType((*BlockRequest)(nil), "tendermint.blockchain.BlockRequest") + proto.RegisterType((*NoBlockResponse)(nil), "tendermint.blockchain.NoBlockResponse") + proto.RegisterType((*BlockResponse)(nil), "tendermint.blockchain.BlockResponse") + proto.RegisterType((*StatusRequest)(nil), "tendermint.blockchain.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "tendermint.blockchain.StatusResponse") + proto.RegisterType((*Message)(nil), "tendermint.blockchain.Message") +} + +func init() { proto.RegisterFile("tendermint/blockchain/types.proto", fileDescriptor_2927480384e78499) } + +var fileDescriptor_2927480384e78499 = []byte{ + // 370 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xc1, 0x4e, 0xfa, 0x40, + 0x10, 0xc6, 0xdb, 0x7f, 0x81, 0x7f, 0x32, 0x50, 0x1a, 0x9b, 0xa8, 0xc4, 0x98, 0x46, 0xab, 0x12, + 0x3d, 0xd8, 0x26, 0x78, 0x25, 0x1e, 0x38, 0x11, 0x13, 0x8c, 0xa9, 0xc6, 0x83, 0x17, 0xd2, 0xe2, + 0x86, 0x36, 0x4a, 0x17, 0xd9, 0xed, 0xc1, 0xb7, 0xf0, 0x19, 0x7c, 0x1a, 0x8f, 0x1c, 0x3d, 0x1a, + 0x78, 0x11, 0xc3, 0x6c, 0x29, 0x4b, 0x03, 0xf5, 0xb6, 0x3b, 0xfd, 0xe6, 0x37, 0xdf, 0x7e, 0x99, + 0xc2, 0x31, 0x27, 0xf1, 0x33, 0x99, 0x8c, 0xa2, 0x98, 0xbb, 0xc1, 0x2b, 0x1d, 0xbc, 0x0c, 0x42, + 0x3f, 0x8a, 0x5d, 0xfe, 0x3e, 0x26, 0xcc, 0x19, 0x4f, 0x28, 0xa7, 0xe6, 0xee, 0x4a, 0xe2, 0xac, + 0x24, 0x07, 0x87, 0x52, 0x27, 0xca, 0x45, 0xbf, 0x68, 0xb2, 0x9b, 0x50, 0xeb, 0x2c, 0xae, 0x1e, + 0x79, 0x4b, 0x08, 0xe3, 0xe6, 0x1e, 0x54, 0x42, 0x12, 0x0d, 0x43, 0xde, 0x50, 0x8f, 0xd4, 0x73, + 0xcd, 0x4b, 0x6f, 0xf6, 0x05, 0x18, 0xb7, 0x34, 0x55, 0xb2, 0x31, 0x8d, 0x19, 0xd9, 0x2a, 0xbd, + 0x06, 0x7d, 0x5d, 0x78, 0x09, 0x65, 0x1c, 0x89, 0xba, 0x6a, 0x6b, 0xdf, 0x91, 0x8c, 0x8a, 0x07, + 0x08, 0xbd, 0x50, 0xd9, 0x06, 0xe8, 0xf7, 0xdc, 0xe7, 0x09, 0x4b, 0x3d, 0xd9, 0x6d, 0xa8, 0x2f, + 0x0b, 0xc5, 0xa3, 0x4d, 0x13, 0x4a, 0x81, 0xcf, 0x48, 0xe3, 0x1f, 0x56, 
0xf1, 0x6c, 0x7f, 0x6a, + 0xf0, 0xbf, 0x47, 0x18, 0xf3, 0x87, 0xc4, 0xbc, 0x01, 0x1d, 0x67, 0xf4, 0x27, 0x02, 0x9d, 0x3a, + 0x3a, 0x71, 0x36, 0x46, 0xe7, 0xc8, 0xc9, 0x74, 0x15, 0xaf, 0x16, 0xc8, 0x49, 0x3d, 0xc0, 0x4e, + 0x4c, 0xfb, 0x4b, 0x9c, 0x30, 0x86, 0x83, 0xab, 0xad, 0xe6, 0x16, 0x5e, 0x2e, 0xc1, 0xae, 0xe2, + 0x19, 0x71, 0x2e, 0xd4, 0x1e, 0xd4, 0x73, 0x48, 0x0d, 0x91, 0xa7, 0xc5, 0x16, 0x33, 0xa0, 0x1e, + 0xe4, 0x71, 0x0c, 0xa3, 0xcb, 0x5e, 0x5c, 0x2a, 0xc4, 0xad, 0x05, 0xbf, 0xc0, 0x31, 0xb9, 0x60, + 0xde, 0x81, 0x91, 0xe1, 0x52, 0x7b, 0x65, 0xe4, 0x9d, 0xfd, 0xc1, 0xcb, 0xfc, 0xd5, 0xd9, 0x5a, + 0xa5, 0x53, 0x06, 0x8d, 0x25, 0xa3, 0xce, 0xe3, 0xd7, 0xcc, 0x52, 0xa7, 0x33, 0x4b, 0xfd, 0x99, + 0x59, 0xea, 0xc7, 0xdc, 0x52, 0xa6, 0x73, 0x4b, 0xf9, 0x9e, 0x5b, 0xca, 0x53, 0x7b, 0x18, 0xf1, + 0x30, 0x09, 0x9c, 0x01, 0x1d, 0xb9, 0xf2, 0x26, 0xaf, 0x8e, 0xb8, 0xc8, 0xee, 0xc6, 0xff, 0x23, + 0xa8, 0xe0, 0xc7, 0xab, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x59, 0x07, 0xbd, 0x3f, 0x03, + 0x00, 0x00, +} + +func (m *BlockRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *NoBlockResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NoBlockResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NoBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BlockResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Block != nil { + { + size, err := m.Block.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA 
= make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Base != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Base)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Message_BlockRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_BlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.BlockRequest != nil { + { + size, err := m.BlockRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Message_NoBlockResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_NoBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NoBlockResponse != nil { + { + size, err := m.NoBlockResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Message_BlockResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_BlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.BlockResponse != nil { + { + size, err := m.BlockResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Message_StatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StatusRequest != nil { + { + size, err := m.StatusRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Message_StatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StatusResponse != nil { + { + size, err := 
m.StatusResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BlockRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *NoBlockResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *BlockResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Block != nil { + l = m.Block.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *StatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *StatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Base != 0 { + n += 1 + sovTypes(uint64(m.Base)) + } + return n +} + +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *Message_BlockRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockRequest != nil { + l = m.BlockRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_NoBlockResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NoBlockResponse != nil { + l = m.NoBlockResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_BlockResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockResponse != nil { + l = m.BlockResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_StatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StatusRequest != nil { + l = m.StatusRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_StatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StatusResponse != nil { + l = m.StatusResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *BlockRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { 
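+ // Varint decode: each byte supplies seven payload bits, least-significant group first; + // a byte with the high bit clear (< 0x80) ends the value, e.g. 300 is encoded as 0xac 0x02; + // the shift >= 64 guard rejects encodings longer than a 64-bit integer.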
+ if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NoBlockResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NoBlockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NoBlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Block == nil { + m.Block = &types.Block{} + } + if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes 
+ } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Base", wireType) + } + m.Base = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Base |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &BlockRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_BlockRequest{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NoBlockResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NoBlockResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_NoBlockResponse{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &BlockResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_BlockResponse{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &StatusRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_StatusRequest{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
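// a negative postIndex means iNdEx+msglen overflowed; reject it as a corrupt length prefix.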
return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &StatusResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_StatusResponse{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/blockchain/types.proto b/proto/tendermint/blockchain/types.proto new file mode 100644 index 000000000..f5c143cf5 --- /dev/null +++ b/proto/tendermint/blockchain/types.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; +package tendermint.blockchain; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/blockchain"; + +import "tendermint/types/block.proto"; + +// BlockRequest requests a block for a specific height. +message BlockRequest { + int64 height = 1; +} + +// NoBlockResponse informs the node that the peer does not have a block at the requested height. +message NoBlockResponse { + int64 height = 1; +} + +// BlockResponse returns the requested block. +message BlockResponse { + tendermint.types.Block block = 1; +} + +// StatusRequest requests the status of a peer. +message StatusRequest { +} +
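A minimal sketch of driving these messages from Go, assuming the package generated from this file is imported under the illustrative alias bcproto; the concrete request is wrapped in the Message oneof, round-tripped through gogoproto's Marshal/Unmarshal, and dispatched with a type switch on the sum:

	package main

	import (
		"fmt"

		"github.com/gogo/protobuf/proto"
		bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	)

	func main() {
		// Wrap a BlockRequest for height 10 in the Message oneof, as a reactor
		// would before putting it on the wire.
		msg := &bcproto.Message{
			Sum: &bcproto.Message_BlockRequest{
				BlockRequest: &bcproto.BlockRequest{Height: 10},
			},
		}

		bz, err := proto.Marshal(msg) // uses the generated marshaling fast path
		if err != nil {
			panic(err)
		}

		var got bcproto.Message
		if err := proto.Unmarshal(bz, &got); err != nil {
			panic(err)
		}

		// Dispatch on the decoded variant.
		switch sum := got.Sum.(type) {
		case *bcproto.Message_BlockRequest:
			fmt.Println("peer requested height", sum.BlockRequest.Height)
		case *bcproto.Message_NoBlockResponse:
			fmt.Println("peer has no block at height", sum.NoBlockResponse.Height)
		default:
			fmt.Println("unexpected message")
		}
	}

+// StatusResponse is a peer's response reporting its current height and base.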
+message StatusResponse { + int64 height = 1; + int64 base = 2; +} + +message Message { + oneof sum { + BlockRequest block_request = 1; + NoBlockResponse no_block_response = 2; + BlockResponse block_response = 3; + StatusRequest status_request = 4; + StatusResponse status_response = 5; + } +} diff --git a/proto/tendermint/consensus/types.pb.go b/proto/tendermint/consensus/types.pb.go new file mode 100644 index 000000000..67efd1c2c --- /dev/null +++ b/proto/tendermint/consensus/types.pb.go @@ -0,0 +1,3455 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/consensus/types.proto + +package consensus + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + bits "github.com/tendermint/tendermint/proto/tendermint/libs/bits" + types "github.com/tendermint/tendermint/proto/tendermint/types" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// NewRoundStep is sent for every step taken in the ConsensusState. +// For every height/round/step transition +type NewRoundStep struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Step uint32 `protobuf:"varint,3,opt,name=step,proto3" json:"step,omitempty"` + SecondsSinceStartTime int64 `protobuf:"varint,4,opt,name=seconds_since_start_time,json=secondsSinceStartTime,proto3" json:"seconds_since_start_time,omitempty"` + LastCommitRound int32 `protobuf:"varint,5,opt,name=last_commit_round,json=lastCommitRound,proto3" json:"last_commit_round,omitempty"` +} + +func (m *NewRoundStep) Reset() { *m = NewRoundStep{} } +func (m *NewRoundStep) String() string { return proto.CompactTextString(m) } +func (*NewRoundStep) ProtoMessage() {} +func (*NewRoundStep) Descriptor() ([]byte, []int) { + return fileDescriptor_81a22d2efc008981, []int{0} +} +func (m *NewRoundStep) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NewRoundStep) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NewRoundStep.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NewRoundStep) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewRoundStep.Merge(m, src) +} +func (m *NewRoundStep) XXX_Size() int { + return m.Size() +} +func (m *NewRoundStep) XXX_DiscardUnknown() { + xxx_messageInfo_NewRoundStep.DiscardUnknown(m) +} + +var xxx_messageInfo_NewRoundStep proto.InternalMessageInfo + +func (m *NewRoundStep) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *NewRoundStep) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *NewRoundStep) GetStep() uint32 { + if m != nil { + return m.Step + } + return 0 +} + +func (m *NewRoundStep) GetSecondsSinceStartTime() int64 { + if m != nil { + return m.SecondsSinceStartTime + } + return 0 +} + +func (m *NewRoundStep) GetLastCommitRound() int32 { + if m 
!= nil { + return m.LastCommitRound + } + return 0 +} + +// NewValidBlock is sent when a validator observes a valid block B in some round r, +//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. +// In case the block is also committed, then IsCommit flag is set to true. +type NewValidBlock struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + BlockPartSetHeader types.PartSetHeader `protobuf:"bytes,3,opt,name=block_part_set_header,json=blockPartSetHeader,proto3" json:"block_part_set_header"` + BlockParts *bits.BitArray `protobuf:"bytes,4,opt,name=block_parts,json=blockParts,proto3" json:"block_parts,omitempty"` + IsCommit bool `protobuf:"varint,5,opt,name=is_commit,json=isCommit,proto3" json:"is_commit,omitempty"` +} + +func (m *NewValidBlock) Reset() { *m = NewValidBlock{} } +func (m *NewValidBlock) String() string { return proto.CompactTextString(m) } +func (*NewValidBlock) ProtoMessage() {} +func (*NewValidBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_81a22d2efc008981, []int{1} +} +func (m *NewValidBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NewValidBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NewValidBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NewValidBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewValidBlock.Merge(m, src) +} +func (m *NewValidBlock) XXX_Size() int { + return m.Size() +} +func (m *NewValidBlock) XXX_DiscardUnknown() { + xxx_messageInfo_NewValidBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_NewValidBlock proto.InternalMessageInfo + +func (m *NewValidBlock) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *NewValidBlock) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *NewValidBlock) GetBlockPartSetHeader() types.PartSetHeader { + if m != nil { + return m.BlockPartSetHeader + } + return types.PartSetHeader{} +} + +func (m *NewValidBlock) GetBlockParts() *bits.BitArray { + if m != nil { + return m.BlockParts + } + return nil +} + +func (m *NewValidBlock) GetIsCommit() bool { + if m != nil { + return m.IsCommit + } + return false +} + +// Proposal is sent when a new block is proposed. 
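+// It wraps tendermint.types.Proposal, which identifies the proposed block and carries the proposer's signature.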
+type Proposal struct { + Proposal types.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal"` +} + +func (m *Proposal) Reset() { *m = Proposal{} } +func (m *Proposal) String() string { return proto.CompactTextString(m) } +func (*Proposal) ProtoMessage() {} +func (*Proposal) Descriptor() ([]byte, []int) { + return fileDescriptor_81a22d2efc008981, []int{2} +} +func (m *Proposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Proposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Proposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Proposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_Proposal.Merge(m, src) +} +func (m *Proposal) XXX_Size() int { + return m.Size() +} +func (m *Proposal) XXX_DiscardUnknown() { + xxx_messageInfo_Proposal.DiscardUnknown(m) +} + +var xxx_messageInfo_Proposal proto.InternalMessageInfo + +func (m *Proposal) GetProposal() types.Proposal { + if m != nil { + return m.Proposal + } + return types.Proposal{} +} + +// ProposalPOL is sent when a previous proposal is re-proposed. +type ProposalPOL struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + ProposalPolRound int32 `protobuf:"varint,2,opt,name=proposal_pol_round,json=proposalPolRound,proto3" json:"proposal_pol_round,omitempty"` + ProposalPol bits.BitArray `protobuf:"bytes,3,opt,name=proposal_pol,json=proposalPol,proto3" json:"proposal_pol"` +} + +func (m *ProposalPOL) Reset() { *m = ProposalPOL{} } +func (m *ProposalPOL) String() string { return proto.CompactTextString(m) } +func (*ProposalPOL) ProtoMessage() {} +func (*ProposalPOL) Descriptor() ([]byte, []int) { + return fileDescriptor_81a22d2efc008981, []int{3} +} +func (m *ProposalPOL) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProposalPOL) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProposalPOL.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProposalPOL) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProposalPOL.Merge(m, src) +} +func (m *ProposalPOL) XXX_Size() int { + return m.Size() +} +func (m *ProposalPOL) XXX_DiscardUnknown() { + xxx_messageInfo_ProposalPOL.DiscardUnknown(m) +} + +var xxx_messageInfo_ProposalPOL proto.InternalMessageInfo + +func (m *ProposalPOL) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *ProposalPOL) GetProposalPolRound() int32 { + if m != nil { + return m.ProposalPolRound + } + return 0 +} + +func (m *ProposalPOL) GetProposalPol() bits.BitArray { + if m != nil { + return m.ProposalPol + } + return bits.BitArray{} +} + +// BlockPart is sent when gossipping a piece of the proposed block. 
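+// Height and Round pin the part to a consensus instance; Part carries the part's index, bytes, and its Merkle proof against the block's PartSetHeader.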
+type BlockPart struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Part types.Part `protobuf:"bytes,3,opt,name=part,proto3" json:"part"` +} + +func (m *BlockPart) Reset() { *m = BlockPart{} } +func (m *BlockPart) String() string { return proto.CompactTextString(m) } +func (*BlockPart) ProtoMessage() {} +func (*BlockPart) Descriptor() ([]byte, []int) { + return fileDescriptor_81a22d2efc008981, []int{4} +} +func (m *BlockPart) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockPart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockPart.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockPart) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockPart.Merge(m, src) +} +func (m *BlockPart) XXX_Size() int { + return m.Size() +} +func (m *BlockPart) XXX_DiscardUnknown() { + xxx_messageInfo_BlockPart.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockPart proto.InternalMessageInfo + +func (m *BlockPart) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *BlockPart) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *BlockPart) GetPart() types.Part { + if m != nil { + return m.Part + } + return types.Part{} +} + +// Vote is sent when voting for a proposal (or lack thereof). +type Vote struct { + Vote *types.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` +} + +func (m *Vote) Reset() { *m = Vote{} } +func (m *Vote) String() string { return proto.CompactTextString(m) } +func (*Vote) ProtoMessage() {} +func (*Vote) Descriptor() ([]byte, []int) { + return fileDescriptor_81a22d2efc008981, []int{5} +} +func (m *Vote) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Vote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Vote.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Vote) XXX_Merge(src proto.Message) { + xxx_messageInfo_Vote.Merge(m, src) +} +func (m *Vote) XXX_Size() int { + return m.Size() +} +func (m *Vote) XXX_DiscardUnknown() { + xxx_messageInfo_Vote.DiscardUnknown(m) +} + +var xxx_messageInfo_Vote proto.InternalMessageInfo + +func (m *Vote) GetVote() *types.Vote { + if m != nil { + return m.Vote + } + return nil +} + +// HasVote is sent to indicate that a particular vote has been received. 
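+// Only the vote's coordinates (height, round, type, validator index) are sent, not the vote itself.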
+type HasVote struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Type types.SignedMsgType `protobuf:"varint,3,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` + Index int32 `protobuf:"varint,4,opt,name=index,proto3" json:"index,omitempty"` +} + +func (m *HasVote) Reset() { *m = HasVote{} } +func (m *HasVote) String() string { return proto.CompactTextString(m) } +func (*HasVote) ProtoMessage() {} +func (*HasVote) Descriptor() ([]byte, []int) { + return fileDescriptor_81a22d2efc008981, []int{6} +} +func (m *HasVote) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HasVote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HasVote.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HasVote) XXX_Merge(src proto.Message) { + xxx_messageInfo_HasVote.Merge(m, src) +} +func (m *HasVote) XXX_Size() int { + return m.Size() +} +func (m *HasVote) XXX_DiscardUnknown() { + xxx_messageInfo_HasVote.DiscardUnknown(m) +} + +var xxx_messageInfo_HasVote proto.InternalMessageInfo + +func (m *HasVote) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *HasVote) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *HasVote) GetType() types.SignedMsgType { + if m != nil { + return m.Type + } + return types.UnknownType +} + +func (m *HasVote) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 +} + +// VoteSetMaj23 is sent to indicate that a given BlockID has seen +2/3 votes. 
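+// Height, Round, and Type locate the vote set; BlockID names the block that crossed the +2/3 threshold.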
+type VoteSetMaj23 struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Type types.SignedMsgType `protobuf:"varint,3,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` + BlockID types.BlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id"` +} + +func (m *VoteSetMaj23) Reset() { *m = VoteSetMaj23{} } +func (m *VoteSetMaj23) String() string { return proto.CompactTextString(m) } +func (*VoteSetMaj23) ProtoMessage() {} +func (*VoteSetMaj23) Descriptor() ([]byte, []int) { + return fileDescriptor_81a22d2efc008981, []int{7} +} +func (m *VoteSetMaj23) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VoteSetMaj23) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VoteSetMaj23.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VoteSetMaj23) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoteSetMaj23.Merge(m, src) +} +func (m *VoteSetMaj23) XXX_Size() int { + return m.Size() +} +func (m *VoteSetMaj23) XXX_DiscardUnknown() { + xxx_messageInfo_VoteSetMaj23.DiscardUnknown(m) +} + +var xxx_messageInfo_VoteSetMaj23 proto.InternalMessageInfo + +func (m *VoteSetMaj23) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *VoteSetMaj23) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *VoteSetMaj23) GetType() types.SignedMsgType { + if m != nil { + return m.Type + } + return types.UnknownType +} + +func (m *VoteSetMaj23) GetBlockID() types.BlockID { + if m != nil { + return m.BlockID + } + return types.BlockID{} +} + +// VoteSetBits is sent to communicate the bit-array of votes seen for the BlockID. 
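+// It answers a VoteSetMaj23: Votes holds one bit per validator index, marking which of the identified votes the sender has.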
+type VoteSetBits struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Type types.SignedMsgType `protobuf:"varint,3,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` + BlockID types.BlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id"` + Votes bits.BitArray `protobuf:"bytes,5,opt,name=votes,proto3" json:"votes"` +} + +func (m *VoteSetBits) Reset() { *m = VoteSetBits{} } +func (m *VoteSetBits) String() string { return proto.CompactTextString(m) } +func (*VoteSetBits) ProtoMessage() {} +func (*VoteSetBits) Descriptor() ([]byte, []int) { + return fileDescriptor_81a22d2efc008981, []int{8} +} +func (m *VoteSetBits) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VoteSetBits) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VoteSetBits.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VoteSetBits) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoteSetBits.Merge(m, src) +} +func (m *VoteSetBits) XXX_Size() int { + return m.Size() +} +func (m *VoteSetBits) XXX_DiscardUnknown() { + xxx_messageInfo_VoteSetBits.DiscardUnknown(m) +} + +var xxx_messageInfo_VoteSetBits proto.InternalMessageInfo + +func (m *VoteSetBits) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *VoteSetBits) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *VoteSetBits) GetType() types.SignedMsgType { + if m != nil { + return m.Type + } + return types.UnknownType +} + +func (m *VoteSetBits) GetBlockID() types.BlockID { + if m != nil { + return m.BlockID + } + return types.BlockID{} +} + +func (m *VoteSetBits) GetVotes() bits.BitArray { + if m != nil { + return m.Votes + } + return bits.BitArray{} +} + +type Message struct { + // Types that are valid to be assigned to Sum: + // *Message_NewRoundStep + // *Message_NewValidBlock + // *Message_Proposal + // *Message_ProposalPol + // *Message_BlockPart + // *Message_Vote + // *Message_HasVote + // *Message_VoteSetMaj23 + // *Message_VoteSetBits + Sum isMessage_Sum `protobuf_oneof:"sum"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_81a22d2efc008981, []int{9} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +type isMessage_Sum interface { + isMessage_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Message_NewRoundStep struct { + NewRoundStep *NewRoundStep `protobuf:"bytes,1,opt,name=new_round_step,json=newRoundStep,proto3,oneof" 
json:"new_round_step,omitempty"` +} +type Message_NewValidBlock struct { + NewValidBlock *NewValidBlock `protobuf:"bytes,2,opt,name=new_valid_block,json=newValidBlock,proto3,oneof" json:"new_valid_block,omitempty"` +} +type Message_Proposal struct { + Proposal *Proposal `protobuf:"bytes,3,opt,name=proposal,proto3,oneof" json:"proposal,omitempty"` +} +type Message_ProposalPol struct { + ProposalPol *ProposalPOL `protobuf:"bytes,4,opt,name=proposal_pol,json=proposalPol,proto3,oneof" json:"proposal_pol,omitempty"` +} +type Message_BlockPart struct { + BlockPart *BlockPart `protobuf:"bytes,5,opt,name=block_part,json=blockPart,proto3,oneof" json:"block_part,omitempty"` +} +type Message_Vote struct { + Vote *Vote `protobuf:"bytes,6,opt,name=vote,proto3,oneof" json:"vote,omitempty"` +} +type Message_HasVote struct { + HasVote *HasVote `protobuf:"bytes,7,opt,name=has_vote,json=hasVote,proto3,oneof" json:"has_vote,omitempty"` +} +type Message_VoteSetMaj23 struct { + VoteSetMaj23 *VoteSetMaj23 `protobuf:"bytes,8,opt,name=vote_set_maj23,json=voteSetMaj23,proto3,oneof" json:"vote_set_maj23,omitempty"` +} +type Message_VoteSetBits struct { + VoteSetBits *VoteSetBits `protobuf:"bytes,9,opt,name=vote_set_bits,json=voteSetBits,proto3,oneof" json:"vote_set_bits,omitempty"` +} + +func (*Message_NewRoundStep) isMessage_Sum() {} +func (*Message_NewValidBlock) isMessage_Sum() {} +func (*Message_Proposal) isMessage_Sum() {} +func (*Message_ProposalPol) isMessage_Sum() {} +func (*Message_BlockPart) isMessage_Sum() {} +func (*Message_Vote) isMessage_Sum() {} +func (*Message_HasVote) isMessage_Sum() {} +func (*Message_VoteSetMaj23) isMessage_Sum() {} +func (*Message_VoteSetBits) isMessage_Sum() {} + +func (m *Message) GetSum() isMessage_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *Message) GetNewRoundStep() *NewRoundStep { + if x, ok := m.GetSum().(*Message_NewRoundStep); ok { + return x.NewRoundStep + } + return nil +} + +func (m *Message) GetNewValidBlock() *NewValidBlock { + if x, ok := m.GetSum().(*Message_NewValidBlock); ok { + return x.NewValidBlock + } + return nil +} + +func (m *Message) GetProposal() *Proposal { + if x, ok := m.GetSum().(*Message_Proposal); ok { + return x.Proposal + } + return nil +} + +func (m *Message) GetProposalPol() *ProposalPOL { + if x, ok := m.GetSum().(*Message_ProposalPol); ok { + return x.ProposalPol + } + return nil +} + +func (m *Message) GetBlockPart() *BlockPart { + if x, ok := m.GetSum().(*Message_BlockPart); ok { + return x.BlockPart + } + return nil +} + +func (m *Message) GetVote() *Vote { + if x, ok := m.GetSum().(*Message_Vote); ok { + return x.Vote + } + return nil +} + +func (m *Message) GetHasVote() *HasVote { + if x, ok := m.GetSum().(*Message_HasVote); ok { + return x.HasVote + } + return nil +} + +func (m *Message) GetVoteSetMaj23() *VoteSetMaj23 { + if x, ok := m.GetSum().(*Message_VoteSetMaj23); ok { + return x.VoteSetMaj23 + } + return nil +} + +func (m *Message) GetVoteSetBits() *VoteSetBits { + if x, ok := m.GetSum().(*Message_VoteSetBits); ok { + return x.VoteSetBits + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*Message) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Message_NewRoundStep)(nil), + (*Message_NewValidBlock)(nil), + (*Message_Proposal)(nil), + (*Message_ProposalPol)(nil), + (*Message_BlockPart)(nil), + (*Message_Vote)(nil), + (*Message_HasVote)(nil), + (*Message_VoteSetMaj23)(nil), + (*Message_VoteSetBits)(nil), + } +} + +func init() { + proto.RegisterType((*NewRoundStep)(nil), "tendermint.consensus.NewRoundStep") + proto.RegisterType((*NewValidBlock)(nil), "tendermint.consensus.NewValidBlock") + proto.RegisterType((*Proposal)(nil), "tendermint.consensus.Proposal") + proto.RegisterType((*ProposalPOL)(nil), "tendermint.consensus.ProposalPOL") + proto.RegisterType((*BlockPart)(nil), "tendermint.consensus.BlockPart") + proto.RegisterType((*Vote)(nil), "tendermint.consensus.Vote") + proto.RegisterType((*HasVote)(nil), "tendermint.consensus.HasVote") + proto.RegisterType((*VoteSetMaj23)(nil), "tendermint.consensus.VoteSetMaj23") + proto.RegisterType((*VoteSetBits)(nil), "tendermint.consensus.VoteSetBits") + proto.RegisterType((*Message)(nil), "tendermint.consensus.Message") +} + +func init() { proto.RegisterFile("tendermint/consensus/types.proto", fileDescriptor_81a22d2efc008981) } + +var fileDescriptor_81a22d2efc008981 = []byte{ + // 853 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x56, 0x4f, 0x8f, 0xdb, 0x44, + 0x14, 0xb7, 0x59, 0x67, 0x93, 0x7d, 0xde, 0xec, 0xc2, 0x68, 0x5b, 0x85, 0x00, 0x49, 0x30, 0x97, + 0x15, 0x42, 0x0e, 0xca, 0x1e, 0x90, 0x0a, 0x12, 0x60, 0xfe, 0xd4, 0xad, 0x9a, 0x36, 0x72, 0x4a, + 0x85, 0xb8, 0x58, 0x4e, 0x3c, 0x4a, 0x86, 0xc6, 0x1e, 0xcb, 0x33, 0xc9, 0xb2, 0x57, 0x3e, 0x01, + 0x1f, 0x80, 0xaf, 0x81, 0xc4, 0x47, 0xe8, 0xb1, 0x47, 0x4e, 0x15, 0xca, 0x7e, 0x04, 0x04, 0x67, + 0x34, 0xe3, 0x49, 0x3c, 0xa1, 0xde, 0x85, 0xbd, 0x20, 0xf5, 0x36, 0xe3, 0xf7, 0xde, 0x6f, 0xde, + 0xfc, 0xde, 0x7b, 0x3f, 0x0f, 0xf4, 0x38, 0x4e, 0x63, 0x9c, 0x27, 0x24, 0xe5, 0xfd, 0x29, 0x4d, + 0x19, 0x4e, 0xd9, 0x92, 0xf5, 0xf9, 0x45, 0x86, 0x99, 0x9b, 0xe5, 0x94, 0x53, 0x74, 0x52, 0x7a, + 0xb8, 0x5b, 0x8f, 0xf6, 0xc9, 0x8c, 0xce, 0xa8, 0x74, 0xe8, 0x8b, 0x55, 0xe1, 0xdb, 0x7e, 0x5b, + 0x43, 0x93, 0x18, 0x3a, 0x52, 0x5b, 0x3f, 0x6b, 0x41, 0x26, 0xac, 0x3f, 0x21, 0x7c, 0xc7, 0xc3, + 0xf9, 0xc5, 0x84, 0xc3, 0x87, 0xf8, 0x3c, 0xa0, 0xcb, 0x34, 0x1e, 0x73, 0x9c, 0xa1, 0xdb, 0xb0, + 0x3f, 0xc7, 0x64, 0x36, 0xe7, 0x2d, 0xb3, 0x67, 0x9e, 0xee, 0x05, 0x6a, 0x87, 0x4e, 0xa0, 0x96, + 0x0b, 0xa7, 0xd6, 0x6b, 0x3d, 0xf3, 0xb4, 0x16, 0x14, 0x1b, 0x84, 0xc0, 0x62, 0x1c, 0x67, 0xad, + 0xbd, 0x9e, 0x79, 0xda, 0x0c, 0xe4, 0x1a, 0x7d, 0x04, 0x2d, 0x86, 0xa7, 0x34, 0x8d, 0x59, 0xc8, + 0x48, 0x3a, 0xc5, 0x21, 0xe3, 0x51, 0xce, 0x43, 0x4e, 0x12, 0xdc, 0xb2, 0x24, 0xe6, 0x2d, 0x65, + 0x1f, 0x0b, 0xf3, 0x58, 0x58, 0x1f, 0x93, 0x04, 0xa3, 0xf7, 0xe1, 0x8d, 0x45, 0xc4, 0x78, 0x38, + 0xa5, 0x49, 0x42, 0x78, 0x58, 0x1c, 0x57, 0x93, 0xc7, 0x1d, 0x0b, 0xc3, 0x17, 0xf2, 0xbb, 0x4c, + 0xd5, 0xf9, 0xd3, 0x84, 0xe6, 0x43, 0x7c, 0xfe, 0x24, 0x5a, 0x90, 0xd8, 0x5b, 0xd0, 0xe9, 0xd3, + 0x1b, 0x26, 0xfe, 0x2d, 0xdc, 0x9a, 0x88, 0xb0, 0x30, 0x13, 0xb9, 0x31, 0xcc, 0xc3, 0x39, 0x8e, + 0x62, 0x9c, 0xcb, 0x9b, 0xd8, 0x83, 0xae, 0xab, 0xd5, 0xa0, 0xe0, 0x6b, 0x14, 0xe5, 0x7c, 0x8c, + 0xb9, 0x2f, 0xdd, 0x3c, 0xeb, 0xd9, 0x8b, 0xae, 0x11, 0x20, 0x89, 0xb1, 0x63, 0x41, 0x9f, 0x82, + 0x5d, 0x22, 0x33, 0x79, 0x63, 0x7b, 0xd0, 0xd1, 0xf1, 0x44, 0x25, 0x5c, 0x51, 0x09, 0xd7, 0x23, + 0xfc, 0xf3, 0x3c, 0x8f, 0x2e, 0x02, 0xd8, 0x02, 0x31, 0xf4, 0x16, 0x1c, 
0x10, 0xa6, 0x48, 0x90, + 0xd7, 0x6f, 0x04, 0x0d, 0xc2, 0x8a, 0xcb, 0x3b, 0x3e, 0x34, 0x46, 0x39, 0xcd, 0x28, 0x8b, 0x16, + 0xe8, 0x13, 0x68, 0x64, 0x6a, 0x2d, 0xef, 0x6c, 0x0f, 0xda, 0x15, 0x69, 0x2b, 0x0f, 0x95, 0xf1, + 0x36, 0xc2, 0xf9, 0xd9, 0x04, 0x7b, 0x63, 0x1c, 0x3d, 0x7a, 0x70, 0x25, 0x7f, 0x1f, 0x00, 0xda, + 0xc4, 0x84, 0x19, 0x5d, 0x84, 0x3a, 0x99, 0xaf, 0x6f, 0x2c, 0x23, 0xba, 0x90, 0x75, 0x41, 0x77, + 0xe1, 0x50, 0xf7, 0x56, 0x74, 0xfe, 0xcb, 0xf5, 0x55, 0x6e, 0xb6, 0x86, 0xe6, 0x3c, 0x85, 0x03, + 0x6f, 0xc3, 0xc9, 0x0d, 0x6b, 0xfb, 0x21, 0x58, 0x82, 0x7b, 0x75, 0xf6, 0xed, 0xea, 0x52, 0xaa, + 0x33, 0xa5, 0xa7, 0x33, 0x00, 0xeb, 0x09, 0xe5, 0xa2, 0x03, 0xad, 0x15, 0xe5, 0x58, 0xb1, 0x59, + 0x11, 0x29, 0xbc, 0x02, 0xe9, 0xe3, 0xfc, 0x68, 0x42, 0xdd, 0x8f, 0x98, 0x8c, 0xbb, 0x59, 0x7e, + 0x67, 0x60, 0x09, 0x34, 0x99, 0xdf, 0x51, 0x55, 0xab, 0x8d, 0xc9, 0x2c, 0xc5, 0xf1, 0x90, 0xcd, + 0x1e, 0x5f, 0x64, 0x38, 0x90, 0xce, 0x02, 0x8a, 0xa4, 0x31, 0xfe, 0x41, 0x36, 0x54, 0x2d, 0x28, + 0x36, 0xce, 0xaf, 0x26, 0x1c, 0x8a, 0x0c, 0xc6, 0x98, 0x0f, 0xa3, 0xef, 0x07, 0x67, 0xff, 0x47, + 0x26, 0x5f, 0x41, 0xa3, 0x68, 0x70, 0x12, 0xab, 0xee, 0x7e, 0xf3, 0xe5, 0x40, 0x59, 0xbb, 0x7b, + 0x5f, 0x7a, 0xc7, 0x82, 0xe5, 0xf5, 0x8b, 0x6e, 0x5d, 0x7d, 0x08, 0xea, 0x32, 0xf6, 0x5e, 0xec, + 0xfc, 0x61, 0x82, 0xad, 0x52, 0xf7, 0x08, 0x67, 0xaf, 0x4e, 0xe6, 0xe8, 0x0e, 0xd4, 0x44, 0x07, + 0x30, 0x39, 0x9c, 0xff, 0xb5, 0xb9, 0x8b, 0x10, 0xe7, 0x2f, 0x0b, 0xea, 0x43, 0xcc, 0x58, 0x34, + 0xc3, 0xe8, 0x3e, 0x1c, 0xa5, 0xf8, 0xbc, 0x18, 0xa8, 0x50, 0xca, 0x68, 0xd1, 0x77, 0x8e, 0x5b, + 0xf5, 0x03, 0x70, 0x75, 0x99, 0xf6, 0x8d, 0xe0, 0x30, 0xd5, 0x65, 0x7b, 0x08, 0xc7, 0x02, 0x6b, + 0x25, 0xf4, 0x30, 0x94, 0x89, 0x4a, 0xbe, 0xec, 0xc1, 0x7b, 0x57, 0x82, 0x95, 0xda, 0xe9, 0x1b, + 0x41, 0x33, 0xdd, 0x11, 0x53, 0x5d, 0x5a, 0x2a, 0x46, 0xb8, 0xc4, 0xd9, 0x28, 0x88, 0xaf, 0x49, + 0x0b, 0xfa, 0xfa, 0x1f, 0x22, 0x50, 0x70, 0xfd, 0xee, 0xf5, 0x08, 0xa3, 0x47, 0x0f, 0xfc, 0x5d, + 0x0d, 0x40, 0x9f, 0x01, 0x94, 0x52, 0xaa, 0xd8, 0xee, 0x56, 0xa3, 0x6c, 0xb5, 0xc2, 0x37, 0x82, + 0x83, 0xad, 0x98, 0x0a, 0x29, 0x90, 0x03, 0xbd, 0xff, 0xb2, 0x3c, 0x96, 0xb1, 0xa2, 0x0b, 0x7d, + 0xa3, 0x18, 0x6b, 0x74, 0x07, 0x1a, 0xf3, 0x88, 0x85, 0x32, 0xaa, 0x2e, 0xa3, 0xde, 0xa9, 0x8e, + 0x52, 0xb3, 0xef, 0x1b, 0x41, 0x7d, 0xae, 0x64, 0xe0, 0x3e, 0x1c, 0x89, 0x38, 0xf9, 0x3b, 0x49, + 0xc4, 0x38, 0xb6, 0x1a, 0xd7, 0x15, 0x54, 0x1f, 0x5c, 0x51, 0xd0, 0x95, 0x3e, 0xc8, 0x77, 0xa1, + 0xb9, 0xc5, 0x12, 0xfd, 0xd4, 0x3a, 0xb8, 0x8e, 0x44, 0x6d, 0x90, 0x04, 0x89, 0xab, 0x72, 0xeb, + 0xd5, 0x60, 0x8f, 0x2d, 0x13, 0xef, 0x9b, 0x67, 0xeb, 0x8e, 0xf9, 0x7c, 0xdd, 0x31, 0x7f, 0x5f, + 0x77, 0xcc, 0x9f, 0x2e, 0x3b, 0xc6, 0xf3, 0xcb, 0x8e, 0xf1, 0xdb, 0x65, 0xc7, 0xf8, 0xee, 0xe3, + 0x19, 0xe1, 0xf3, 0xe5, 0xc4, 0x9d, 0xd2, 0xa4, 0xaf, 0xbf, 0x26, 0xca, 0x65, 0xf1, 0xea, 0xa8, + 0x7a, 0xb7, 0x4c, 0xf6, 0xa5, 0xed, 0xec, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x5c, 0x91, + 0x04, 0xd6, 0x08, 0x00, 0x00, +} + +func (m *NewRoundStep) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NewRoundStep) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NewRoundStep) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastCommitRound != 0 { + i = encodeVarintTypes(dAtA, i, 
uint64(m.LastCommitRound)) + i-- + dAtA[i] = 0x28 + } + if m.SecondsSinceStartTime != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.SecondsSinceStartTime)) + i-- + dAtA[i] = 0x20 + } + if m.Step != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Step)) + i-- + dAtA[i] = 0x18 + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *NewValidBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NewValidBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NewValidBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IsCommit { + i-- + if m.IsCommit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.BlockParts != nil { + { + size, err := m.BlockParts.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.BlockPartSetHeader.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Proposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Proposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Proposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProposalPOL) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProposalPOL) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProposalPOL) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProposalPol.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.ProposalPolRound != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ProposalPolRound)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BlockPart) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*BlockPart) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockPart) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Part.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Vote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Vote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Vote != nil { + { + size, err := m.Vote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HasVote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HasVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HasVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x20 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x18 + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *VoteSetMaj23) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VoteSetMaj23) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VoteSetMaj23) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x18 + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *VoteSetBits) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VoteSetBits) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*VoteSetBits) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Votes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x18 + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Message_NewRoundStep) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_NewRoundStep) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NewRoundStep != nil { + { + size, err := m.NewRoundStep.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Message_NewValidBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_NewValidBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.NewValidBlock != nil { + { + size, err := m.NewValidBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Message_Proposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Proposal != nil { + { + size, err := m.Proposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Message_ProposalPol) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_ProposalPol) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ProposalPol != nil { + { + size, err := m.ProposalPol.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Message_BlockPart) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_BlockPart) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.BlockPart != nil { + { + size, err := m.BlockPart.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Message_Vote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Vote != nil { + { + size, err := m.Vote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Message_HasVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_HasVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HasVote != nil { + { + size, err := m.HasVote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Message_VoteSetMaj23) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_VoteSetMaj23) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.VoteSetMaj23 != nil { + { + size, err := m.VoteSetMaj23.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *Message_VoteSetBits) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_VoteSetBits) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.VoteSetBits != nil { + { + size, err := m.VoteSetBits.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *NewRoundStep) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if m.Step != 0 { + n += 1 + sovTypes(uint64(m.Step)) + } + if m.SecondsSinceStartTime != 0 { + n += 1 + sovTypes(uint64(m.SecondsSinceStartTime)) + } + if m.LastCommitRound != 0 { + n += 1 + sovTypes(uint64(m.LastCommitRound)) + } + return n +} + +func (m *NewValidBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + l = m.BlockPartSetHeader.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.BlockParts != nil { + l = m.BlockParts.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.IsCommit { + n += 2 + } + return n +} + +func (m *Proposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Proposal.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *ProposalPOL) Size() (n int) { + if m == nil 
{ + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.ProposalPolRound != 0 { + n += 1 + sovTypes(uint64(m.ProposalPolRound)) + } + l = m.ProposalPol.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *BlockPart) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + l = m.Part.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *Vote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Vote != nil { + l = m.Vote.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *HasVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + return n +} + +func (m *VoteSetMaj23) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *VoteSetBits) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + l = m.Votes.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *Message_NewRoundStep) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NewRoundStep != nil { + l = m.NewRoundStep.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_NewValidBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NewValidBlock != nil { + l = m.NewValidBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_Proposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Proposal != nil { + l = m.Proposal.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_ProposalPol) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProposalPol != nil { + l = m.ProposalPol.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_BlockPart) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockPart != nil { + l = m.BlockPart.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_Vote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Vote != nil { + l = m.Vote.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_HasVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HasVote != nil { + l = m.HasVote.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_VoteSetMaj23) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VoteSetMaj23 != nil { + l = m.VoteSetMaj23.Size() + n += 1 + l + 
sovTypes(uint64(l)) + } + return n +} +func (m *Message_VoteSetBits) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VoteSetBits != nil { + l = m.VoteSetBits.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *NewRoundStep) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NewRoundStep: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NewRoundStep: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) + } + m.Step = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Step |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SecondsSinceStartTime", wireType) + } + m.SecondsSinceStartTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SecondsSinceStartTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastCommitRound", wireType) + } + m.LastCommitRound = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastCommitRound |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NewValidBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NewValidBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NewValidBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockPartSetHeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockPartSetHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockParts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockParts == nil { + m.BlockParts = &bits.BitArray{} + } + if err := m.BlockParts.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsCommit", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsCommit = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Proposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 
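+ // Decode the next field key. Protobuf keys are varints: each byte carries
+ // seven payload bits and the high bit marks continuation. The decoded key
+ // packs the field number and wire type together as
+ // (field_number << 3) | wire_type, which is unpacked just below.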
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Proposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Proposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Proposal.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProposalPOL) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProposalPOL: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProposalPOL: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposalPolRound", wireType) + } + m.ProposalPolRound = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProposalPolRound |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposalPol", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ProposalPol.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockPart) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockPart: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockPart: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Part", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Part.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Vote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vote: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vote: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vote == nil { + m.Vote = &types.Vote{} + } + if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HasVote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HasVote: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HasVote: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= types.SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VoteSetMaj23) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VoteSetMaj23: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VoteSetMaj23: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= types.SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VoteSetBits) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VoteSetBits: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VoteSetBits: illegal tag %d (wire type %d)", fieldNum, 
wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= types.SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Votes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field NewRoundStep", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NewRoundStep{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_NewRoundStep{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewValidBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NewValidBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_NewValidBlock{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Proposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_Proposal{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposalPol", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ProposalPOL{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_ProposalPol{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockPart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &BlockPart{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_BlockPart{v} + iNdEx = postIndex + case 6: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Vote{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_Vote{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HasVote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &HasVote{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_HasVote{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteSetMaj23", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &VoteSetMaj23{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_VoteSetMaj23{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteSetBits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &VoteSetBits{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_VoteSetBits{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 
0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthTypes
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupTypes
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthTypes
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthTypes        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowTypes          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/proto/tendermint/consensus/types.proto b/proto/tendermint/consensus/types.proto
new file mode 100644
index 000000000..6e1f41371
--- /dev/null
+++ b/proto/tendermint/consensus/types.proto
@@ -0,0 +1,92 @@
+syntax = "proto3";
+package tendermint.consensus;
+
+option go_package = "github.com/tendermint/tendermint/proto/tendermint/consensus";
+
+import "gogoproto/gogo.proto";
+import "tendermint/types/types.proto";
+import "tendermint/libs/bits/types.proto";
+
+// NewRoundStep is sent for every step taken in the ConsensusState,
+// i.e., for every height/round/step transition.
+message NewRoundStep {
+  int64 height = 1;
+  int32 round = 2;
+  uint32 step = 3;
+  int64 seconds_since_start_time = 4;
+  int32 last_commit_round = 5;
+}
+
+// NewValidBlock is sent when a validator observes a valid block B in some round r,
+// i.e., there is a Proposal for block B and 2/3+ prevotes for block B in round r.
+// If the block is also committed, the IsCommit flag is set to true.
+message NewValidBlock {
+  int64 height = 1;
+  int32 round = 2;
+  tendermint.types.PartSetHeader block_part_set_header = 3 [(gogoproto.nullable) = false];
+  tendermint.libs.bits.BitArray block_parts = 4;
+  bool is_commit = 5;
+}
+
+// Proposal is sent when a new block is proposed.
+message Proposal {
+  tendermint.types.Proposal proposal = 1 [(gogoproto.nullable) = false];
+}
+
+// ProposalPOL is sent when a previous proposal is re-proposed.
+message ProposalPOL {
+  int64 height = 1;
+  int32 proposal_pol_round = 2;
+  tendermint.libs.bits.BitArray proposal_pol = 3 [(gogoproto.nullable) = false];
+}
+
+// BlockPart is sent when gossiping a piece of the proposed block.
+message BlockPart {
+  int64 height = 1;
+  int32 round = 2;
+  tendermint.types.Part part = 3 [(gogoproto.nullable) = false];
+}
+
+// Vote is sent when voting for a proposal (or lack thereof).
+message Vote {
+  tendermint.types.Vote vote = 1;
+}
+
+// HasVote is sent to indicate that a particular vote has been received.
+message HasVote {
+  int64 height = 1;
+  int32 round = 2;
+  tendermint.types.SignedMsgType type = 3;
+  int32 index = 4;
+}
+
+// VoteSetMaj23 is sent to indicate that a given BlockID has seen +2/3 votes.
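+// Here, +2/3 means more than two-thirds of the total voting power.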
+message VoteSetMaj23 {
+  int64 height = 1;
+  int32 round = 2;
+  tendermint.types.SignedMsgType type = 3;
+  tendermint.types.BlockID block_id = 4 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false];
+}
+
+// VoteSetBits is sent to communicate the bit-array of votes seen for the BlockID.
+message VoteSetBits {
+  int64 height = 1;
+  int32 round = 2;
+  tendermint.types.SignedMsgType type = 3;
+  tendermint.types.BlockID block_id = 4 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false];
+  tendermint.libs.bits.BitArray votes = 5 [(gogoproto.nullable) = false];
+}
+
+// Message is the union of consensus reactor messages; exactly one of the
+// fields below is set in any given Message.
+message Message {
+  oneof sum {
+    NewRoundStep new_round_step = 1;
+    NewValidBlock new_valid_block = 2;
+    Proposal proposal = 3;
+    ProposalPOL proposal_pol = 4;
+    BlockPart block_part = 5;
+    Vote vote = 6;
+    HasVote has_vote = 7;
+    VoteSetMaj23 vote_set_maj23 = 8;
+    VoteSetBits vote_set_bits = 9;
+  }
+}
diff --git a/proto/tendermint/consensus/wal.pb.go b/proto/tendermint/consensus/wal.pb.go
new file mode 100644
index 000000000..86ff1be01
--- /dev/null
+++ b/proto/tendermint/consensus/wal.pb.go
@@ -0,0 +1,1555 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: tendermint/consensus/wal.proto
+
+package consensus
+
+import (
+	fmt "fmt"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	_ "github.com/gogo/protobuf/types"
+	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	_ "github.com/golang/protobuf/ptypes/duration"
+	types "github.com/tendermint/tendermint/proto/tendermint/types"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+	time "time"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgInfo are msgs from the reactor which may update the state +type MsgInfo struct { + Msg Message `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg"` + PeerID string `protobuf:"bytes,2,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` +} + +func (m *MsgInfo) Reset() { *m = MsgInfo{} } +func (m *MsgInfo) String() string { return proto.CompactTextString(m) } +func (*MsgInfo) ProtoMessage() {} +func (*MsgInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_ed0b60c2d348ab09, []int{0} +} +func (m *MsgInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgInfo.Merge(m, src) +} +func (m *MsgInfo) XXX_Size() int { + return m.Size() +} +func (m *MsgInfo) XXX_DiscardUnknown() { + xxx_messageInfo_MsgInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgInfo proto.InternalMessageInfo + +func (m *MsgInfo) GetMsg() Message { + if m != nil { + return m.Msg + } + return Message{} +} + +func (m *MsgInfo) GetPeerID() string { + if m != nil { + return m.PeerID + } + return "" +} + +// TimeoutInfo internally generated messages which may update the state +type TimeoutInfo struct { + Duration time.Duration `protobuf:"bytes,1,opt,name=duration,proto3,stdduration" json:"duration"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,3,opt,name=round,proto3" json:"round,omitempty"` + Step uint32 `protobuf:"varint,4,opt,name=step,proto3" json:"step,omitempty"` +} + +func (m *TimeoutInfo) Reset() { *m = TimeoutInfo{} } +func (m *TimeoutInfo) String() string { return proto.CompactTextString(m) } +func (*TimeoutInfo) ProtoMessage() {} +func (*TimeoutInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_ed0b60c2d348ab09, []int{1} +} +func (m *TimeoutInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeoutInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeoutInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeoutInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeoutInfo.Merge(m, src) +} +func (m *TimeoutInfo) XXX_Size() int { + return m.Size() +} +func (m *TimeoutInfo) XXX_DiscardUnknown() { + xxx_messageInfo_TimeoutInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeoutInfo proto.InternalMessageInfo + +func (m *TimeoutInfo) GetDuration() time.Duration { + if m != nil { + return m.Duration + } + return 0 +} + +func (m *TimeoutInfo) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *TimeoutInfo) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *TimeoutInfo) GetStep() uint32 { + if m != nil { + return m.Step + } + return 0 +} + +// EndHeight marks the end of the given height inside WAL. +// @internal used by scripts/wal2json util. 
+type EndHeight struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *EndHeight) Reset() { *m = EndHeight{} } +func (m *EndHeight) String() string { return proto.CompactTextString(m) } +func (*EndHeight) ProtoMessage() {} +func (*EndHeight) Descriptor() ([]byte, []int) { + return fileDescriptor_ed0b60c2d348ab09, []int{2} +} +func (m *EndHeight) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndHeight) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EndHeight.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EndHeight) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndHeight.Merge(m, src) +} +func (m *EndHeight) XXX_Size() int { + return m.Size() +} +func (m *EndHeight) XXX_DiscardUnknown() { + xxx_messageInfo_EndHeight.DiscardUnknown(m) +} + +var xxx_messageInfo_EndHeight proto.InternalMessageInfo + +func (m *EndHeight) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +type WALMessage struct { + // Types that are valid to be assigned to Sum: + // *WALMessage_EventDataRoundState + // *WALMessage_MsgInfo + // *WALMessage_TimeoutInfo + // *WALMessage_EndHeight + Sum isWALMessage_Sum `protobuf_oneof:"sum"` +} + +func (m *WALMessage) Reset() { *m = WALMessage{} } +func (m *WALMessage) String() string { return proto.CompactTextString(m) } +func (*WALMessage) ProtoMessage() {} +func (*WALMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_ed0b60c2d348ab09, []int{3} +} +func (m *WALMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WALMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WALMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WALMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_WALMessage.Merge(m, src) +} +func (m *WALMessage) XXX_Size() int { + return m.Size() +} +func (m *WALMessage) XXX_DiscardUnknown() { + xxx_messageInfo_WALMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_WALMessage proto.InternalMessageInfo + +type isWALMessage_Sum interface { + isWALMessage_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type WALMessage_EventDataRoundState struct { + EventDataRoundState *types.EventDataRoundState `protobuf:"bytes,1,opt,name=event_data_round_state,json=eventDataRoundState,proto3,oneof" json:"event_data_round_state,omitempty"` +} +type WALMessage_MsgInfo struct { + MsgInfo *MsgInfo `protobuf:"bytes,2,opt,name=msg_info,json=msgInfo,proto3,oneof" json:"msg_info,omitempty"` +} +type WALMessage_TimeoutInfo struct { + TimeoutInfo *TimeoutInfo `protobuf:"bytes,3,opt,name=timeout_info,json=timeoutInfo,proto3,oneof" json:"timeout_info,omitempty"` +} +type WALMessage_EndHeight struct { + EndHeight *EndHeight `protobuf:"bytes,4,opt,name=end_height,json=endHeight,proto3,oneof" json:"end_height,omitempty"` +} + +func (*WALMessage_EventDataRoundState) isWALMessage_Sum() {} +func (*WALMessage_MsgInfo) isWALMessage_Sum() {} +func (*WALMessage_TimeoutInfo) isWALMessage_Sum() {} +func (*WALMessage_EndHeight) isWALMessage_Sum() {} + +func (m *WALMessage) GetSum() isWALMessage_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *WALMessage) 
GetEventDataRoundState() *types.EventDataRoundState { + if x, ok := m.GetSum().(*WALMessage_EventDataRoundState); ok { + return x.EventDataRoundState + } + return nil +} + +func (m *WALMessage) GetMsgInfo() *MsgInfo { + if x, ok := m.GetSum().(*WALMessage_MsgInfo); ok { + return x.MsgInfo + } + return nil +} + +func (m *WALMessage) GetTimeoutInfo() *TimeoutInfo { + if x, ok := m.GetSum().(*WALMessage_TimeoutInfo); ok { + return x.TimeoutInfo + } + return nil +} + +func (m *WALMessage) GetEndHeight() *EndHeight { + if x, ok := m.GetSum().(*WALMessage_EndHeight); ok { + return x.EndHeight + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*WALMessage) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*WALMessage_EventDataRoundState)(nil), + (*WALMessage_MsgInfo)(nil), + (*WALMessage_TimeoutInfo)(nil), + (*WALMessage_EndHeight)(nil), + } +} + +// TimedWALMessage wraps WALMessage and adds Time for debugging purposes. +type TimedWALMessage struct { + Time time.Time `protobuf:"bytes,1,opt,name=time,proto3,stdtime" json:"time"` + Msg *WALMessage `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` +} + +func (m *TimedWALMessage) Reset() { *m = TimedWALMessage{} } +func (m *TimedWALMessage) String() string { return proto.CompactTextString(m) } +func (*TimedWALMessage) ProtoMessage() {} +func (*TimedWALMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_ed0b60c2d348ab09, []int{4} +} +func (m *TimedWALMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimedWALMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimedWALMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimedWALMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimedWALMessage.Merge(m, src) +} +func (m *TimedWALMessage) XXX_Size() int { + return m.Size() +} +func (m *TimedWALMessage) XXX_DiscardUnknown() { + xxx_messageInfo_TimedWALMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_TimedWALMessage proto.InternalMessageInfo + +func (m *TimedWALMessage) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *TimedWALMessage) GetMsg() *WALMessage { + if m != nil { + return m.Msg + } + return nil +} + +func init() { + proto.RegisterType((*MsgInfo)(nil), "tendermint.consensus.MsgInfo") + proto.RegisterType((*TimeoutInfo)(nil), "tendermint.consensus.TimeoutInfo") + proto.RegisterType((*EndHeight)(nil), "tendermint.consensus.EndHeight") + proto.RegisterType((*WALMessage)(nil), "tendermint.consensus.WALMessage") + proto.RegisterType((*TimedWALMessage)(nil), "tendermint.consensus.TimedWALMessage") +} + +func init() { proto.RegisterFile("tendermint/consensus/wal.proto", fileDescriptor_ed0b60c2d348ab09) } + +var fileDescriptor_ed0b60c2d348ab09 = []byte{ + // 539 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x53, 0xdd, 0x8a, 0xd3, 0x40, + 0x14, 0xce, 0x6c, 0xff, 0x4f, 0x15, 0x21, 0x96, 0xa5, 0x16, 0x36, 0x8d, 0x5d, 0x84, 0x5e, 0x25, + 0xb0, 0x22, 0x88, 0x5e, 0xa8, 0xa5, 0x2b, 0x2d, 0xb8, 0x20, 0xe3, 0x8a, 0x20, 0x42, 0x48, 0x37, + 0xa7, 0x69, 0x60, 0x33, 0x53, 0x32, 0x13, 0xc5, 0x2b, 0x5f, 0xa1, 0x97, 0xbe, 0x89, 0xaf, 0xb0, + 0x97, 0x7b, 0xe9, 0xd5, 0x2a, 0xed, 0x8b, 0x48, 0x66, 0xd2, 0x36, 0xb8, 0xd9, 0xbb, 0x39, 0x73, + 0xbe, 0x73, 0xbe, 0x73, 
0xbe, 0x6f, 0x06, 0x2c, 0x89, 0x2c, 0xc0, 0x24, 0x8e, 0x98, 0x74, 0x2f, + 0x38, 0x13, 0xc8, 0x44, 0x2a, 0xdc, 0x6f, 0xfe, 0xa5, 0xb3, 0x4c, 0xb8, 0xe4, 0x66, 0x67, 0x9f, + 0x77, 0x76, 0xf9, 0x5e, 0x27, 0xe4, 0x21, 0x57, 0x00, 0x37, 0x3b, 0x69, 0x6c, 0xcf, 0x2e, 0xed, + 0x25, 0xbf, 0x2f, 0x51, 0xe4, 0x88, 0xa3, 0x02, 0x42, 0xdd, 0xbb, 0xf8, 0x15, 0x99, 0xdc, 0xa6, + 0xad, 0x90, 0xf3, 0xf0, 0x12, 0x5d, 0x15, 0xcd, 0xd2, 0xb9, 0x1b, 0xa4, 0x89, 0x2f, 0x23, 0xce, + 0xf2, 0x7c, 0xff, 0xff, 0xbc, 0x8c, 0x62, 0x14, 0xd2, 0x8f, 0x97, 0x1a, 0x30, 0x40, 0x68, 0x9c, + 0x89, 0x70, 0xca, 0xe6, 0xdc, 0x7c, 0x06, 0x95, 0x58, 0x84, 0x5d, 0x62, 0x93, 0x61, 0xfb, 0xe4, + 0xc8, 0x29, 0x5b, 0xc3, 0x39, 0x43, 0x21, 0xfc, 0x10, 0x47, 0xd5, 0xab, 0x9b, 0xbe, 0x41, 0x33, + 0xbc, 0x79, 0x0c, 0x8d, 0x25, 0x62, 0xe2, 0x45, 0x41, 0xf7, 0xc0, 0x26, 0xc3, 0xd6, 0x08, 0xd6, + 0x37, 0xfd, 0xfa, 0x7b, 0xc4, 0x64, 0x3a, 0xa6, 0xf5, 0x2c, 0x35, 0x0d, 0x06, 0x2b, 0x02, 0xed, + 0xf3, 0x28, 0x46, 0x9e, 0x4a, 0xc5, 0xf5, 0x0a, 0x9a, 0xdb, 0x49, 0x73, 0xc2, 0x47, 0x8e, 0x1e, + 0xd5, 0xd9, 0x8e, 0xea, 0x8c, 0x73, 0xc0, 0xa8, 0x99, 0x91, 0xfd, 0xfc, 0xd3, 0x27, 0x74, 0x57, + 0x64, 0x1e, 0x42, 0x7d, 0x81, 0x51, 0xb8, 0x90, 0x8a, 0xb4, 0x42, 0xf3, 0xc8, 0xec, 0x40, 0x2d, + 0xe1, 0x29, 0x0b, 0xba, 0x15, 0x9b, 0x0c, 0x6b, 0x54, 0x07, 0xa6, 0x09, 0x55, 0x21, 0x71, 0xd9, + 0xad, 0xda, 0x64, 0x78, 0x9f, 0xaa, 0xf3, 0xe0, 0x18, 0x5a, 0xa7, 0x2c, 0x98, 0xe8, 0xb2, 0x7d, + 0x3b, 0x52, 0x6c, 0x37, 0xf8, 0x75, 0x00, 0xf0, 0xe9, 0xcd, 0xbb, 0x7c, 0x6d, 0xf3, 0x0b, 0x1c, + 0x2a, 0xf9, 0xbd, 0xc0, 0x97, 0xbe, 0xa7, 0x7a, 0x7b, 0x42, 0xfa, 0x12, 0xf3, 0x25, 0x9e, 0x14, + 0x55, 0xd3, 0x36, 0x9e, 0x66, 0xf8, 0xb1, 0x2f, 0x7d, 0x9a, 0xa1, 0x3f, 0x64, 0xe0, 0x89, 0x41, + 0x1f, 0xe2, 0xed, 0x6b, 0xf3, 0x05, 0x34, 0x63, 0x11, 0x7a, 0x11, 0x9b, 0x73, 0xb5, 0xd5, 0xdd, + 0x2e, 0x68, 0xc7, 0x26, 0x06, 0x6d, 0xc4, 0xb9, 0x79, 0x6f, 0xe1, 0x9e, 0xd4, 0xfa, 0xea, 0xfa, + 0x8a, 0xaa, 0x7f, 0x5c, 0x5e, 0x5f, 0x70, 0x62, 0x62, 0xd0, 0xb6, 0x2c, 0x18, 0xf3, 0x1a, 0x00, + 0x59, 0xe0, 0xe5, 0x62, 0x54, 0x55, 0x97, 0x7e, 0x79, 0x97, 0x9d, 0x7a, 0x13, 0x83, 0xb6, 0x70, + 0x1b, 0x8c, 0x6a, 0x50, 0x11, 0x69, 0x3c, 0xf8, 0x01, 0x0f, 0x32, 0x9a, 0xa0, 0xa0, 0xde, 0x73, + 0xa8, 0x66, 0x54, 0xb9, 0x56, 0xbd, 0x5b, 0x86, 0x9f, 0x6f, 0xdf, 0xa6, 0x76, 0x7c, 0x95, 0x39, + 0xae, 0x2a, 0xcc, 0x13, 0xfd, 0x34, 0xb5, 0x28, 0x76, 0xf9, 0x38, 0x7b, 0x22, 0xf5, 0x2e, 0x47, + 0x1f, 0xaf, 0xd6, 0x16, 0xb9, 0x5e, 0x5b, 0xe4, 0xef, 0xda, 0x22, 0xab, 0x8d, 0x65, 0x5c, 0x6f, + 0x2c, 0xe3, 0xf7, 0xc6, 0x32, 0x3e, 0xbf, 0x0c, 0x23, 0xb9, 0x48, 0x67, 0xce, 0x05, 0x8f, 0xdd, + 0xe2, 0xf7, 0xda, 0x1f, 0xf5, 0x47, 0x2d, 0xfb, 0x9c, 0xb3, 0xba, 0xca, 0x3d, 0xfd, 0x17, 0x00, + 0x00, 0xff, 0xff, 0x0b, 0xad, 0x1c, 0x1b, 0x07, 0x04, 0x00, 0x00, +} + +func (m *MsgInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PeerID) > 0 { + i -= len(m.PeerID) + copy(dAtA[i:], m.PeerID) + i = encodeVarintWal(dAtA, i, uint64(len(m.PeerID))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Msg.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) 
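+ // The buffer is filled back to front: the embedded Msg bytes are written
+ // first, then their byte length as a varint, and finally (just below) the
+ // field key 0xa (field 1, wire type 2) is prepended.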
+ } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TimeoutInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeoutInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeoutInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Step != 0 { + i = encodeVarintWal(dAtA, i, uint64(m.Step)) + i-- + dAtA[i] = 0x20 + } + if m.Round != 0 { + i = encodeVarintWal(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x18 + } + if m.Height != 0 { + i = encodeVarintWal(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + n2, err2 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Duration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.Duration):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintWal(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EndHeight) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndHeight) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndHeight) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintWal(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WALMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WALMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WALMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *WALMessage_EventDataRoundState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WALMessage_EventDataRoundState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EventDataRoundState != nil { + { + size, err := m.EventDataRoundState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *WALMessage_MsgInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WALMessage_MsgInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.MsgInfo != nil { + { + size, err := m.MsgInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *WALMessage_TimeoutInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WALMessage_TimeoutInfo) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + if m.TimeoutInfo != nil { + { + size, err := m.TimeoutInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *WALMessage_EndHeight) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WALMessage_EndHeight) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.EndHeight != nil { + { + size, err := m.EndHeight.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *TimedWALMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimedWALMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimedWALMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Msg != nil { + { + size, err := m.Msg.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err8 != nil { + return 0, err8 + } + i -= n8 + i = encodeVarintWal(dAtA, i, uint64(n8)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintWal(dAtA []byte, offset int, v uint64) int { + offset -= sovWal(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Msg.Size() + n += 1 + l + sovWal(uint64(l)) + l = len(m.PeerID) + if l > 0 { + n += 1 + l + sovWal(uint64(l)) + } + return n +} + +func (m *TimeoutInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.Duration) + n += 1 + l + sovWal(uint64(l)) + if m.Height != 0 { + n += 1 + sovWal(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovWal(uint64(m.Round)) + } + if m.Step != 0 { + n += 1 + sovWal(uint64(m.Step)) + } + return n +} + +func (m *EndHeight) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovWal(uint64(m.Height)) + } + return n +} + +func (m *WALMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *WALMessage_EventDataRoundState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EventDataRoundState != nil { + l = m.EventDataRoundState.Size() + n += 1 + l + sovWal(uint64(l)) + } + return n +} +func (m *WALMessage_MsgInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MsgInfo != nil { + l = m.MsgInfo.Size() + n += 1 + l + sovWal(uint64(l)) + } + return n +} +func (m *WALMessage_TimeoutInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeoutInfo != nil { + l = m.TimeoutInfo.Size() + n += 1 + l + sovWal(uint64(l)) + } + return n +} +func (m *WALMessage_EndHeight) Size() 
(n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EndHeight != nil { + l = m.EndHeight.Size() + n += 1 + l + sovWal(uint64(l)) + } + return n +} +func (m *TimedWALMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) + n += 1 + l + sovWal(uint64(l)) + if m.Msg != nil { + l = m.Msg.Size() + n += 1 + l + sovWal(uint64(l)) + } + return n +} + +func sovWal(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWal(x uint64) (n int) { + return sovWal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Msg.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeoutInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeoutInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
TimeoutInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.Duration, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) + } + m.Step = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Step |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndHeight) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndHeight: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndHeight: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
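+			// Unknown fields are skipped rather than rejected, so messages
+			// written with a newer schema still decode here; skipWal returns
+			// the full encoded size of the field being skipped over.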
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WALMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WALMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WALMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventDataRoundState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &types.EventDataRoundState{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &WALMessage_EventDataRoundState{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MsgInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &MsgInfo{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &WALMessage_MsgInfo{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TimeoutInfo{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &WALMessage_TimeoutInfo{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &EndHeight{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + m.Sum = &WALMessage_EndHeight{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimedWALMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimedWALMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimedWALMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Msg == nil { + m.Msg = &WALMessage{} + } + if err := m.Msg.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWal(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + 
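+		// Wire types: 0 = varint, 1 = 64-bit fixed (the 8 bytes above),
+		// 2 = length-delimited, 3/4 = start/end group (deprecated), and
+		// 5 = 32-bit fixed (4 bytes).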
case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowWal
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthWal
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupWal
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthWal
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthWal        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowWal          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupWal = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/proto/tendermint/consensus/wal.proto b/proto/tendermint/consensus/wal.proto
new file mode 100644
index 000000000..44afa2c0c
--- /dev/null
+++ b/proto/tendermint/consensus/wal.proto
@@ -0,0 +1,46 @@
+syntax = "proto3";
+package tendermint.consensus;
+
+option go_package = "github.com/tendermint/tendermint/proto/tendermint/consensus";
+
+import "gogoproto/gogo.proto";
+import "tendermint/consensus/types.proto";
+import "tendermint/types/events.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+
+// MsgInfo is a message from the reactor which may update the state
+message MsgInfo {
+  Message msg = 1 [(gogoproto.nullable) = false];
+  string peer_id = 2 [(gogoproto.customname) = "PeerID"];
+}
+
+// TimeoutInfo is an internally generated message which may update the state
+message TimeoutInfo {
+  google.protobuf.Duration duration = 1
+      [(gogoproto.nullable) = false, (gogoproto.stdduration) = true];
+  int64 height = 2;
+  int32 round = 3;
+  uint32 step = 4;
+}
+
+// EndHeight marks the end of the given height inside the WAL.
+// @internal used by scripts/wal2json util.
+message EndHeight {
+  int64 height = 1;
+}
+
+message WALMessage {
+  oneof sum {
+    tendermint.types.EventDataRoundState event_data_round_state = 1;
+    MsgInfo msg_info = 2;
+    TimeoutInfo timeout_info = 3;
+    EndHeight end_height = 4;
+  }
+}
+
+// TimedWALMessage wraps WALMessage and adds Time for debugging purposes.
+message TimedWALMessage {
+  google.protobuf.Timestamp time = 1 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+  WALMessage msg = 2;
+}
diff --git a/proto/tendermint/crypto/keys.pb.go b/proto/tendermint/crypto/keys.pb.go
new file mode 100644
index 000000000..2e7282b0a
--- /dev/null
+++ b/proto/tendermint/crypto/keys.pb.go
@@ -0,0 +1,658 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: tendermint/crypto/keys.proto
+
+package crypto
+
+import (
+	bytes "bytes"
+	fmt "fmt"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// PublicKey defines the keys available for use with Tendermint Validators +type PublicKey struct { + // Types that are valid to be assigned to Sum: + // *PublicKey_Ed25519 + // *PublicKey_Secp256K1 + Sum isPublicKey_Sum `protobuf_oneof:"sum"` +} + +func (m *PublicKey) Reset() { *m = PublicKey{} } +func (m *PublicKey) String() string { return proto.CompactTextString(m) } +func (*PublicKey) ProtoMessage() {} +func (*PublicKey) Descriptor() ([]byte, []int) { + return fileDescriptor_cb048658b234868c, []int{0} +} +func (m *PublicKey) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PublicKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PublicKey.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PublicKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_PublicKey.Merge(m, src) +} +func (m *PublicKey) XXX_Size() int { + return m.Size() +} +func (m *PublicKey) XXX_DiscardUnknown() { + xxx_messageInfo_PublicKey.DiscardUnknown(m) +} + +var xxx_messageInfo_PublicKey proto.InternalMessageInfo + +type isPublicKey_Sum interface { + isPublicKey_Sum() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int + Compare(interface{}) int +} + +type PublicKey_Ed25519 struct { + Ed25519 []byte `protobuf:"bytes,1,opt,name=ed25519,proto3,oneof" json:"ed25519,omitempty"` +} +type PublicKey_Secp256K1 struct { + Secp256K1 []byte `protobuf:"bytes,2,opt,name=secp256k1,proto3,oneof" json:"secp256k1,omitempty"` +} + +func (*PublicKey_Ed25519) isPublicKey_Sum() {} +func (*PublicKey_Secp256K1) isPublicKey_Sum() {} + +func (m *PublicKey) GetSum() isPublicKey_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *PublicKey) GetEd25519() []byte { + if x, ok := m.GetSum().(*PublicKey_Ed25519); ok { + return x.Ed25519 + } + return nil +} + +func (m *PublicKey) GetSecp256K1() []byte { + if x, ok := m.GetSum().(*PublicKey_Secp256K1); ok { + return x.Secp256K1 + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
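+// Each `oneof sum` case is generated as its own wrapper type implementing
+// isPublicKey_Sum. Illustrative construction and access (pub here stands in
+// for a caller-supplied ed25519 public key, not a name from this file):
+//
+//	pk := &PublicKey{Sum: &PublicKey_Ed25519{Ed25519: pub}}
+//	raw := pk.GetEd25519() // nil when another case is set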
+func (*PublicKey) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*PublicKey_Ed25519)(nil), + (*PublicKey_Secp256K1)(nil), + } +} + +func init() { + proto.RegisterType((*PublicKey)(nil), "tendermint.crypto.PublicKey") +} + +func init() { proto.RegisterFile("tendermint/crypto/keys.proto", fileDescriptor_cb048658b234868c) } + +var fileDescriptor_cb048658b234868c = []byte{ + // 199 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x29, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x4f, 0x2e, 0xaa, 0x2c, 0x28, 0xc9, 0xd7, 0xcf, 0x4e, + 0xad, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x44, 0xc8, 0xea, 0x41, 0x64, 0xa5, + 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xb2, 0xfa, 0x20, 0x16, 0x44, 0xa1, 0x52, 0x04, 0x17, 0x67, + 0x40, 0x69, 0x52, 0x4e, 0x66, 0xb2, 0x77, 0x6a, 0xa5, 0x90, 0x14, 0x17, 0x7b, 0x6a, 0x8a, 0x91, + 0xa9, 0xa9, 0xa1, 0xa5, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x8f, 0x07, 0x43, 0x10, 0x4c, 0x40, 0x48, + 0x8e, 0x8b, 0xb3, 0x38, 0x35, 0xb9, 0xc0, 0xc8, 0xd4, 0x2c, 0xdb, 0x50, 0x82, 0x09, 0x2a, 0x8b, + 0x10, 0xb2, 0xe2, 0x78, 0xb1, 0x40, 0x9e, 0xf1, 0xc5, 0x42, 0x79, 0x46, 0x27, 0x56, 0x2e, 0xe6, + 0xe2, 0xd2, 0x5c, 0xa7, 0xa0, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, + 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xb2, + 0x48, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x47, 0xf2, 0x05, 0x12, 0x13, + 0xe2, 0x4c, 0x0c, 0x1f, 0x26, 0xb1, 0x81, 0x25, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe8, + 0x1d, 0x1e, 0xe2, 0xfd, 0x00, 0x00, 0x00, +} + +func (this *PublicKey) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*PublicKey) + if !ok { + that2, ok := that.(PublicKey) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if that1.Sum == nil { + if this.Sum != nil { + return 1 + } + } else if this.Sum == nil { + return -1 + } else { + thisType := -1 + switch this.Sum.(type) { + case *PublicKey_Ed25519: + thisType = 0 + case *PublicKey_Secp256K1: + thisType = 1 + default: + panic(fmt.Sprintf("compare: unexpected type %T in oneof", this.Sum)) + } + that1Type := -1 + switch that1.Sum.(type) { + case *PublicKey_Ed25519: + that1Type = 0 + case *PublicKey_Secp256K1: + that1Type = 1 + default: + panic(fmt.Sprintf("compare: unexpected type %T in oneof", that1.Sum)) + } + if thisType == that1Type { + if c := this.Sum.Compare(that1.Sum); c != 0 { + return c + } + } else if thisType < that1Type { + return -1 + } else if thisType > that1Type { + return 1 + } + } + return 0 +} +func (this *PublicKey_Ed25519) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*PublicKey_Ed25519) + if !ok { + that2, ok := that.(PublicKey_Ed25519) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.Ed25519, that1.Ed25519); c != 0 { + return c + } + return 0 +} +func (this *PublicKey_Secp256K1) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*PublicKey_Secp256K1) + if !ok { + that2, ok := that.(PublicKey_Secp256K1) + if ok { + that1 = &that2 + } else { + return 1 + } 
+ } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.Secp256K1, that1.Secp256K1); c != 0 { + return c + } + return 0 +} +func (this *PublicKey) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PublicKey) + if !ok { + that2, ok := that.(PublicKey) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Sum == nil { + if this.Sum != nil { + return false + } + } else if this.Sum == nil { + return false + } else if !this.Sum.Equal(that1.Sum) { + return false + } + return true +} +func (this *PublicKey_Ed25519) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PublicKey_Ed25519) + if !ok { + that2, ok := that.(PublicKey_Ed25519) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Ed25519, that1.Ed25519) { + return false + } + return true +} +func (this *PublicKey_Secp256K1) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PublicKey_Secp256K1) + if !ok { + that2, ok := that.(PublicKey_Secp256K1) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Secp256K1, that1.Secp256K1) { + return false + } + return true +} +func (m *PublicKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PublicKey) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PublicKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *PublicKey_Ed25519) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PublicKey_Ed25519) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Ed25519 != nil { + i -= len(m.Ed25519) + copy(dAtA[i:], m.Ed25519) + i = encodeVarintKeys(dAtA, i, uint64(len(m.Ed25519))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *PublicKey_Secp256K1) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PublicKey_Secp256K1) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Secp256K1 != nil { + i -= len(m.Secp256K1) + copy(dAtA[i:], m.Secp256K1) + i = encodeVarintKeys(dAtA, i, uint64(len(m.Secp256K1))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func encodeVarintKeys(dAtA []byte, offset int, v uint64) int { + offset -= sovKeys(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PublicKey) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *PublicKey_Ed25519) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = 
l + if m.Ed25519 != nil { + l = len(m.Ed25519) + n += 1 + l + sovKeys(uint64(l)) + } + return n +} +func (m *PublicKey_Secp256K1) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Secp256K1 != nil { + l = len(m.Secp256K1) + n += 1 + l + sovKeys(uint64(l)) + } + return n +} + +func sovKeys(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozKeys(x uint64) (n int) { + return sovKeys(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PublicKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKeys + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PublicKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PublicKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ed25519", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKeys + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKeys + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthKeys + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := make([]byte, postIndex-iNdEx) + copy(v, dAtA[iNdEx:postIndex]) + m.Sum = &PublicKey_Ed25519{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secp256K1", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowKeys + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthKeys + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthKeys + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := make([]byte, postIndex-iNdEx) + copy(v, dAtA[iNdEx:postIndex]) + m.Sum = &PublicKey_Secp256K1{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipKeys(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthKeys + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthKeys + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipKeys(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKeys + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKeys + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + 
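+			// Length-delimited field: decode the varint length, then skip
+			// that many payload bytes.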
var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowKeys + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthKeys + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupKeys + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthKeys + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthKeys = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowKeys = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupKeys = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/crypto/keys.proto b/proto/tendermint/crypto/keys.proto new file mode 100644 index 000000000..16fd7adf3 --- /dev/null +++ b/proto/tendermint/crypto/keys.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; +package tendermint.crypto; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/crypto"; + +import "gogoproto/gogo.proto"; + +// PublicKey defines the keys available for use with Tendermint Validators +message PublicKey { + option (gogoproto.compare) = true; + option (gogoproto.equal) = true; + + oneof sum { + bytes ed25519 = 1; + bytes secp256k1 = 2; + } +} diff --git a/proto/tendermint/crypto/proof.pb.go b/proto/tendermint/crypto/proof.pb.go new file mode 100644 index 000000000..97350c64c --- /dev/null +++ b/proto/tendermint/crypto/proof.pb.go @@ -0,0 +1,1436 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/crypto/proof.proto + +package crypto + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Proof struct { + Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Index int64 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + LeafHash []byte `protobuf:"bytes,3,opt,name=leaf_hash,json=leafHash,proto3" json:"leaf_hash,omitempty"` + Aunts [][]byte `protobuf:"bytes,4,rep,name=aunts,proto3" json:"aunts,omitempty"` +} + +func (m *Proof) Reset() { *m = Proof{} } +func (m *Proof) String() string { return proto.CompactTextString(m) } +func (*Proof) ProtoMessage() {} +func (*Proof) Descriptor() ([]byte, []int) { + return fileDescriptor_6b60b6ba2ab5b856, []int{0} +} +func (m *Proof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Proof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Proof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Proof) XXX_Merge(src proto.Message) { + xxx_messageInfo_Proof.Merge(m, src) +} +func (m *Proof) XXX_Size() int { + return m.Size() +} +func (m *Proof) XXX_DiscardUnknown() { + xxx_messageInfo_Proof.DiscardUnknown(m) +} + +var xxx_messageInfo_Proof proto.InternalMessageInfo + +func (m *Proof) GetTotal() int64 { + if m != nil { + return m.Total + } + return 0 +} + +func (m *Proof) GetIndex() int64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *Proof) GetLeafHash() []byte { + if m != nil { + return m.LeafHash + } + return nil +} + +func (m *Proof) GetAunts() [][]byte { + if m != nil { + return m.Aunts + } + return nil +} + +type ValueOp struct { + // Encoded in ProofOp.Key. 
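+	// ValueOp proves the value stored under this key; the Proof below walks
+	// from LeafHash up through Aunts to the Merkle root, with Index/Total
+	// locating the leaf among the tree's leaves.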
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// To encode in ProofOp.Data
+	Proof *Proof `protobuf:"bytes,2,opt,name=proof,proto3" json:"proof,omitempty"`
+}
+
+func (m *ValueOp) Reset()         { *m = ValueOp{} }
+func (m *ValueOp) String() string { return proto.CompactTextString(m) }
+func (*ValueOp) ProtoMessage()    {}
+func (*ValueOp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6b60b6ba2ab5b856, []int{1}
+}
+func (m *ValueOp) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ValueOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ValueOp.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ValueOp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ValueOp.Merge(m, src)
+}
+func (m *ValueOp) XXX_Size() int {
+	return m.Size()
+}
+func (m *ValueOp) XXX_DiscardUnknown() {
+	xxx_messageInfo_ValueOp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValueOp proto.InternalMessageInfo
+
+func (m *ValueOp) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *ValueOp) GetProof() *Proof {
+	if m != nil {
+		return m.Proof
+	}
+	return nil
+}
+
+type DominoOp struct {
+	Key    string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Input  string `protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"`
+	Output string `protobuf:"bytes,3,opt,name=output,proto3" json:"output,omitempty"`
+}
+
+func (m *DominoOp) Reset()         { *m = DominoOp{} }
+func (m *DominoOp) String() string { return proto.CompactTextString(m) }
+func (*DominoOp) ProtoMessage()    {}
+func (*DominoOp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6b60b6ba2ab5b856, []int{2}
+}
+func (m *DominoOp) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DominoOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DominoOp.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DominoOp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DominoOp.Merge(m, src)
+}
+func (m *DominoOp) XXX_Size() int {
+	return m.Size()
+}
+func (m *DominoOp) XXX_DiscardUnknown() {
+	xxx_messageInfo_DominoOp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DominoOp proto.InternalMessageInfo
+
+func (m *DominoOp) GetKey() string {
+	if m != nil {
+		return m.Key
+	}
+	return ""
+}
+
+func (m *DominoOp) GetInput() string {
+	if m != nil {
+		return m.Input
+	}
+	return ""
+}
+
+func (m *DominoOp) GetOutput() string {
+	if m != nil {
+		return m.Output
+	}
+	return ""
+}
+
+// ProofOp defines an operation used for calculating Merkle root
+// The data could be in arbitrary format, providing necessary data,
+// for example a neighbouring node hash
+type ProofOp struct {
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Key  []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (m *ProofOp) Reset()         { *m = ProofOp{} }
+func (m *ProofOp) String() string { return proto.CompactTextString(m) }
+func (*ProofOp) ProtoMessage()    {}
+func (*ProofOp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6b60b6ba2ab5b856, []int{3}
+}
+func (m *ProofOp) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+} +func (m *ProofOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProofOp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProofOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProofOp.Merge(m, src) +} +func (m *ProofOp) XXX_Size() int { + return m.Size() +} +func (m *ProofOp) XXX_DiscardUnknown() { + xxx_messageInfo_ProofOp.DiscardUnknown(m) +} + +var xxx_messageInfo_ProofOp proto.InternalMessageInfo + +func (m *ProofOp) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ProofOp) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *ProofOp) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// ProofOps is Merkle proof defined by the list of ProofOps +type ProofOps struct { + Ops []ProofOp `protobuf:"bytes,1,rep,name=ops,proto3" json:"ops"` +} + +func (m *ProofOps) Reset() { *m = ProofOps{} } +func (m *ProofOps) String() string { return proto.CompactTextString(m) } +func (*ProofOps) ProtoMessage() {} +func (*ProofOps) Descriptor() ([]byte, []int) { + return fileDescriptor_6b60b6ba2ab5b856, []int{4} +} +func (m *ProofOps) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProofOps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProofOps.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProofOps) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProofOps.Merge(m, src) +} +func (m *ProofOps) XXX_Size() int { + return m.Size() +} +func (m *ProofOps) XXX_DiscardUnknown() { + xxx_messageInfo_ProofOps.DiscardUnknown(m) +} + +var xxx_messageInfo_ProofOps proto.InternalMessageInfo + +func (m *ProofOps) GetOps() []ProofOp { + if m != nil { + return m.Ops + } + return nil +} + +func init() { + proto.RegisterType((*Proof)(nil), "tendermint.crypto.Proof") + proto.RegisterType((*ValueOp)(nil), "tendermint.crypto.ValueOp") + proto.RegisterType((*DominoOp)(nil), "tendermint.crypto.DominoOp") + proto.RegisterType((*ProofOp)(nil), "tendermint.crypto.ProofOp") + proto.RegisterType((*ProofOps)(nil), "tendermint.crypto.ProofOps") +} + +func init() { proto.RegisterFile("tendermint/crypto/proof.proto", fileDescriptor_6b60b6ba2ab5b856) } + +var fileDescriptor_6b60b6ba2ab5b856 = []byte{ + // 351 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x52, 0xbb, 0x4e, 0xc3, 0x30, + 0x14, 0x4d, 0xea, 0xf4, 0x75, 0xdb, 0x01, 0xac, 0x0a, 0x45, 0x45, 0x84, 0x28, 0x53, 0xa6, 0x44, + 0x2a, 0x0b, 0x13, 0x43, 0x61, 0x40, 0x30, 0x14, 0x79, 0x60, 0x60, 0x41, 0x6e, 0xeb, 0x36, 0x11, + 0x6d, 0x6c, 0x25, 0x8e, 0x44, 0xff, 0x82, 0xcf, 0xea, 0xd8, 0x91, 0x09, 0xa1, 0xf6, 0x47, 0x90, + 0xed, 0xa0, 0x16, 0x55, 0x6c, 0xe7, 0x71, 0x7d, 0x7c, 0xac, 0x6b, 0xb8, 0x90, 0x2c, 0x9b, 0xb2, + 0x7c, 0x99, 0x66, 0x32, 0x9e, 0xe4, 0x2b, 0x21, 0x79, 0x2c, 0x72, 0xce, 0x67, 0x91, 0xc8, 0xb9, + 0xe4, 0xf8, 0x74, 0x6f, 0x47, 0xc6, 0xee, 0xf7, 0xe6, 0x7c, 0xce, 0xb5, 0x1b, 0x2b, 0x64, 0x06, + 0x83, 0x19, 0xd4, 0x9f, 0xd4, 0x39, 0xdc, 0x83, 0xba, 0xe4, 0x92, 0x2e, 0x5c, 0xdb, 0xb7, 0x43, + 0x44, 0x0c, 0x51, 0x6a, 0x9a, 0x4d, 0xd9, 0xbb, 0x5b, 0x33, 0xaa, 0x26, 0xf8, 0x1c, 0xda, 0x0b, + 0x46, 0x67, 0xaf, 0x09, 0x2d, 0x12, 0x17, 0xf9, 0x76, 
0xd8, 0x25, 0x2d, 0x25, 0xdc, 0xd3, 0x22, + 0x51, 0x47, 0x68, 0x99, 0xc9, 0xc2, 0x75, 0x7c, 0x14, 0x76, 0x89, 0x21, 0xc1, 0x23, 0x34, 0x9f, + 0xe9, 0xa2, 0x64, 0x23, 0x81, 0x4f, 0x00, 0xbd, 0xb1, 0x95, 0xbe, 0xa7, 0x4b, 0x14, 0xc4, 0x11, + 0xd4, 0x75, 0x79, 0x7d, 0x4b, 0x67, 0xe0, 0x46, 0x47, 0xed, 0x23, 0x5d, 0x92, 0x98, 0xb1, 0xe0, + 0x01, 0x5a, 0x77, 0x7c, 0x99, 0x66, 0xfc, 0x6f, 0x5a, 0xdb, 0xa4, 0xe9, 0xce, 0xa2, 0x94, 0x3a, + 0xad, 0x4d, 0x0c, 0xc1, 0x67, 0xd0, 0xe0, 0xa5, 0x54, 0x32, 0xd2, 0x72, 0xc5, 0x82, 0x5b, 0x68, + 0xea, 0xec, 0x91, 0xc0, 0x18, 0x1c, 0xb9, 0x12, 0xac, 0xca, 0xd2, 0xf8, 0x37, 0xbe, 0xb6, 0x2f, + 0x8b, 0xc1, 0x99, 0x52, 0x49, 0xab, 0x77, 0x6b, 0x1c, 0xdc, 0x40, 0xab, 0x0a, 0x29, 0xf0, 0x00, + 0x10, 0x17, 0x85, 0x6b, 0xfb, 0x28, 0xec, 0x0c, 0xfa, 0xff, 0x3d, 0x65, 0x24, 0x86, 0xce, 0xfa, + 0xeb, 0xd2, 0x22, 0x6a, 0x78, 0x48, 0xd6, 0x5b, 0xcf, 0xde, 0x6c, 0x3d, 0xfb, 0x7b, 0xeb, 0xd9, + 0x1f, 0x3b, 0xcf, 0xda, 0xec, 0x3c, 0xeb, 0x73, 0xe7, 0x59, 0x2f, 0xd7, 0xf3, 0x54, 0x26, 0xe5, + 0x38, 0x9a, 0xf0, 0x65, 0x7c, 0xb0, 0xf2, 0x03, 0x68, 0x56, 0x7a, 0xf4, 0x1d, 0xc6, 0x0d, 0x6d, + 0x5c, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0x43, 0x5d, 0xb9, 0x45, 0x2a, 0x02, 0x00, 0x00, +} + +func (m *Proof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Proof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Proof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Aunts) > 0 { + for iNdEx := len(m.Aunts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Aunts[iNdEx]) + copy(dAtA[i:], m.Aunts[iNdEx]) + i = encodeVarintProof(dAtA, i, uint64(len(m.Aunts[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.LeafHash) > 0 { + i -= len(m.LeafHash) + copy(dAtA[i:], m.LeafHash) + i = encodeVarintProof(dAtA, i, uint64(len(m.LeafHash))) + i-- + dAtA[i] = 0x1a + } + if m.Index != 0 { + i = encodeVarintProof(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if m.Total != 0 { + i = encodeVarintProof(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ValueOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValueOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValueOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProof(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintProof(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DominoOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DominoOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DominoOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i 
:= len(dAtA) + _ = i + var l int + _ = l + if len(m.Output) > 0 { + i -= len(m.Output) + copy(dAtA[i:], m.Output) + i = encodeVarintProof(dAtA, i, uint64(len(m.Output))) + i-- + dAtA[i] = 0x1a + } + if len(m.Input) > 0 { + i -= len(m.Input) + copy(dAtA[i:], m.Input) + i = encodeVarintProof(dAtA, i, uint64(len(m.Input))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintProof(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ProofOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProofOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProofOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintProof(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x1a + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintProof(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintProof(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ProofOps) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProofOps) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProofOps) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ops) > 0 { + for iNdEx := len(m.Ops) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ops[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProof(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintProof(dAtA []byte, offset int, v uint64) int { + offset -= sovProof(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Proof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Total != 0 { + n += 1 + sovProof(uint64(m.Total)) + } + if m.Index != 0 { + n += 1 + sovProof(uint64(m.Index)) + } + l = len(m.LeafHash) + if l > 0 { + n += 1 + l + sovProof(uint64(l)) + } + if len(m.Aunts) > 0 { + for _, b := range m.Aunts { + l = len(b) + n += 1 + l + sovProof(uint64(l)) + } + } + return n +} + +func (m *ValueOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovProof(uint64(l)) + } + if m.Proof != nil { + l = m.Proof.Size() + n += 1 + l + sovProof(uint64(l)) + } + return n +} + +func (m *DominoOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovProof(uint64(l)) + } + l = len(m.Input) + if l > 0 { + n += 1 + l + sovProof(uint64(l)) + } + l = len(m.Output) + if l > 0 { + n += 1 + l + sovProof(uint64(l)) + } + return n +} + +func (m *ProofOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + 
l + sovProof(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovProof(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovProof(uint64(l)) + } + return n +} + +func (m *ProofOps) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ops) > 0 { + for _, e := range m.Ops { + l = e.Size() + n += 1 + l + sovProof(uint64(l)) + } + } + return n +} + +func sovProof(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozProof(x uint64) (n int) { + return sovProof(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Proof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Proof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Proof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeafHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LeafHash = append(m.LeafHash[:0], dAtA[iNdEx:postIndex]...) 
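+			// The truncate-and-append idiom copies the bytes out of dAtA so
+			// the decoded message never aliases the caller's buffer; the nil
+			// check below keeps a present-but-empty field non-nil.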
+ if m.LeafHash == nil { + m.LeafHash = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Aunts", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Aunts = append(m.Aunts, make([]byte, postIndex-iNdEx)) + copy(m.Aunts[len(m.Aunts)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProof(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValueOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValueOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValueOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proof == nil { + m.Proof = &Proof{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProof(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DominoOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DominoOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DominoOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Input = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Output = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProof(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProofOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProofOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProofOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
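+	// Data is opaque at this layer; its encoding depends on ProofOp.Type (a ValueOp, for instance, marshals its Proof into Data, per proof.proto).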
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProof(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProofOps) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProofOps: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProofOps: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ops", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ops = append(m.Ops, ProofOp{}) + if err := m.Ops[len(m.Ops)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProof(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipProof(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProof + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProof + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProof + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthProof + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupProof + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthProof + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + 
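+	// Sentinel errors returned by the Unmarshal and skipProof helpers above when input is malformed.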
ErrInvalidLengthProof = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowProof = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupProof = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/crypto/proof.proto b/proto/tendermint/crypto/proof.proto new file mode 100644 index 000000000..975df7685 --- /dev/null +++ b/proto/tendermint/crypto/proof.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; +package tendermint.crypto; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/crypto"; + +import "gogoproto/gogo.proto"; + +message Proof { + int64 total = 1; + int64 index = 2; + bytes leaf_hash = 3; + repeated bytes aunts = 4; +} + +message ValueOp { + // Encoded in ProofOp.Key. + bytes key = 1; + + // To encode in ProofOp.Data. + Proof proof = 2; +} + +message DominoOp { + string key = 1; + string input = 2; + string output = 3; +} + +// ProofOp defines an operation used for calculating a Merkle root. +// The data could be in an arbitrary format, providing the necessary data, +// for example a neighbouring node hash. +message ProofOp { + string type = 1; + bytes key = 2; + bytes data = 3; +} + +// ProofOps is a Merkle proof defined by a list of ProofOp messages. +message ProofOps { + repeated ProofOp ops = 1 [(gogoproto.nullable) = false]; +} diff --git a/proto/tendermint/libs/bits/types.pb.go b/proto/tendermint/libs/bits/types.pb.go new file mode 100644 index 000000000..ad87f854f --- /dev/null +++ b/proto/tendermint/libs/bits/types.pb.go @@ -0,0 +1,411 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/libs/bits/types.proto + +package bits + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
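+// BitArray (declared below) is the wire form of libs/bits.BitArray: Bits is the logical bit count and Elems packs the bits 64 per uint64 word.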
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type BitArray struct { + Bits int64 `protobuf:"varint,1,opt,name=bits,proto3" json:"bits,omitempty"` + Elems []uint64 `protobuf:"varint,2,rep,packed,name=elems,proto3" json:"elems,omitempty"` +} + +func (m *BitArray) Reset() { *m = BitArray{} } +func (m *BitArray) String() string { return proto.CompactTextString(m) } +func (*BitArray) ProtoMessage() {} +func (*BitArray) Descriptor() ([]byte, []int) { + return fileDescriptor_e91ab2672920d7d4, []int{0} +} +func (m *BitArray) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BitArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BitArray.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BitArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_BitArray.Merge(m, src) +} +func (m *BitArray) XXX_Size() int { + return m.Size() +} +func (m *BitArray) XXX_DiscardUnknown() { + xxx_messageInfo_BitArray.DiscardUnknown(m) +} + +var xxx_messageInfo_BitArray proto.InternalMessageInfo + +func (m *BitArray) GetBits() int64 { + if m != nil { + return m.Bits + } + return 0 +} + +func (m *BitArray) GetElems() []uint64 { + if m != nil { + return m.Elems + } + return nil +} + +func init() { + proto.RegisterType((*BitArray)(nil), "tendermint.libs.bits.BitArray") +} + +func init() { proto.RegisterFile("tendermint/libs/bits/types.proto", fileDescriptor_e91ab2672920d7d4) } + +var fileDescriptor_e91ab2672920d7d4 = []byte{ + // 168 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x28, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0xcf, 0xc9, 0x4c, 0x2a, 0xd6, 0x4f, 0xca, 0x2c, 0x29, + 0xd6, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x41, 0xa8, + 0xd0, 0x03, 0xa9, 0xd0, 0x03, 0xa9, 0x50, 0x32, 0xe1, 0xe2, 0x70, 0xca, 0x2c, 0x71, 0x2c, 0x2a, + 0x4a, 0xac, 0x14, 0x12, 0xe2, 0x62, 0x01, 0x89, 0x49, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0x81, + 0xd9, 0x42, 0x22, 0x5c, 0xac, 0xa9, 0x39, 0xa9, 0xb9, 0xc5, 0x12, 0x4c, 0x0a, 0xcc, 0x1a, 0x2c, + 0x41, 0x10, 0x8e, 0x53, 0xe8, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, + 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x59, + 0xa7, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x23, 0x39, 0x09, 0x89, 0x09, + 0x76, 0x8d, 0x3e, 0x36, 0xe7, 0x26, 0xb1, 0x81, 0xe5, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x5b, 0x0c, 0xe3, 0x3e, 0xcd, 0x00, 0x00, 0x00, +} + +func (m *BitArray) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BitArray) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BitArray) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Elems) > 0 { + dAtA2 := make([]byte, len(m.Elems)*10) + var j1 int + for _, num := range m.Elems { + for num >= 1<<7 { + dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA2[j1] = uint8(num) + j1++ + } + i -= j1 + copy(dAtA[i:], dAtA2[:j1]) + i = encodeVarintTypes(dAtA, i, uint64(j1)) + i-- + dAtA[i] = 0x12 + } + if m.Bits 
!= 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Bits)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BitArray) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Bits != 0 { + n += 1 + sovTypes(uint64(m.Bits)) + } + if len(m.Elems) > 0 { + l = 0 + for _, e := range m.Elems { + l += sovTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *BitArray) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BitArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BitArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Bits", wireType) + } + m.Bits = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Bits |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Elems = append(m.Elems, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Elems) == 0 { + m.Elems = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Elems = append(m.Elems, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Elems", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/libs/bits/types.proto b/proto/tendermint/libs/bits/types.proto new file mode 100644 index 000000000..3111d113a --- /dev/null +++ b/proto/tendermint/libs/bits/types.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; +package tendermint.libs.bits; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/libs/bits"; + +message BitArray { + int64 bits = 1; + repeated uint64 elems = 2; +} diff --git a/proto/tendermint/mempool/types.pb.go b/proto/tendermint/mempool/types.pb.go new file mode 100644 index 000000000..3487652bc --- /dev/null +++ b/proto/tendermint/mempool/types.pb.go @@ -0,0 +1,562 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/mempool/types.proto + +package mempool + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
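+// Txs (declared below) batches raw transaction bytes for mempool gossip between peers.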
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Txs struct { + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` +} + +func (m *Txs) Reset() { *m = Txs{} } +func (m *Txs) String() string { return proto.CompactTextString(m) } +func (*Txs) ProtoMessage() {} +func (*Txs) Descriptor() ([]byte, []int) { + return fileDescriptor_2af51926fdbcbc05, []int{0} +} +func (m *Txs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Txs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Txs.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Txs) XXX_Merge(src proto.Message) { + xxx_messageInfo_Txs.Merge(m, src) +} +func (m *Txs) XXX_Size() int { + return m.Size() +} +func (m *Txs) XXX_DiscardUnknown() { + xxx_messageInfo_Txs.DiscardUnknown(m) +} + +var xxx_messageInfo_Txs proto.InternalMessageInfo + +func (m *Txs) GetTxs() [][]byte { + if m != nil { + return m.Txs + } + return nil +} + +type Message struct { + // Types that are valid to be assigned to Sum: + // *Message_Txs + Sum isMessage_Sum `protobuf_oneof:"sum"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_2af51926fdbcbc05, []int{1} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +type isMessage_Sum interface { + isMessage_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Message_Txs struct { + Txs *Txs `protobuf:"bytes,1,opt,name=txs,proto3,oneof" json:"txs,omitempty"` +} + +func (*Message_Txs) isMessage_Sum() {} + +func (m *Message) GetSum() isMessage_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *Message) GetTxs() *Txs { + if x, ok := m.GetSum().(*Message_Txs); ok { + return x.Txs + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
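+// It enumerates the wrapper types that may populate Message.Sum so the proto runtime can reflect over the oneof.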
+func (*Message) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Message_Txs)(nil), + } +} + +func init() { + proto.RegisterType((*Txs)(nil), "tendermint.mempool.Txs") + proto.RegisterType((*Message)(nil), "tendermint.mempool.Message") +} + +func init() { proto.RegisterFile("tendermint/mempool/types.proto", fileDescriptor_2af51926fdbcbc05) } + +var fileDescriptor_2af51926fdbcbc05 = []byte{ + // 179 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2b, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0xcf, 0x4d, 0xcd, 0x2d, 0xc8, 0xcf, 0xcf, 0xd1, 0x2f, + 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x42, 0xc8, 0xeb, 0x41, + 0xe5, 0x95, 0xc4, 0xb9, 0x98, 0x43, 0x2a, 0x8a, 0x85, 0x04, 0xb8, 0x98, 0x4b, 0x2a, 0x8a, 0x25, + 0x18, 0x15, 0x98, 0x35, 0x78, 0x82, 0x40, 0x4c, 0x25, 0x5b, 0x2e, 0x76, 0xdf, 0xd4, 0xe2, 0xe2, + 0xc4, 0xf4, 0x54, 0x21, 0x6d, 0x98, 0x24, 0xa3, 0x06, 0xb7, 0x91, 0xb8, 0x1e, 0xa6, 0x29, 0x7a, + 0x21, 0x15, 0xc5, 0x1e, 0x0c, 0x60, 0x7d, 0x4e, 0xac, 0x5c, 0xcc, 0xc5, 0xa5, 0xb9, 0x4e, 0xc1, + 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, + 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x99, 0x9e, 0x59, 0x92, 0x51, + 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x8f, 0xe4, 0x60, 0x24, 0x26, 0xd8, 0xb5, 0xfa, 0x98, 0x9e, + 0x49, 0x62, 0x03, 0xcb, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xca, 0xc3, 0xa0, 0xfc, 0xe9, + 0x00, 0x00, 0x00, +} + +func (m *Txs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Txs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Txs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Message_Txs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Txs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Txs != nil { + { + size, err := m.Txs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = 
uint8(v) + return base +} +func (m *Txs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *Message_Txs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Txs != nil { + l = m.Txs.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Txs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Txs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Txs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Txs{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_Txs{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/mempool/types.proto b/proto/tendermint/mempool/types.proto new file mode 100644 index 000000000..b55d9717b --- /dev/null +++ b/proto/tendermint/mempool/types.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; +package tendermint.mempool; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/mempool"; + +message Txs { + repeated bytes txs = 1; +} + +message Message { + oneof sum { + Txs txs = 1; + } +} diff --git a/proto/tendermint/p2p/conn.pb.go b/proto/tendermint/p2p/conn.pb.go new file mode 100644 index 000000000..7c26d3fcd --- /dev/null +++ b/proto/tendermint/p2p/conn.pb.go @@ -0,0 +1,1284 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/p2p/conn.proto + +package p2p + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + crypto "github.com/tendermint/tendermint/proto/tendermint/crypto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type PacketPing struct { +} + +func (m *PacketPing) Reset() { *m = PacketPing{} } +func (m *PacketPing) String() string { return proto.CompactTextString(m) } +func (*PacketPing) ProtoMessage() {} +func (*PacketPing) Descriptor() ([]byte, []int) { + return fileDescriptor_22474b5527c8fa9f, []int{0} +} +func (m *PacketPing) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketPing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketPing.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketPing) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketPing.Merge(m, src) +} +func (m *PacketPing) XXX_Size() int { + return m.Size() +} +func (m *PacketPing) XXX_DiscardUnknown() { + xxx_messageInfo_PacketPing.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketPing proto.InternalMessageInfo + +type PacketPong struct { +} + +func (m *PacketPong) Reset() { *m = PacketPong{} } +func (m *PacketPong) String() string { return proto.CompactTextString(m) } +func (*PacketPong) ProtoMessage() {} +func (*PacketPong) Descriptor() ([]byte, []int) { + return fileDescriptor_22474b5527c8fa9f, []int{1} +} +func (m *PacketPong) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketPong) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketPong.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketPong) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketPong.Merge(m, src) +} +func (m *PacketPong) XXX_Size() int { + return m.Size() +} +func (m *PacketPong) XXX_DiscardUnknown() { + xxx_messageInfo_PacketPong.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketPong proto.InternalMessageInfo + +type PacketMsg struct { + ChannelID int32 `protobuf:"varint,1,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + EOF bool `protobuf:"varint,2,opt,name=eof,proto3" json:"eof,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *PacketMsg) Reset() { *m = PacketMsg{} } +func (m *PacketMsg) String() string { return proto.CompactTextString(m) } +func (*PacketMsg) ProtoMessage() {} +func (*PacketMsg) Descriptor() ([]byte, []int) { + return fileDescriptor_22474b5527c8fa9f, []int{2} +} +func (m *PacketMsg) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PacketMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PacketMsg.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PacketMsg) XXX_Merge(src proto.Message) { + xxx_messageInfo_PacketMsg.Merge(m, src) +} +func (m *PacketMsg) XXX_Size() int { + return m.Size() +} +func (m *PacketMsg) XXX_DiscardUnknown() { + xxx_messageInfo_PacketMsg.DiscardUnknown(m) +} + +var xxx_messageInfo_PacketMsg proto.InternalMessageInfo + +func (m *PacketMsg) GetChannelID() int32 { + if m != nil { + return m.ChannelID + } + return 0 +} + +func (m 
*PacketMsg) GetEOF() bool { + if m != nil { + return m.EOF + } + return false +} + +func (m *PacketMsg) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type Packet struct { + // Types that are valid to be assigned to Sum: + // *Packet_PacketPing + // *Packet_PacketPong + // *Packet_PacketMsg + Sum isPacket_Sum `protobuf_oneof:"sum"` +} + +func (m *Packet) Reset() { *m = Packet{} } +func (m *Packet) String() string { return proto.CompactTextString(m) } +func (*Packet) ProtoMessage() {} +func (*Packet) Descriptor() ([]byte, []int) { + return fileDescriptor_22474b5527c8fa9f, []int{3} +} +func (m *Packet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Packet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Packet.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Packet) XXX_Merge(src proto.Message) { + xxx_messageInfo_Packet.Merge(m, src) +} +func (m *Packet) XXX_Size() int { + return m.Size() +} +func (m *Packet) XXX_DiscardUnknown() { + xxx_messageInfo_Packet.DiscardUnknown(m) +} + +var xxx_messageInfo_Packet proto.InternalMessageInfo + +type isPacket_Sum interface { + isPacket_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Packet_PacketPing struct { + PacketPing *PacketPing `protobuf:"bytes,1,opt,name=packet_ping,json=packetPing,proto3,oneof" json:"packet_ping,omitempty"` +} +type Packet_PacketPong struct { + PacketPong *PacketPong `protobuf:"bytes,2,opt,name=packet_pong,json=packetPong,proto3,oneof" json:"packet_pong,omitempty"` +} +type Packet_PacketMsg struct { + PacketMsg *PacketMsg `protobuf:"bytes,3,opt,name=packet_msg,json=packetMsg,proto3,oneof" json:"packet_msg,omitempty"` +} + +func (*Packet_PacketPing) isPacket_Sum() {} +func (*Packet_PacketPong) isPacket_Sum() {} +func (*Packet_PacketMsg) isPacket_Sum() {} + +func (m *Packet) GetSum() isPacket_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *Packet) GetPacketPing() *PacketPing { + if x, ok := m.GetSum().(*Packet_PacketPing); ok { + return x.PacketPing + } + return nil +} + +func (m *Packet) GetPacketPong() *PacketPong { + if x, ok := m.GetSum().(*Packet_PacketPong); ok { + return x.PacketPong + } + return nil +} + +func (m *Packet) GetPacketMsg() *PacketMsg { + if x, ok := m.GetSum().(*Packet_PacketMsg); ok { + return x.PacketMsg + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
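+// Same pattern as mempool.Message: each Packet_* wrapper satisfies isPacket_Sum, and exactly one variant may populate Packet.Sum.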
+func (*Packet) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Packet_PacketPing)(nil), + (*Packet_PacketPong)(nil), + (*Packet_PacketMsg)(nil), + } +} + +type AuthSigMessage struct { + PubKey crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` + Sig []byte `protobuf:"bytes,2,opt,name=sig,proto3" json:"sig,omitempty"` +} + +func (m *AuthSigMessage) Reset() { *m = AuthSigMessage{} } +func (m *AuthSigMessage) String() string { return proto.CompactTextString(m) } +func (*AuthSigMessage) ProtoMessage() {} +func (*AuthSigMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_22474b5527c8fa9f, []int{4} +} +func (m *AuthSigMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthSigMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthSigMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthSigMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthSigMessage.Merge(m, src) +} +func (m *AuthSigMessage) XXX_Size() int { + return m.Size() +} +func (m *AuthSigMessage) XXX_DiscardUnknown() { + xxx_messageInfo_AuthSigMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthSigMessage proto.InternalMessageInfo + +func (m *AuthSigMessage) GetPubKey() crypto.PublicKey { + if m != nil { + return m.PubKey + } + return crypto.PublicKey{} +} + +func (m *AuthSigMessage) GetSig() []byte { + if m != nil { + return m.Sig + } + return nil +} + +func init() { + proto.RegisterType((*PacketPing)(nil), "tendermint.p2p.PacketPing") + proto.RegisterType((*PacketPong)(nil), "tendermint.p2p.PacketPong") + proto.RegisterType((*PacketMsg)(nil), "tendermint.p2p.PacketMsg") + proto.RegisterType((*Packet)(nil), "tendermint.p2p.Packet") + proto.RegisterType((*AuthSigMessage)(nil), "tendermint.p2p.AuthSigMessage") +} + +func init() { proto.RegisterFile("tendermint/p2p/conn.proto", fileDescriptor_22474b5527c8fa9f) } + +var fileDescriptor_22474b5527c8fa9f = []byte{ + // 395 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x52, 0x3d, 0x8f, 0xd3, 0x40, + 0x10, 0xf5, 0xe2, 0xbb, 0x1c, 0x99, 0x84, 0x13, 0x5a, 0x51, 0x24, 0xd1, 0xc9, 0x89, 0x5c, 0xa5, + 0x40, 0xb6, 0x64, 0x44, 0x03, 0xa2, 0xc0, 0x7c, 0x88, 0xd3, 0x29, 0xba, 0xc8, 0x74, 0x34, 0x96, + 0x3f, 0x96, 0xf5, 0x2a, 0xe7, 0xdd, 0x55, 0x76, 0x5d, 0xf8, 0x5f, 0xf0, 0xb3, 0x8e, 0xee, 0x4a, + 0xaa, 0x08, 0x39, 0x7f, 0x04, 0x79, 0x1d, 0x88, 0x23, 0x71, 0xdd, 0x7b, 0x33, 0xf3, 0xe6, 0x43, + 0xf3, 0x60, 0xaa, 0x09, 0xcf, 0xc9, 0xb6, 0x64, 0x5c, 0xfb, 0x32, 0x90, 0x7e, 0x26, 0x38, 0xf7, + 0xe4, 0x56, 0x68, 0x81, 0x2f, 0x8f, 0x29, 0x4f, 0x06, 0x72, 0xf6, 0x82, 0x0a, 0x2a, 0x4c, 0xca, + 0x6f, 0x51, 0x57, 0x35, 0xbb, 0xea, 0x35, 0xc8, 0xb6, 0xb5, 0xd4, 0xc2, 0xdf, 0x90, 0x5a, 0x75, + 0x59, 0x77, 0x0c, 0xb0, 0x4e, 0xb2, 0x0d, 0xd1, 0x6b, 0xc6, 0x69, 0x8f, 0x09, 0x4e, 0xdd, 0x02, + 0x86, 0x1d, 0x5b, 0x29, 0x8a, 0x5f, 0x02, 0x64, 0x45, 0xc2, 0x39, 0xb9, 0x8b, 0x59, 0x3e, 0x41, + 0x0b, 0xb4, 0x3c, 0x0f, 0x9f, 0x35, 0xbb, 0xf9, 0xf0, 0x43, 0x17, 0xbd, 0xfe, 0x18, 0x0d, 0x0f, + 0x05, 0xd7, 0x39, 0x9e, 0x82, 0x4d, 0xc4, 0xf7, 0xc9, 0x93, 0x05, 0x5a, 0x3e, 0x0d, 0x2f, 0x9a, + 0xdd, 0xdc, 0xfe, 0x74, 0xfb, 0x39, 0x6a, 0x63, 0x18, 0xc3, 0x59, 0x9e, 0xe8, 0x64, 0x62, 0x2f, + 0xd0, 0x72, 0x1c, 0x19, 0xec, 0xfe, 0x44, 0x30, 0xe8, 0x46, 0xe1, 0x77, 0x30, 0x92, 
0x06, 0xc5, + 0x92, 0x71, 0x6a, 0x06, 0x8d, 0x82, 0x99, 0x77, 0x7a, 0xaa, 0x77, 0xdc, 0xf9, 0x8b, 0x15, 0x81, + 0xfc, 0xc7, 0xfa, 0x72, 0xc1, 0xa9, 0x59, 0xe0, 0x71, 0xb9, 0x38, 0x91, 0x0b, 0x4e, 0xf1, 0x1b, + 0x38, 0xb0, 0xb8, 0x54, 0xd4, 0xac, 0x38, 0x0a, 0xa6, 0xff, 0x57, 0xaf, 0x54, 0x2b, 0x1e, 0xca, + 0xbf, 0x24, 0x3c, 0x07, 0x5b, 0x55, 0xa5, 0x1b, 0xc3, 0xe5, 0xfb, 0x4a, 0x17, 0x5f, 0x19, 0x5d, + 0x11, 0xa5, 0x12, 0x4a, 0xf0, 0x5b, 0xb8, 0x90, 0x55, 0x1a, 0x6f, 0x48, 0x7d, 0x38, 0xe7, 0xaa, + 0xdf, 0xb1, 0xfb, 0x89, 0xb7, 0xae, 0xd2, 0x3b, 0x96, 0xdd, 0x90, 0x3a, 0x3c, 0xbb, 0xdf, 0xcd, + 0xad, 0x68, 0x20, 0xab, 0xf4, 0x86, 0xd4, 0xf8, 0x39, 0xd8, 0x8a, 0x75, 0x87, 0x8c, 0xa3, 0x16, + 0x86, 0xb7, 0xf7, 0x8d, 0x83, 0x1e, 0x1a, 0x07, 0xfd, 0x6e, 0x1c, 0xf4, 0x63, 0xef, 0x58, 0x0f, + 0x7b, 0xc7, 0xfa, 0xb5, 0x77, 0xac, 0x6f, 0xaf, 0x29, 0xd3, 0x45, 0x95, 0x7a, 0x99, 0x28, 0xfd, + 0xde, 0xd7, 0xfb, 0x0e, 0x32, 0xee, 0x38, 0xb5, 0x54, 0x3a, 0x30, 0xd1, 0x57, 0x7f, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x30, 0xfd, 0xb2, 0x8d, 0x6b, 0x02, 0x00, 0x00, +} + +func (m *PacketPing) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketPing) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *PacketPong) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketPong) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketPong) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *PacketMsg) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PacketMsg) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PacketMsg) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintConn(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x1a + } + if m.EOF { + i-- + if m.EOF { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.ChannelID != 0 { + i = encodeVarintConn(dAtA, i, uint64(m.ChannelID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Packet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Packet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Packet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m 
*Packet_PacketPing) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Packet_PacketPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PacketPing != nil { + { + size, err := m.PacketPing.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintConn(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Packet_PacketPong) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Packet_PacketPong) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PacketPong != nil { + { + size, err := m.PacketPong.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintConn(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Packet_PacketMsg) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Packet_PacketMsg) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PacketMsg != nil { + { + size, err := m.PacketMsg.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintConn(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *AuthSigMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthSigMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthSigMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Sig) > 0 { + i -= len(m.Sig) + copy(dAtA[i:], m.Sig) + i = encodeVarintConn(dAtA, i, uint64(len(m.Sig))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintConn(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintConn(dAtA []byte, offset int, v uint64) int { + offset -= sovConn(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PacketPing) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *PacketPong) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *PacketMsg) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ChannelID != 0 { + n += 1 + sovConn(uint64(m.ChannelID)) + } + if m.EOF { + n += 2 + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovConn(uint64(l)) + } + return n +} + +func (m *Packet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *Packet_PacketPing) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PacketPing != nil { + l = m.PacketPing.Size() + n += 1 + l + sovConn(uint64(l)) + } + return n +} +func (m *Packet_PacketPong) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PacketPong != nil { + l = m.PacketPong.Size() + n += 1 + l + sovConn(uint64(l)) + } + return n +} +func (m *Packet_PacketMsg) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PacketMsg != nil { + l = m.PacketMsg.Size() + n += 1 + l + sovConn(uint64(l)) + } + return n +} +func (m *AuthSigMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PubKey.Size() + n += 1 + l + sovConn(uint64(l)) + l = len(m.Sig) + if l > 0 { + n += 1 + l + sovConn(uint64(l)) + } + return n +} + +func sovConn(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozConn(x uint64) (n int) { + return sovConn(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PacketPing) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConn + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketPing: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketPing: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipConn(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketPong) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConn + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketPong: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketPong: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipConn(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PacketMsg) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConn + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PacketMsg: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PacketMsg: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelID", wireType) + } + m.ChannelID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConn + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ChannelID |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EOF", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConn + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EOF = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConn + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthConn + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthConn + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipConn(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Packet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConn + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Packet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Packet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PacketPing", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConn + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthConn + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthConn + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PacketPing{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Packet_PacketPing{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PacketPong", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConn + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthConn + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthConn + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PacketPong{} + if err := 
v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Packet_PacketPong{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PacketMsg", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConn + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthConn + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthConn + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PacketMsg{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Packet_PacketMsg{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipConn(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConn + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthSigMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthSigMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConn + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthConn + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthConn + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sig", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConn + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthConn + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthConn + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sig = append(m.Sig[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Sig == nil { + m.Sig = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipConn(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthConn + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipConn(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowConn + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowConn + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowConn + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthConn + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupConn + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthConn + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthConn = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowConn = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupConn = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/p2p/conn.proto b/proto/tendermint/p2p/conn.proto new file mode 100644 index 000000000..b12de6c82 --- /dev/null +++ b/proto/tendermint/p2p/conn.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; +package tendermint.p2p; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; + +import "gogoproto/gogo.proto"; +import "tendermint/crypto/keys.proto"; + +message PacketPing {} + +message PacketPong {} + +message PacketMsg { + int32 channel_id = 1 [(gogoproto.customname) = "ChannelID"]; + bool eof = 2 [(gogoproto.customname) = "EOF"]; + bytes data = 3; +} + +message Packet { + oneof sum { + PacketPing packet_ping = 1; + PacketPong packet_pong = 2; + PacketMsg packet_msg = 3; + } +} + +message AuthSigMessage { + tendermint.crypto.PublicKey pub_key = 1 [(gogoproto.nullable) = false]; + bytes sig = 2; +} diff --git a/proto/tendermint/p2p/pex.pb.go b/proto/tendermint/p2p/pex.pb.go new file mode 100644 index 000000000..42aa08a20 --- /dev/null +++ b/proto/tendermint/p2p/pex.pb.go @@ -0,0 +1,778 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/p2p/pex.proto + +package p2p + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type PexRequest struct { +} + +func (m *PexRequest) Reset() { *m = PexRequest{} } +func (m *PexRequest) String() string { return proto.CompactTextString(m) } +func (*PexRequest) ProtoMessage() {} +func (*PexRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_81c2f011fd13be57, []int{0} +} +func (m *PexRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PexRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PexRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PexRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PexRequest.Merge(m, src) +} +func (m *PexRequest) XXX_Size() int { + return m.Size() +} +func (m *PexRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PexRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PexRequest proto.InternalMessageInfo + +type PexAddrs struct { + Addrs []NetAddress `protobuf:"bytes,1,rep,name=addrs,proto3" json:"addrs"` +} + +func (m *PexAddrs) Reset() { *m = PexAddrs{} } +func (m *PexAddrs) String() string { return proto.CompactTextString(m) } +func (*PexAddrs) ProtoMessage() {} +func (*PexAddrs) Descriptor() ([]byte, []int) { + return fileDescriptor_81c2f011fd13be57, []int{1} +} +func (m *PexAddrs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PexAddrs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PexAddrs.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PexAddrs) XXX_Merge(src proto.Message) { + xxx_messageInfo_PexAddrs.Merge(m, src) +} +func (m *PexAddrs) XXX_Size() int { + return m.Size() +} +func (m *PexAddrs) XXX_DiscardUnknown() { + xxx_messageInfo_PexAddrs.DiscardUnknown(m) +} + +var xxx_messageInfo_PexAddrs proto.InternalMessageInfo + +func (m *PexAddrs) GetAddrs() []NetAddress { + if m != nil { + return m.Addrs + } + return nil +} + +type Message struct { + // Types that are valid to be assigned to Sum: + // *Message_PexRequest + // *Message_PexAddrs + Sum isMessage_Sum `protobuf_oneof:"sum"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_81c2f011fd13be57, []int{2} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + 
xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +type isMessage_Sum interface { + isMessage_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Message_PexRequest struct { + PexRequest *PexRequest `protobuf:"bytes,1,opt,name=pex_request,json=pexRequest,proto3,oneof" json:"pex_request,omitempty"` +} +type Message_PexAddrs struct { + PexAddrs *PexAddrs `protobuf:"bytes,2,opt,name=pex_addrs,json=pexAddrs,proto3,oneof" json:"pex_addrs,omitempty"` +} + +func (*Message_PexRequest) isMessage_Sum() {} +func (*Message_PexAddrs) isMessage_Sum() {} + +func (m *Message) GetSum() isMessage_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *Message) GetPexRequest() *PexRequest { + if x, ok := m.GetSum().(*Message_PexRequest); ok { + return x.PexRequest + } + return nil +} + +func (m *Message) GetPexAddrs() *PexAddrs { + if x, ok := m.GetSum().(*Message_PexAddrs); ok { + return x.PexAddrs + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Message) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Message_PexRequest)(nil), + (*Message_PexAddrs)(nil), + } +} + +func init() { + proto.RegisterType((*PexRequest)(nil), "tendermint.p2p.PexRequest") + proto.RegisterType((*PexAddrs)(nil), "tendermint.p2p.PexAddrs") + proto.RegisterType((*Message)(nil), "tendermint.p2p.Message") +} + +func init() { proto.RegisterFile("tendermint/p2p/pex.proto", fileDescriptor_81c2f011fd13be57) } + +var fileDescriptor_81c2f011fd13be57 = []byte{ + // 268 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x28, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0x48, 0xad, 0xd0, 0x2b, + 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x43, 0xc8, 0xe8, 0x15, 0x18, 0x15, 0x48, 0x49, 0xa1, 0xa9, + 0x2c, 0xa9, 0x2c, 0x48, 0x2d, 0x86, 0xa8, 0x95, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x33, 0xf5, + 0x41, 0x2c, 0x88, 0xa8, 0x12, 0x0f, 0x17, 0x57, 0x40, 0x6a, 0x45, 0x50, 0x6a, 0x61, 0x69, 0x6a, + 0x71, 0x89, 0x92, 0x13, 0x17, 0x47, 0x40, 0x6a, 0x85, 0x63, 0x4a, 0x4a, 0x51, 0xb1, 0x90, 0x19, + 0x17, 0x6b, 0x22, 0x88, 0x21, 0xc1, 0xa8, 0xc0, 0xac, 0xc1, 0x6d, 0x24, 0xa5, 0x87, 0x6a, 0x97, + 0x9e, 0x5f, 0x6a, 0x09, 0x48, 0x61, 0x6a, 0x71, 0xb1, 0x13, 0xcb, 0x89, 0x7b, 0xf2, 0x0c, 0x41, + 0x10, 0xe5, 0x4a, 0x1d, 0x8c, 0x5c, 0xec, 0xbe, 0xa9, 0xc5, 0xc5, 0x89, 0xe9, 0xa9, 0x42, 0xb6, + 0x5c, 0xdc, 0x05, 0xa9, 0x15, 0xf1, 0x45, 0x10, 0xe3, 0x25, 0x18, 0x15, 0x18, 0xb1, 0x99, 0x84, + 0x70, 0x80, 0x07, 0x43, 0x10, 0x57, 0x01, 0x9c, 0x27, 0x64, 0xce, 0xc5, 0x09, 0xd2, 0x0e, 0x71, + 0x06, 0x13, 0x58, 0xb3, 0x04, 0x16, 0xcd, 0x60, 0xf7, 0x7a, 0x30, 0x04, 0x71, 0x14, 0x40, 0xd9, + 0x4e, 0xac, 0x5c, 0xcc, 0xc5, 0xa5, 0xb9, 0x4e, 0xfe, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, + 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, + 0x2c, 0xc7, 0x10, 0x65, 0x9a, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x8f, + 0x14, 0x66, 0xc8, 0xc1, 0x07, 0x0e, 0x29, 0xd4, 0xf0, 0x4c, 0x62, 0x03, 0x8b, 0x1a, 0x03, 0x02, + 0x00, 0x00, 0xff, 0xff, 0x3c, 0x0b, 0xcb, 0x40, 0x92, 0x01, 0x00, 0x00, +} + +func (m *PexRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PexRequest) MarshalTo(dAtA []byte) (int, error) { + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *PexAddrs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PexAddrs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PexAddrs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Addrs) > 0 { + for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Addrs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Message_PexRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_PexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PexRequest != nil { + { + size, err := m.PexRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Message_PexAddrs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_PexAddrs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PexAddrs != nil { + { + size, err := m.PexAddrs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func encodeVarintPex(dAtA []byte, offset int, v uint64) int { + offset -= sovPex(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PexRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *PexAddrs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Addrs) > 0 { + for _, e := range m.Addrs { + l = e.Size() + n += 1 + l + sovPex(uint64(l)) + } + } + return n +} + +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *Message_PexRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PexRequest != nil { + l = m.PexRequest.Size() + n += 1 + l + sovPex(uint64(l)) + } + return n +} +func (m *Message_PexAddrs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ 
= l + if m.PexAddrs != nil { + l = m.PexAddrs.Size() + n += 1 + l + sovPex(uint64(l)) + } + return n +} + +func sovPex(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPex(x uint64) (n int) { + return sovPex(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PexRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PexRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PexRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipPex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PexAddrs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PexAddrs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PexAddrs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addrs = append(m.Addrs, NetAddress{}) + if err := m.Addrs[len(m.Addrs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PexRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PexRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_PexRequest{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PexAddrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PexAddrs{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_PexAddrs{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPex(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPex + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPex + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPex + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPex + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPex + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPex + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPex = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPex = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPex = fmt.Errorf("proto: unexpected end of group") +) diff --git 
a/proto/tendermint/p2p/pex.proto b/proto/tendermint/p2p/pex.proto new file mode 100644 index 000000000..dfe238dbe --- /dev/null +++ b/proto/tendermint/p2p/pex.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; +package tendermint.p2p; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; + +import "tendermint/p2p/types.proto"; +import "gogoproto/gogo.proto"; + +message PexRequest {} + +message PexAddrs { + repeated NetAddress addrs = 1 [(gogoproto.nullable) = false]; +} + +message Message { + oneof sum { + PexRequest pex_request = 1; + PexAddrs pex_addrs = 2; + } +} diff --git a/proto/tendermint/p2p/types.pb.go b/proto/tendermint/p2p/types.pb.go new file mode 100644 index 000000000..209d9cc78 --- /dev/null +++ b/proto/tendermint/p2p/types.pb.go @@ -0,0 +1,1411 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/p2p/types.proto + +package p2p + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type NetAddress struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + IP string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` + Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` +} + +func (m *NetAddress) Reset() { *m = NetAddress{} } +func (m *NetAddress) String() string { return proto.CompactTextString(m) } +func (*NetAddress) ProtoMessage() {} +func (*NetAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_c8a29e659aeca578, []int{0} +} +func (m *NetAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NetAddress.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NetAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetAddress.Merge(m, src) +} +func (m *NetAddress) XXX_Size() int { + return m.Size() +} +func (m *NetAddress) XXX_DiscardUnknown() { + xxx_messageInfo_NetAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_NetAddress proto.InternalMessageInfo + +func (m *NetAddress) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *NetAddress) GetIP() string { + if m != nil { + return m.IP + } + return "" +} + +func (m *NetAddress) GetPort() uint32 { + if m != nil { + return m.Port + } + return 0 +} + +type ProtocolVersion struct { + P2P uint64 `protobuf:"varint,1,opt,name=p2p,proto3" json:"p2p,omitempty"` + Block uint64 `protobuf:"varint,2,opt,name=block,proto3" json:"block,omitempty"` + App uint64 `protobuf:"varint,3,opt,name=app,proto3" json:"app,omitempty"` +} + +func (m *ProtocolVersion) Reset() { *m = ProtocolVersion{} } +func (m *ProtocolVersion) String() string { return proto.CompactTextString(m) } +func (*ProtocolVersion) ProtoMessage() {} +func (*ProtocolVersion) Descriptor() ([]byte, []int) { + return 
fileDescriptor_c8a29e659aeca578, []int{1} +} +func (m *ProtocolVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProtocolVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProtocolVersion.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProtocolVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProtocolVersion.Merge(m, src) +} +func (m *ProtocolVersion) XXX_Size() int { + return m.Size() +} +func (m *ProtocolVersion) XXX_DiscardUnknown() { + xxx_messageInfo_ProtocolVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_ProtocolVersion proto.InternalMessageInfo + +func (m *ProtocolVersion) GetP2P() uint64 { + if m != nil { + return m.P2P + } + return 0 +} + +func (m *ProtocolVersion) GetBlock() uint64 { + if m != nil { + return m.Block + } + return 0 +} + +func (m *ProtocolVersion) GetApp() uint64 { + if m != nil { + return m.App + } + return 0 +} + +type DefaultNodeInfo struct { + ProtocolVersion ProtocolVersion `protobuf:"bytes,1,opt,name=protocol_version,json=protocolVersion,proto3" json:"protocol_version"` + DefaultNodeID string `protobuf:"bytes,2,opt,name=default_node_id,json=defaultNodeId,proto3" json:"default_node_id,omitempty"` + ListenAddr string `protobuf:"bytes,3,opt,name=listen_addr,json=listenAddr,proto3" json:"listen_addr,omitempty"` + Network string `protobuf:"bytes,4,opt,name=network,proto3" json:"network,omitempty"` + Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"` + Channels []byte `protobuf:"bytes,6,opt,name=channels,proto3" json:"channels,omitempty"` + Moniker string `protobuf:"bytes,7,opt,name=moniker,proto3" json:"moniker,omitempty"` + Other DefaultNodeInfoOther `protobuf:"bytes,8,opt,name=other,proto3" json:"other"` +} + +func (m *DefaultNodeInfo) Reset() { *m = DefaultNodeInfo{} } +func (m *DefaultNodeInfo) String() string { return proto.CompactTextString(m) } +func (*DefaultNodeInfo) ProtoMessage() {} +func (*DefaultNodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_c8a29e659aeca578, []int{2} +} +func (m *DefaultNodeInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DefaultNodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DefaultNodeInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DefaultNodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_DefaultNodeInfo.Merge(m, src) +} +func (m *DefaultNodeInfo) XXX_Size() int { + return m.Size() +} +func (m *DefaultNodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_DefaultNodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_DefaultNodeInfo proto.InternalMessageInfo + +func (m *DefaultNodeInfo) GetProtocolVersion() ProtocolVersion { + if m != nil { + return m.ProtocolVersion + } + return ProtocolVersion{} +} + +func (m *DefaultNodeInfo) GetDefaultNodeID() string { + if m != nil { + return m.DefaultNodeID + } + return "" +} + +func (m *DefaultNodeInfo) GetListenAddr() string { + if m != nil { + return m.ListenAddr + } + return "" +} + +func (m *DefaultNodeInfo) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *DefaultNodeInfo) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func 
(m *DefaultNodeInfo) GetChannels() []byte { + if m != nil { + return m.Channels + } + return nil +} + +func (m *DefaultNodeInfo) GetMoniker() string { + if m != nil { + return m.Moniker + } + return "" +} + +func (m *DefaultNodeInfo) GetOther() DefaultNodeInfoOther { + if m != nil { + return m.Other + } + return DefaultNodeInfoOther{} +} + +type DefaultNodeInfoOther struct { + TxIndex string `protobuf:"bytes,1,opt,name=tx_index,json=txIndex,proto3" json:"tx_index,omitempty"` + RPCAddress string `protobuf:"bytes,2,opt,name=rpc_address,json=rpcAddress,proto3" json:"rpc_address,omitempty"` +} + +func (m *DefaultNodeInfoOther) Reset() { *m = DefaultNodeInfoOther{} } +func (m *DefaultNodeInfoOther) String() string { return proto.CompactTextString(m) } +func (*DefaultNodeInfoOther) ProtoMessage() {} +func (*DefaultNodeInfoOther) Descriptor() ([]byte, []int) { + return fileDescriptor_c8a29e659aeca578, []int{3} +} +func (m *DefaultNodeInfoOther) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DefaultNodeInfoOther) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DefaultNodeInfoOther.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DefaultNodeInfoOther) XXX_Merge(src proto.Message) { + xxx_messageInfo_DefaultNodeInfoOther.Merge(m, src) +} +func (m *DefaultNodeInfoOther) XXX_Size() int { + return m.Size() +} +func (m *DefaultNodeInfoOther) XXX_DiscardUnknown() { + xxx_messageInfo_DefaultNodeInfoOther.DiscardUnknown(m) +} + +var xxx_messageInfo_DefaultNodeInfoOther proto.InternalMessageInfo + +func (m *DefaultNodeInfoOther) GetTxIndex() string { + if m != nil { + return m.TxIndex + } + return "" +} + +func (m *DefaultNodeInfoOther) GetRPCAddress() string { + if m != nil { + return m.RPCAddress + } + return "" +} + +func init() { + proto.RegisterType((*NetAddress)(nil), "tendermint.p2p.NetAddress") + proto.RegisterType((*ProtocolVersion)(nil), "tendermint.p2p.ProtocolVersion") + proto.RegisterType((*DefaultNodeInfo)(nil), "tendermint.p2p.DefaultNodeInfo") + proto.RegisterType((*DefaultNodeInfoOther)(nil), "tendermint.p2p.DefaultNodeInfoOther") +} + +func init() { proto.RegisterFile("tendermint/p2p/types.proto", fileDescriptor_c8a29e659aeca578) } + +var fileDescriptor_c8a29e659aeca578 = []byte{ + // 479 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x3d, 0x8f, 0xda, 0x40, + 0x10, 0xc5, 0xc6, 0x7c, 0xdc, 0x10, 0x8e, 0xcb, 0x0a, 0x45, 0x3e, 0x0a, 0x1b, 0xa1, 0x14, 0x54, + 0x20, 0x39, 0x4a, 0x91, 0x2e, 0x21, 0x34, 0x34, 0x77, 0xd6, 0x2a, 0x4a, 0x91, 0xc6, 0x02, 0xef, + 0x1e, 0xac, 0x30, 0xbb, 0xab, 0xf5, 0x5e, 0x42, 0xfe, 0x45, 0x7e, 0xd6, 0x95, 0x57, 0xa6, 0xb2, + 0x22, 0x53, 0xe6, 0x4f, 0x44, 0x5e, 0xfb, 0x12, 0x1f, 0x4a, 0x37, 0x6f, 0xbe, 0xde, 0xcc, 0xd3, + 0x83, 0x91, 0xa6, 0x9c, 0x50, 0x75, 0x60, 0x5c, 0xcf, 0x65, 0x20, 0xe7, 0xfa, 0xbb, 0xa4, 0xe9, + 0x4c, 0x2a, 0xa1, 0x05, 0xba, 0xfc, 0x57, 0x9b, 0xc9, 0x40, 0x8e, 0x86, 0x5b, 0xb1, 0x15, 0xa6, + 0x34, 0x2f, 0xa2, 0xb2, 0x6b, 0x12, 0x02, 0xdc, 0x50, 0xfd, 0x81, 0x10, 0x45, 0xd3, 0x14, 0xbd, + 0x02, 0x9b, 0x11, 0xd7, 0x1a, 0x5b, 0xd3, 0x8b, 0x45, 0x3b, 0xcf, 0x7c, 0x7b, 0xb5, 0xc4, 0x36, + 0x23, 0x26, 0x2f, 0x5d, 0xbb, 0x96, 0x0f, 0xb1, 0xcd, 0x24, 0x42, 0xe0, 0x48, 0xa1, 0xb4, 0xdb, + 0x1c, 0x5b, 0xd3, 0x3e, 0x36, 0xf1, 0xe4, 0x13, 0x0c, 0xc2, 0x62, 0x75, 0x2c, 0x92, 0xcf, 0x54, + 
0xa5, 0x4c, 0x70, 0x74, 0x0d, 0x4d, 0x19, 0x48, 0xb3, 0xd7, 0x59, 0x74, 0xf2, 0xcc, 0x6f, 0x86, + 0x41, 0x88, 0x8b, 0x1c, 0x1a, 0x42, 0x6b, 0x93, 0x88, 0x78, 0x6f, 0x96, 0x3b, 0xb8, 0x04, 0xe8, + 0x0a, 0x9a, 0x6b, 0x29, 0xcd, 0x5a, 0x07, 0x17, 0xe1, 0xe4, 0xb7, 0x0d, 0x83, 0x25, 0xbd, 0x5b, + 0xdf, 0x27, 0xfa, 0x46, 0x10, 0xba, 0xe2, 0x77, 0x02, 0x85, 0x70, 0x25, 0x2b, 0xa6, 0xe8, 0x6b, + 0x49, 0x65, 0x38, 0x7a, 0x81, 0x3f, 0x7b, 0xfe, 0xfc, 0xec, 0xec, 0xa2, 0x85, 0xf3, 0x90, 0xf9, + 0x0d, 0x3c, 0x90, 0x67, 0x87, 0xbe, 0x83, 0x01, 0x29, 0x49, 0x22, 0x2e, 0x08, 0x8d, 0x18, 0xa9, + 0x9e, 0x7e, 0x99, 0x67, 0x7e, 0xbf, 0xce, 0xbf, 0xc4, 0x7d, 0x52, 0x83, 0x04, 0xf9, 0xd0, 0x4b, + 0x58, 0xaa, 0x29, 0x8f, 0xd6, 0x84, 0x28, 0x73, 0xfa, 0x05, 0x86, 0x32, 0x55, 0xc8, 0x8b, 0x5c, + 0xe8, 0x70, 0xaa, 0xbf, 0x09, 0xb5, 0x77, 0x1d, 0x53, 0x7c, 0x82, 0x45, 0xe5, 0xe9, 0xfc, 0x56, + 0x59, 0xa9, 0x20, 0x1a, 0x41, 0x37, 0xde, 0xad, 0x39, 0xa7, 0x49, 0xea, 0xb6, 0xc7, 0xd6, 0xf4, + 0x05, 0xfe, 0x8b, 0x8b, 0xa9, 0x83, 0xe0, 0x6c, 0x4f, 0x95, 0xdb, 0x29, 0xa7, 0x2a, 0x88, 0xde, + 0x43, 0x4b, 0xe8, 0x1d, 0x55, 0x6e, 0xd7, 0x88, 0xf1, 0xfa, 0x5c, 0x8c, 0x33, 0x1d, 0x6f, 0x8b, + 0xde, 0x4a, 0x91, 0x72, 0x70, 0xb2, 0x81, 0xe1, 0xff, 0x9a, 0xd0, 0x35, 0x74, 0xf5, 0x31, 0x62, + 0x9c, 0xd0, 0x63, 0xe9, 0x12, 0xdc, 0xd1, 0xc7, 0x55, 0x01, 0xd1, 0x1c, 0x7a, 0x4a, 0xc6, 0xe6, + 0x79, 0x9a, 0xa6, 0x95, 0x6c, 0x97, 0x79, 0xe6, 0x03, 0x0e, 0x3f, 0x56, 0xfe, 0xc2, 0xa0, 0x64, + 0x5c, 0xc5, 0x8b, 0xdb, 0x87, 0xdc, 0xb3, 0x1e, 0x73, 0xcf, 0xfa, 0x95, 0x7b, 0xd6, 0x8f, 0x93, + 0xd7, 0x78, 0x3c, 0x79, 0x8d, 0x9f, 0x27, 0xaf, 0xf1, 0xe5, 0xed, 0x96, 0xe9, 0xdd, 0xfd, 0x66, + 0x16, 0x8b, 0xc3, 0xbc, 0x66, 0xf0, 0xba, 0xd7, 0x8d, 0x8d, 0x9f, 0x9b, 0x7f, 0xd3, 0x36, 0xd9, + 0x37, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x6c, 0xb3, 0x68, 0x97, 0x15, 0x03, 0x00, 0x00, +} + +func (m *NetAddress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x18 + } + if len(m.IP) > 0 { + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintTypes(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0x12 + } + if len(m.ID) > 0 { + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ProtocolVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProtocolVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProtocolVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.App != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.App)) + i-- + dAtA[i] = 0x18 + } + if m.Block != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Block)) + i-- + dAtA[i] = 0x10 + } + if m.P2P != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.P2P)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m 
*DefaultNodeInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DefaultNodeInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DefaultNodeInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Other.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + if len(m.Moniker) > 0 { + i -= len(m.Moniker) + copy(dAtA[i:], m.Moniker) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Moniker))) + i-- + dAtA[i] = 0x3a + } + if len(m.Channels) > 0 { + i -= len(m.Channels) + copy(dAtA[i:], m.Channels) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Channels))) + i-- + dAtA[i] = 0x32 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x2a + } + if len(m.Network) > 0 { + i -= len(m.Network) + copy(dAtA[i:], m.Network) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Network))) + i-- + dAtA[i] = 0x22 + } + if len(m.ListenAddr) > 0 { + i -= len(m.ListenAddr) + copy(dAtA[i:], m.ListenAddr) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ListenAddr))) + i-- + dAtA[i] = 0x1a + } + if len(m.DefaultNodeID) > 0 { + i -= len(m.DefaultNodeID) + copy(dAtA[i:], m.DefaultNodeID) + i = encodeVarintTypes(dAtA, i, uint64(len(m.DefaultNodeID))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ProtocolVersion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DefaultNodeInfoOther) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DefaultNodeInfoOther) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DefaultNodeInfoOther) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RPCAddress) > 0 { + i -= len(m.RPCAddress) + copy(dAtA[i:], m.RPCAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.RPCAddress))) + i-- + dAtA[i] = 0x12 + } + if len(m.TxIndex) > 0 { + i -= len(m.TxIndex) + copy(dAtA[i:], m.TxIndex) + i = encodeVarintTypes(dAtA, i, uint64(len(m.TxIndex))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *NetAddress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.IP) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Port != 0 { + n += 1 + sovTypes(uint64(m.Port)) + } + return n +} + +func (m *ProtocolVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.P2P != 0 { + n += 1 + sovTypes(uint64(m.P2P)) + } + if m.Block != 0 { + n += 1 + sovTypes(uint64(m.Block)) + } + if m.App != 0 { + n += 1 + sovTypes(uint64(m.App)) + } + return n +} 
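The marshaling code in these generated files is easier to follow once you see that every message serializes back-to-front: MarshalToSizedBuffer starts with i == len(dAtA) and walks toward the front of the buffer, writing each field's payload first, then its length varint, then its one-byte tag, so the finished slice reads tag, length, payload front-to-back. This is what lets a parent message hand a child dAtA[:i] and prepend the child's size afterwards, without serializing the child twice. Below is a minimal, self-contained sketch of the two varint helpers that make this work; sov and encodeVarint are local stand-ins mirroring the generated sovTypes/encodeVarintTypes pair, not the generated API itself.

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors the generated sovTypes/sovPex/sovConn helpers: the number of
// bytes needed to varint-encode x. The x|1 guards the x == 0 case, which
// still occupies one byte.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// encodeVarint mirrors encodeVarintTypes: it writes x into the sov(x) bytes
// immediately *before* offset and returns the new front of the buffer,
// matching the back-to-front style of MarshalToSizedBuffer.
func encodeVarint(dAtA []byte, offset int, x uint64) int {
	offset -= sov(x)
	base := offset
	for x >= 1<<7 {
		dAtA[offset] = uint8(x&0x7f | 0x80)
		x >>= 7
		offset++
	}
	dAtA[offset] = uint8(x)
	return base
}

func main() {
	buf := make([]byte, 16)
	i := len(buf)
	// Encode a varint field with value 300, then its field-1/varint tag
	// (0x8) -- the same two steps the generated code performs for a field
	// like ProtocolVersion.P2P.
	i = encodeVarint(buf, i, 300)
	i--
	buf[i] = 0x8
	fmt.Printf("% x\n", buf[i:]) // prints: 08 ac 02
}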
+ +func (m *DefaultNodeInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ProtocolVersion.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.DefaultNodeID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ListenAddr) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Network) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Channels) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Moniker) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.Other.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *DefaultNodeInfoOther) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TxIndex) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.RPCAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *NetAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProtocolVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProtocolVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProtocolVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field P2P", wireType) + } + m.P2P = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.P2P |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + } + m.Block = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Block |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field App", wireType) + } + m.App = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.App |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DefaultNodeInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DefaultNodeInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DefaultNodeInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtocolVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProtocolVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultNodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultNodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListenAddr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ListenAddr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Network = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Channels", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + m.Channels = append(m.Channels[:0], dAtA[iNdEx:postIndex]...) + if m.Channels == nil { + m.Channels = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Moniker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Moniker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Other", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Other.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DefaultNodeInfoOther) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DefaultNodeInfoOther: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DefaultNodeInfoOther: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxIndex", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TxIndex = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RPCAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RPCAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/p2p/types.proto b/proto/tendermint/p2p/types.proto new file mode 100644 index 000000000..0d42ea400 --- /dev/null +++ b/proto/tendermint/p2p/types.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; +package tendermint.p2p; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; + +import "gogoproto/gogo.proto"; + +message NetAddress { + string id = 1 [(gogoproto.customname) = "ID"]; + string ip = 2 [(gogoproto.customname) = "IP"]; + uint32 port = 3; +} + +message ProtocolVersion { + uint64 p2p = 1 [(gogoproto.customname) = "P2P"]; + uint64 block = 2; + uint64 app = 3; +} + +message DefaultNodeInfo { + ProtocolVersion protocol_version = 1 [(gogoproto.nullable) = false]; + string default_node_id = 2 [(gogoproto.customname) = "DefaultNodeID"]; + string listen_addr = 3; + string network = 4; + string version = 5; + bytes channels = 6; + string moniker = 7; + DefaultNodeInfoOther other = 8 [(gogoproto.nullable) = false]; +} + +message DefaultNodeInfoOther { + string tx_index = 1; + string rpc_address = 2 [(gogoproto.customname) = "RPCAddress"]; +} diff --git a/proto/tendermint/privval/types.pb.go 
b/proto/tendermint/privval/types.pb.go new file mode 100644 index 000000000..da30f7527 --- /dev/null +++ b/proto/tendermint/privval/types.pb.go @@ -0,0 +1,3064 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/privval/types.proto + +package privval + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + crypto "github.com/tendermint/tendermint/proto/tendermint/crypto" + types "github.com/tendermint/tendermint/proto/tendermint/types" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Errors int32 + +const ( + Errors_ERRORS_UNKNOWN Errors = 0 + Errors_ERRORS_UNEXPECTED_RESPONSE Errors = 1 + Errors_ERRORS_NO_CONNECTION Errors = 2 + Errors_ERRORS_CONNECTION_TIMEOUT Errors = 3 + Errors_ERRORS_READ_TIMEOUT Errors = 4 + Errors_ERRORS_WRITE_TIMEOUT Errors = 5 +) + +var Errors_name = map[int32]string{ + 0: "ERRORS_UNKNOWN", + 1: "ERRORS_UNEXPECTED_RESPONSE", + 2: "ERRORS_NO_CONNECTION", + 3: "ERRORS_CONNECTION_TIMEOUT", + 4: "ERRORS_READ_TIMEOUT", + 5: "ERRORS_WRITE_TIMEOUT", +} + +var Errors_value = map[string]int32{ + "ERRORS_UNKNOWN": 0, + "ERRORS_UNEXPECTED_RESPONSE": 1, + "ERRORS_NO_CONNECTION": 2, + "ERRORS_CONNECTION_TIMEOUT": 3, + "ERRORS_READ_TIMEOUT": 4, + "ERRORS_WRITE_TIMEOUT": 5, +} + +func (x Errors) String() string { + return proto.EnumName(Errors_name, int32(x)) +} + +func (Errors) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_cb4e437a5328cf9c, []int{0} +} + +type RemoteSignerError struct { + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (m *RemoteSignerError) Reset() { *m = RemoteSignerError{} } +func (m *RemoteSignerError) String() string { return proto.CompactTextString(m) } +func (*RemoteSignerError) ProtoMessage() {} +func (*RemoteSignerError) Descriptor() ([]byte, []int) { + return fileDescriptor_cb4e437a5328cf9c, []int{0} +} +func (m *RemoteSignerError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoteSignerError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoteSignerError.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoteSignerError) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoteSignerError.Merge(m, src) +} +func (m *RemoteSignerError) XXX_Size() int { + return m.Size() +} +func (m *RemoteSignerError) XXX_DiscardUnknown() { + xxx_messageInfo_RemoteSignerError.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoteSignerError proto.InternalMessageInfo + +func (m *RemoteSignerError) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *RemoteSignerError) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// PubKeyRequest requests the consensus public key from the remote signer. 
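Throughout this generated file, every field tag, length prefix, and integer is decoded with the same base-128 varint loop (seven payload bits per byte, continuation bit 0x80) visible in the Unmarshal methods above, and encoded by encodeVarintTypes further down. The standalone sketch below illustrates that wire convention in isolation; the helper names putUvarint and readUvarint are illustrative and not part of the generated API.

```go
package main

import "fmt"

// putUvarint appends x in the same base-128 little-endian layout that
// encodeVarintTypes emits: seven payload bits per byte, with the high
// bit set on every byte except the last.
func putUvarint(buf []byte, x uint64) []byte {
	for x >= 0x80 {
		buf = append(buf, byte(x)|0x80)
		x >>= 7
	}
	return append(buf, byte(x))
}

// readUvarint mirrors the decode loop used by every Unmarshal above:
// accumulate seven bits per byte until a byte without the 0x80
// continuation bit terminates the value.
func readUvarint(buf []byte) (x uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("varint overflows uint64")
		}
		if n >= len(buf) {
			return 0, 0, fmt.Errorf("unexpected EOF")
		}
		b := buf[n]
		n++
		x |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return x, n, nil
		}
	}
}

func main() {
	buf := putUvarint(nil, 300)
	v, n, _ := readUvarint(buf)
	fmt.Printf("%x -> %d (%d bytes)\n", buf, v, n) // ac02 -> 300 (2 bytes)
}
```

The message plumbing that follows, beginning with the PubKeyRequest type introduced by the comment above, is assembled entirely from these primitives plus per-field wire tags.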
+type PubKeyRequest struct { + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *PubKeyRequest) Reset() { *m = PubKeyRequest{} } +func (m *PubKeyRequest) String() string { return proto.CompactTextString(m) } +func (*PubKeyRequest) ProtoMessage() {} +func (*PubKeyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cb4e437a5328cf9c, []int{1} +} +func (m *PubKeyRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PubKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PubKeyRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PubKeyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PubKeyRequest.Merge(m, src) +} +func (m *PubKeyRequest) XXX_Size() int { + return m.Size() +} +func (m *PubKeyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PubKeyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PubKeyRequest proto.InternalMessageInfo + +func (m *PubKeyRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +// PubKeyResponse is a response message containing the public key. +type PubKeyResponse struct { + PubKey crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` + Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *PubKeyResponse) Reset() { *m = PubKeyResponse{} } +func (m *PubKeyResponse) String() string { return proto.CompactTextString(m) } +func (*PubKeyResponse) ProtoMessage() {} +func (*PubKeyResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cb4e437a5328cf9c, []int{2} +} +func (m *PubKeyResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PubKeyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PubKeyResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PubKeyResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PubKeyResponse.Merge(m, src) +} +func (m *PubKeyResponse) XXX_Size() int { + return m.Size() +} +func (m *PubKeyResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PubKeyResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PubKeyResponse proto.InternalMessageInfo + +func (m *PubKeyResponse) GetPubKey() crypto.PublicKey { + if m != nil { + return m.PubKey + } + return crypto.PublicKey{} +} + +func (m *PubKeyResponse) GetError() *RemoteSignerError { + if m != nil { + return m.Error + } + return nil +} + +// SignVoteRequest is a request to sign a vote +type SignVoteRequest struct { + Vote *types.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *SignVoteRequest) Reset() { *m = SignVoteRequest{} } +func (m *SignVoteRequest) String() string { return proto.CompactTextString(m) } +func (*SignVoteRequest) ProtoMessage() {} +func (*SignVoteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cb4e437a5328cf9c, []int{3} +} +func (m *SignVoteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignVoteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if 
deterministic { + return xxx_messageInfo_SignVoteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignVoteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignVoteRequest.Merge(m, src) +} +func (m *SignVoteRequest) XXX_Size() int { + return m.Size() +} +func (m *SignVoteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SignVoteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SignVoteRequest proto.InternalMessageInfo + +func (m *SignVoteRequest) GetVote() *types.Vote { + if m != nil { + return m.Vote + } + return nil +} + +func (m *SignVoteRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +// SignedVoteResponse is a response containing a signed vote or an error +type SignedVoteResponse struct { + Vote types.Vote `protobuf:"bytes,1,opt,name=vote,proto3" json:"vote"` + Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *SignedVoteResponse) Reset() { *m = SignedVoteResponse{} } +func (m *SignedVoteResponse) String() string { return proto.CompactTextString(m) } +func (*SignedVoteResponse) ProtoMessage() {} +func (*SignedVoteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cb4e437a5328cf9c, []int{4} +} +func (m *SignedVoteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignedVoteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignedVoteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignedVoteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignedVoteResponse.Merge(m, src) +} +func (m *SignedVoteResponse) XXX_Size() int { + return m.Size() +} +func (m *SignedVoteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SignedVoteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SignedVoteResponse proto.InternalMessageInfo + +func (m *SignedVoteResponse) GetVote() types.Vote { + if m != nil { + return m.Vote + } + return types.Vote{} +} + +func (m *SignedVoteResponse) GetError() *RemoteSignerError { + if m != nil { + return m.Error + } + return nil +} + +// SignProposalRequest is a request to sign a proposal +type SignProposalRequest struct { + Proposal *types.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *SignProposalRequest) Reset() { *m = SignProposalRequest{} } +func (m *SignProposalRequest) String() string { return proto.CompactTextString(m) } +func (*SignProposalRequest) ProtoMessage() {} +func (*SignProposalRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cb4e437a5328cf9c, []int{5} +} +func (m *SignProposalRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignProposalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignProposalRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignProposalRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignProposalRequest.Merge(m, src) +} +func (m *SignProposalRequest) XXX_Size() int { 
+ return m.Size() +} +func (m *SignProposalRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SignProposalRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SignProposalRequest proto.InternalMessageInfo + +func (m *SignProposalRequest) GetProposal() *types.Proposal { + if m != nil { + return m.Proposal + } + return nil +} + +func (m *SignProposalRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +// SignedProposalResponse is a response containing a signed proposal or an error +type SignedProposalResponse struct { + Proposal types.Proposal `protobuf:"bytes,1,opt,name=proposal,proto3" json:"proposal"` + Error *RemoteSignerError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *SignedProposalResponse) Reset() { *m = SignedProposalResponse{} } +func (m *SignedProposalResponse) String() string { return proto.CompactTextString(m) } +func (*SignedProposalResponse) ProtoMessage() {} +func (*SignedProposalResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cb4e437a5328cf9c, []int{6} +} +func (m *SignedProposalResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignedProposalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignedProposalResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignedProposalResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignedProposalResponse.Merge(m, src) +} +func (m *SignedProposalResponse) XXX_Size() int { + return m.Size() +} +func (m *SignedProposalResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SignedProposalResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SignedProposalResponse proto.InternalMessageInfo + +func (m *SignedProposalResponse) GetProposal() types.Proposal { + if m != nil { + return m.Proposal + } + return types.Proposal{} +} + +func (m *SignedProposalResponse) GetError() *RemoteSignerError { + if m != nil { + return m.Error + } + return nil +} + +// PingRequest is a request to confirm that the connection is alive. +type PingRequest struct { +} + +func (m *PingRequest) Reset() { *m = PingRequest{} } +func (m *PingRequest) String() string { return proto.CompactTextString(m) } +func (*PingRequest) ProtoMessage() {} +func (*PingRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cb4e437a5328cf9c, []int{7} +} +func (m *PingRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PingRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PingRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PingRequest.Merge(m, src) +} +func (m *PingRequest) XXX_Size() int { + return m.Size() +} +func (m *PingRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PingRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PingRequest proto.InternalMessageInfo + +// PingResponse is a response to confirm that the connection is alive.
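Every request message above has a matching response, and all of them travel inside the single Message oneof envelope defined just below, so one authenticated byte stream carries the whole privval dialogue. The round trip below is a minimal sketch, assuming the package is imported from the path implied by this file's location, github.com/tendermint/tendermint/proto/tendermint/privval.

```go
package main

import (
	"fmt"

	privval "github.com/tendermint/tendermint/proto/tendermint/privval"
)

func main() {
	// Wrap an empty PingRequest in the Message oneof envelope
	// (ping_request is field 7 on the wire, per the generated tags).
	msg := privval.Message{
		Sum: &privval.Message_PingRequest{PingRequest: &privval.PingRequest{}},
	}

	bz, err := msg.Marshal()
	if err != nil {
		panic(err)
	}

	// The receiver decodes the envelope and switches on the concrete variant.
	var got privval.Message
	if err := got.Unmarshal(bz); err != nil {
		panic(err)
	}
	switch got.Sum.(type) {
	case *privval.Message_PingRequest:
		fmt.Println("ping request received; reply with a PingResponse")
	default:
		fmt.Println("unexpected message variant")
	}
}
```

The empty PingResponse type that the comment above introduces is the reply half of that exchange.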
+type PingResponse struct { +} + +func (m *PingResponse) Reset() { *m = PingResponse{} } +func (m *PingResponse) String() string { return proto.CompactTextString(m) } +func (*PingResponse) ProtoMessage() {} +func (*PingResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cb4e437a5328cf9c, []int{8} +} +func (m *PingResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PingResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PingResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PingResponse.Merge(m, src) +} +func (m *PingResponse) XXX_Size() int { + return m.Size() +} +func (m *PingResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PingResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PingResponse proto.InternalMessageInfo + +type Message struct { + // Types that are valid to be assigned to Sum: + // *Message_PubKeyRequest + // *Message_PubKeyResponse + // *Message_SignVoteRequest + // *Message_SignedVoteResponse + // *Message_SignProposalRequest + // *Message_SignedProposalResponse + // *Message_PingRequest + // *Message_PingResponse + Sum isMessage_Sum `protobuf_oneof:"sum"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_cb4e437a5328cf9c, []int{9} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +type isMessage_Sum interface { + isMessage_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Message_PubKeyRequest struct { + PubKeyRequest *PubKeyRequest `protobuf:"bytes,1,opt,name=pub_key_request,json=pubKeyRequest,proto3,oneof" json:"pub_key_request,omitempty"` +} +type Message_PubKeyResponse struct { + PubKeyResponse *PubKeyResponse `protobuf:"bytes,2,opt,name=pub_key_response,json=pubKeyResponse,proto3,oneof" json:"pub_key_response,omitempty"` +} +type Message_SignVoteRequest struct { + SignVoteRequest *SignVoteRequest `protobuf:"bytes,3,opt,name=sign_vote_request,json=signVoteRequest,proto3,oneof" json:"sign_vote_request,omitempty"` +} +type Message_SignedVoteResponse struct { + SignedVoteResponse *SignedVoteResponse `protobuf:"bytes,4,opt,name=signed_vote_response,json=signedVoteResponse,proto3,oneof" json:"signed_vote_response,omitempty"` +} +type Message_SignProposalRequest struct { + SignProposalRequest *SignProposalRequest `protobuf:"bytes,5,opt,name=sign_proposal_request,json=signProposalRequest,proto3,oneof" json:"sign_proposal_request,omitempty"` +} +type Message_SignedProposalResponse struct { + SignedProposalResponse *SignedProposalResponse 
`protobuf:"bytes,6,opt,name=signed_proposal_response,json=signedProposalResponse,proto3,oneof" json:"signed_proposal_response,omitempty"` +} +type Message_PingRequest struct { + PingRequest *PingRequest `protobuf:"bytes,7,opt,name=ping_request,json=pingRequest,proto3,oneof" json:"ping_request,omitempty"` +} +type Message_PingResponse struct { + PingResponse *PingResponse `protobuf:"bytes,8,opt,name=ping_response,json=pingResponse,proto3,oneof" json:"ping_response,omitempty"` +} + +func (*Message_PubKeyRequest) isMessage_Sum() {} +func (*Message_PubKeyResponse) isMessage_Sum() {} +func (*Message_SignVoteRequest) isMessage_Sum() {} +func (*Message_SignedVoteResponse) isMessage_Sum() {} +func (*Message_SignProposalRequest) isMessage_Sum() {} +func (*Message_SignedProposalResponse) isMessage_Sum() {} +func (*Message_PingRequest) isMessage_Sum() {} +func (*Message_PingResponse) isMessage_Sum() {} + +func (m *Message) GetSum() isMessage_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *Message) GetPubKeyRequest() *PubKeyRequest { + if x, ok := m.GetSum().(*Message_PubKeyRequest); ok { + return x.PubKeyRequest + } + return nil +} + +func (m *Message) GetPubKeyResponse() *PubKeyResponse { + if x, ok := m.GetSum().(*Message_PubKeyResponse); ok { + return x.PubKeyResponse + } + return nil +} + +func (m *Message) GetSignVoteRequest() *SignVoteRequest { + if x, ok := m.GetSum().(*Message_SignVoteRequest); ok { + return x.SignVoteRequest + } + return nil +} + +func (m *Message) GetSignedVoteResponse() *SignedVoteResponse { + if x, ok := m.GetSum().(*Message_SignedVoteResponse); ok { + return x.SignedVoteResponse + } + return nil +} + +func (m *Message) GetSignProposalRequest() *SignProposalRequest { + if x, ok := m.GetSum().(*Message_SignProposalRequest); ok { + return x.SignProposalRequest + } + return nil +} + +func (m *Message) GetSignedProposalResponse() *SignedProposalResponse { + if x, ok := m.GetSum().(*Message_SignedProposalResponse); ok { + return x.SignedProposalResponse + } + return nil +} + +func (m *Message) GetPingRequest() *PingRequest { + if x, ok := m.GetSum().(*Message_PingRequest); ok { + return x.PingRequest + } + return nil +} + +func (m *Message) GetPingResponse() *PingResponse { + if x, ok := m.GetSum().(*Message_PingResponse); ok { + return x.PingResponse + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Message) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Message_PubKeyRequest)(nil), + (*Message_PubKeyResponse)(nil), + (*Message_SignVoteRequest)(nil), + (*Message_SignedVoteResponse)(nil), + (*Message_SignProposalRequest)(nil), + (*Message_SignedProposalResponse)(nil), + (*Message_PingRequest)(nil), + (*Message_PingResponse)(nil), + } +} + +// AuthSigMessage is duplicated from p2p prior to the P2P refactor. +// It is used for the SecretConnection until we migrate privval to gRPC. 
+// https://github.com/tendermint/tendermint/issues/4698 +type AuthSigMessage struct { + PubKey crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` + Sig []byte `protobuf:"bytes,2,opt,name=sig,proto3" json:"sig,omitempty"` +} + +func (m *AuthSigMessage) Reset() { *m = AuthSigMessage{} } +func (m *AuthSigMessage) String() string { return proto.CompactTextString(m) } +func (*AuthSigMessage) ProtoMessage() {} +func (*AuthSigMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_cb4e437a5328cf9c, []int{10} +} +func (m *AuthSigMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthSigMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthSigMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthSigMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthSigMessage.Merge(m, src) +} +func (m *AuthSigMessage) XXX_Size() int { + return m.Size() +} +func (m *AuthSigMessage) XXX_DiscardUnknown() { + xxx_messageInfo_AuthSigMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthSigMessage proto.InternalMessageInfo + +func (m *AuthSigMessage) GetPubKey() crypto.PublicKey { + if m != nil { + return m.PubKey + } + return crypto.PublicKey{} +} + +func (m *AuthSigMessage) GetSig() []byte { + if m != nil { + return m.Sig + } + return nil +} + +func init() { + proto.RegisterEnum("tendermint.privval.Errors", Errors_name, Errors_value) + proto.RegisterType((*RemoteSignerError)(nil), "tendermint.privval.RemoteSignerError") + proto.RegisterType((*PubKeyRequest)(nil), "tendermint.privval.PubKeyRequest") + proto.RegisterType((*PubKeyResponse)(nil), "tendermint.privval.PubKeyResponse") + proto.RegisterType((*SignVoteRequest)(nil), "tendermint.privval.SignVoteRequest") + proto.RegisterType((*SignedVoteResponse)(nil), "tendermint.privval.SignedVoteResponse") + proto.RegisterType((*SignProposalRequest)(nil), "tendermint.privval.SignProposalRequest") + proto.RegisterType((*SignedProposalResponse)(nil), "tendermint.privval.SignedProposalResponse") + proto.RegisterType((*PingRequest)(nil), "tendermint.privval.PingRequest") + proto.RegisterType((*PingResponse)(nil), "tendermint.privval.PingResponse") + proto.RegisterType((*Message)(nil), "tendermint.privval.Message") + proto.RegisterType((*AuthSigMessage)(nil), "tendermint.privval.AuthSigMessage") +} + +func init() { proto.RegisterFile("tendermint/privval/types.proto", fileDescriptor_cb4e437a5328cf9c) } + +var fileDescriptor_cb4e437a5328cf9c = []byte{ + // 779 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4b, 0x4f, 0xdb, 0x4a, + 0x14, 0xb6, 0xc9, 0x0b, 0x4e, 0x1e, 0x84, 0x81, 0xcb, 0x0d, 0x11, 0xd7, 0xe4, 0xfa, 0xea, 0xb6, + 0x28, 0x8b, 0xa4, 0xa2, 0x52, 0xa5, 0x8a, 0x6e, 0x78, 0x58, 0x4d, 0x14, 0xe1, 0xa4, 0x93, 0x50, + 0x10, 0x52, 0x65, 0xe5, 0x31, 0x75, 0x2c, 0x88, 0xed, 0x7a, 0x1c, 0xa4, 0xac, 0xbb, 0xeb, 0xaa, + 0x52, 0xff, 0x44, 0xd7, 0xfd, 0x15, 0x2c, 0x59, 0x76, 0x55, 0x55, 0xf0, 0x47, 0xaa, 0x8c, 0x27, + 0x8e, 0xf3, 0x42, 0xad, 0xd8, 0xcd, 0x9c, 0x73, 0xe6, 0x3b, 0xdf, 0x37, 0xf3, 0xd9, 0x07, 0x24, + 0x97, 0x98, 0x1d, 0xe2, 0xf4, 0x0c, 0xd3, 0x2d, 0xda, 0x8e, 0x71, 0x7d, 0xdd, 0xbc, 0x2a, 0xba, + 0x03, 0x9b, 0xd0, 0x82, 0xed, 0x58, 0xae, 0x85, 0xd0, 0x38, 0x5f, 0xe0, 0xf9, 0xec, 0x76, 0xe0, + 0x4c, 0xdb, 0x19, 0xd8, 0xae, 0x55, 
0xbc, 0x24, 0x03, 0x7e, 0x62, 0x22, 0xcb, 0x90, 0x82, 0x78, + 0xd9, 0x0d, 0xdd, 0xd2, 0x2d, 0xb6, 0x2c, 0x0e, 0x57, 0x5e, 0x54, 0x2e, 0xc3, 0x1a, 0x26, 0x3d, + 0xcb, 0x25, 0x75, 0x43, 0x37, 0x89, 0xa3, 0x38, 0x8e, 0xe5, 0x20, 0x04, 0xe1, 0xb6, 0xd5, 0x21, + 0x19, 0x31, 0x27, 0xee, 0x46, 0x30, 0x5b, 0xa3, 0x1c, 0xc4, 0x3b, 0x84, 0xb6, 0x1d, 0xc3, 0x76, + 0x0d, 0xcb, 0xcc, 0x2c, 0xe5, 0xc4, 0xdd, 0x15, 0x1c, 0x0c, 0xc9, 0x79, 0x48, 0xd6, 0xfa, 0xad, + 0x0a, 0x19, 0x60, 0xf2, 0xa1, 0x4f, 0xa8, 0x8b, 0xb6, 0x60, 0xb9, 0xdd, 0x6d, 0x1a, 0xa6, 0x66, + 0x74, 0x18, 0xd4, 0x0a, 0x8e, 0xb1, 0x7d, 0xb9, 0x23, 0x7f, 0x12, 0x21, 0x35, 0x2a, 0xa6, 0xb6, + 0x65, 0x52, 0x82, 0xf6, 0x21, 0x66, 0xf7, 0x5b, 0xda, 0x25, 0x19, 0xb0, 0xe2, 0xf8, 0xde, 0x76, + 0x21, 0x70, 0x03, 0x9e, 0xda, 0x42, 0xad, 0xdf, 0xba, 0x32, 0xda, 0x15, 0x32, 0x38, 0x0c, 0xdf, + 0xfc, 0xd8, 0x11, 0x70, 0xd4, 0x66, 0x20, 0x68, 0x1f, 0x22, 0x64, 0x48, 0x9d, 0xf1, 0x8a, 0xef, + 0xfd, 0x5f, 0x98, 0xbd, 0xbc, 0xc2, 0x8c, 0x4e, 0xec, 0x9d, 0x91, 0xcf, 0x61, 0x75, 0x18, 0x7d, + 0x6b, 0xb9, 0x64, 0x44, 0x3d, 0x0f, 0xe1, 0x6b, 0xcb, 0x25, 0x9c, 0xc9, 0x66, 0x10, 0xce, 0xbb, + 0x53, 0x56, 0xcc, 0x6a, 0x26, 0x64, 0x2e, 0x4d, 0xca, 0xfc, 0x28, 0x02, 0x62, 0x0d, 0x3b, 0x1e, + 0x38, 0x97, 0xfa, 0xec, 0x77, 0xd0, 0xb9, 0x42, 0xaf, 0xc7, 0xa3, 0xf4, 0x75, 0x61, 0x7d, 0x18, + 0xad, 0x39, 0x96, 0x6d, 0xd1, 0xe6, 0xd5, 0x48, 0xe3, 0x0b, 0x58, 0xb6, 0x79, 0x88, 0x33, 0xc9, + 0xce, 0x32, 0xf1, 0x0f, 0xf9, 0xb5, 0x0f, 0xe9, 0xfd, 0x22, 0xc2, 0xa6, 0xa7, 0x77, 0xdc, 0x8c, + 0x6b, 0x7e, 0xf5, 0x27, 0xdd, 0xb8, 0xf6, 0x71, 0xcf, 0x47, 0xe9, 0x4f, 0x42, 0xbc, 0x66, 0x98, + 0x3a, 0xd7, 0x2d, 0xa7, 0x20, 0xe1, 0x6d, 0x3d, 0x66, 0xf2, 0xb7, 0x08, 0xc4, 0x4e, 0x08, 0xa5, + 0x4d, 0x9d, 0xa0, 0x0a, 0xac, 0x72, 0x13, 0x6a, 0x8e, 0x57, 0xce, 0xc9, 0xfe, 0x3b, 0xaf, 0xe3, + 0x84, 0xdd, 0x4b, 0x02, 0x4e, 0xda, 0x13, 0xfe, 0x57, 0x21, 0x3d, 0x06, 0xf3, 0x9a, 0x71, 0xfe, + 0xf2, 0x43, 0x68, 0x5e, 0x65, 0x49, 0xc0, 0x29, 0x7b, 0xf2, 0x0b, 0x79, 0x03, 0x6b, 0xd4, 0xd0, + 0x4d, 0x6d, 0xe8, 0x08, 0x9f, 0x5e, 0x88, 0x01, 0xfe, 0x37, 0x0f, 0x70, 0xca, 0xd4, 0x25, 0x01, + 0xaf, 0xd2, 0x29, 0x9f, 0x5f, 0xc0, 0x06, 0x65, 0xef, 0x35, 0x02, 0xe5, 0x34, 0xc3, 0x0c, 0xf5, + 0xc9, 0x22, 0xd4, 0x49, 0x3f, 0x97, 0x04, 0x8c, 0xe8, 0xac, 0xcb, 0xdf, 0xc1, 0x5f, 0x8c, 0xee, + 0xe8, 0x11, 0x7d, 0xca, 0x11, 0x06, 0xfe, 0x74, 0x11, 0xf8, 0x94, 0x4f, 0x4b, 0x02, 0x5e, 0xa7, + 0x73, 0xec, 0xfb, 0x1e, 0x32, 0x9c, 0x7a, 0xa0, 0x01, 0xa7, 0x1f, 0x65, 0x1d, 0xf2, 0x8b, 0xe9, + 0x4f, 0xdb, 0xb3, 0x24, 0xe0, 0x4d, 0x3a, 0xdf, 0xb8, 0xc7, 0x90, 0xb0, 0x0d, 0x53, 0xf7, 0xd9, + 0xc7, 0x18, 0xf6, 0xce, 0xdc, 0x17, 0x1c, 0xbb, 0xac, 0x24, 0xe0, 0xb8, 0x3d, 0xde, 0xa2, 0xd7, + 0x90, 0xe4, 0x28, 0x9c, 0xe2, 0x32, 0x83, 0xc9, 0x2d, 0x86, 0xf1, 0x89, 0x25, 0xec, 0xc0, 0xfe, + 0x30, 0x02, 0x21, 0xda, 0xef, 0xc9, 0x1a, 0xa4, 0x0e, 0xfa, 0x6e, 0xb7, 0x6e, 0xe8, 0x23, 0xeb, + 0x3e, 0xea, 0xff, 0x99, 0x86, 0x10, 0x35, 0x74, 0xe6, 0xce, 0x04, 0x1e, 0x2e, 0xf3, 0x5f, 0x45, + 0x88, 0xb2, 0xaf, 0x88, 0x22, 0x04, 0x29, 0x05, 0xe3, 0x2a, 0xae, 0x6b, 0xa7, 0x6a, 0x45, 0xad, + 0x9e, 0xa9, 0x69, 0x01, 0x49, 0x90, 0xf5, 0x63, 0xca, 0x79, 0x4d, 0x39, 0x6a, 0x28, 0xc7, 0x1a, + 0x56, 0xea, 0xb5, 0xaa, 0x5a, 0x57, 0xd2, 0x22, 0xca, 0xc0, 0x06, 0xcf, 0xab, 0x55, 0xed, 0xa8, + 0xaa, 0xaa, 0xca, 0x51, 0xa3, 0x5c, 0x55, 0xd3, 0x4b, 0xe8, 0x1f, 0xd8, 0xe2, 0x99, 0x71, 0x58, + 0x6b, 0x94, 0x4f, 0x94, 0xea, 0x69, 0x23, 0x1d, 0x42, 0x7f, 0xc3, 0x3a, 0x4f, 0x63, 0xe5, 0xe0, + 0xd8, 0x4f, 0x84, 0x03, 0x88, 0x67, 0xb8, 0xdc, 0x50, 0xfc, 
0x4c, 0xe4, 0xb0, 0x7e, 0x73, 0x27, + 0x89, 0xb7, 0x77, 0x92, 0xf8, 0xf3, 0x4e, 0x12, 0x3f, 0xdf, 0x4b, 0xc2, 0xed, 0xbd, 0x24, 0x7c, + 0xbf, 0x97, 0x84, 0x8b, 0x97, 0xba, 0xe1, 0x76, 0xfb, 0xad, 0x42, 0xdb, 0xea, 0x15, 0x83, 0xc3, + 0x31, 0x38, 0x79, 0x87, 0x03, 0x71, 0x76, 0x14, 0xb7, 0xa2, 0x2c, 0xf3, 0xfc, 0x57, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x46, 0x64, 0xeb, 0xa4, 0xa7, 0x07, 0x00, 0x00, +} + +func (m *RemoteSignerError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoteSignerError) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoteSignerError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x12 + } + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PubKeyRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PubKeyRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PubKeyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PubKeyResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PubKeyResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PubKeyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SignVoteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignVoteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignVoteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0x12 + } + if m.Vote != nil { + { + size, err := m.Vote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SignedVoteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignedVoteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignedVoteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Vote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SignProposalRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignProposalRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignProposalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0x12 + } + if m.Proposal != nil { + { + size, err := m.Proposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SignedProposalResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignedProposalResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignedProposalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Proposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PingRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PingRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PingRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *PingResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *PingResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PingResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Message_PubKeyRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_PubKeyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PubKeyRequest != nil { + { + size, err := m.PubKeyRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Message_PubKeyResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_PubKeyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PubKeyResponse != nil { + { + size, err := m.PubKeyResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Message_SignVoteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SignVoteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignVoteRequest != nil { + { + size, err := m.SignVoteRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Message_SignedVoteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SignedVoteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignedVoteResponse != nil { + { + size, err := m.SignedVoteResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Message_SignProposalRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SignProposalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignProposalRequest != nil { + { + size, err := m.SignProposalRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Message_SignedProposalResponse) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SignedProposalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignedProposalResponse != nil { + { + size, err := m.SignedProposalResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Message_PingRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_PingRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PingRequest != nil { + { + size, err := m.PingRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Message_PingResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_PingResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PingResponse != nil { + { + size, err := m.PingResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *AuthSigMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthSigMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthSigMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Sig) > 0 { + i -= len(m.Sig) + copy(dAtA[i:], m.Sig) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Sig))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RemoteSignerError) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PubKeyRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PubKeyResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PubKey.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignVoteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Vote != nil { + l = m.Vote.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignedVoteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Vote.Size() + n += 1 
+ l + sovTypes(uint64(l)) + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignProposalRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Proposal != nil { + l = m.Proposal.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignedProposalResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Proposal.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PingRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *PingResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *Message_PubKeyRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PubKeyRequest != nil { + l = m.PubKeyRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_PubKeyResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PubKeyResponse != nil { + l = m.PubKeyResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_SignVoteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignVoteRequest != nil { + l = m.SignVoteRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_SignedVoteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignedVoteResponse != nil { + l = m.SignedVoteResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_SignProposalRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignProposalRequest != nil { + l = m.SignProposalRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_SignedProposalResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignedProposalResponse != nil { + l = m.SignedProposalResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_PingRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PingRequest != nil { + l = m.PingRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_PingResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PingResponse != nil { + l = m.PingResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *AuthSigMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PubKey.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Sig) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RemoteSignerError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoteSignerError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoteSignerError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PubKeyRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PubKeyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PubKeyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PubKeyResponse) Unmarshal(dAtA []byte) error 
{ + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PubKeyResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PubKeyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &RemoteSignerError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignVoteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignVoteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignVoteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vote == nil { + m.Vote = &types.Vote{} + } + if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignedVoteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignedVoteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignedVoteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Vote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &RemoteSignerError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + 
return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignProposalRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignProposalRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignProposalRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proposal == nil { + m.Proposal = &types.Proposal{} + } + if err := m.Proposal.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignedProposalResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignedProposalResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignedProposalResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Proposal.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &RemoteSignerError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PubKeyRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_PubKeyRequest{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PubKeyResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_PubKeyResponse{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignVoteRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SignVoteRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SignVoteRequest{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedVoteResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v 
:= &SignedVoteResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SignedVoteResponse{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignProposalRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SignProposalRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SignProposalRequest{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedProposalResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SignedProposalResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SignedProposalResponse{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PingRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PingRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_PingRequest{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PingResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &PingResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_PingResponse{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthSigMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 
0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthSigMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthSigMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sig", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sig = append(m.Sig[:0], dAtA[iNdEx:postIndex]...) 
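// Aside: a minimal sketch of the varint decode that every Unmarshal in this
// generated file repeats inline. Each protobuf field begins with a varint
// tag; the low three bits are the wire type, the rest the field number.
// (Hand-written illustration with hypothetical names, not gogoproto API.)
//
//	func decodeVarint(buf []byte) (v uint64, n int, err error) {
//		for shift := uint(0); shift < 64; shift += 7 {
//			if n >= len(buf) {
//				return 0, 0, io.ErrUnexpectedEOF
//			}
//			b := buf[n]
//			n++
//			v |= uint64(b&0x7F) << shift // low 7 bits of each byte carry payload
//			if b < 0x80 {                // high bit clear marks the final byte
//				return v, n, nil
//			}
//		}
//		return 0, 0, errors.New("varint overflows 64 bits")
//	}
//
// The callers above then split the tag as fieldNum := wire >> 3 and
// wireType := wire & 0x7 before dispatching on the switch.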
+	if m.Sig == nil {
+		m.Sig = []byte{}
+	}
+	iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipTypes(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthTypes
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupTypes
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthTypes
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthTypes        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowTypes          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/proto/tendermint/privval/types.proto b/proto/tendermint/privval/types.proto
new file mode 100644
index 000000000..0d712447c
--- /dev/null
+++ b/proto/tendermint/privval/types.proto
@@ -0,0 +1,84 @@
+syntax = "proto3";
+package tendermint.privval;
+
+import "tendermint/crypto/keys.proto";
+import "tendermint/types/types.proto";
+import "gogoproto/gogo.proto";
+
+option go_package = "github.com/tendermint/tendermint/proto/tendermint/privval";
+
+enum Errors {
+  ERRORS_UNKNOWN = 0;
+  ERRORS_UNEXPECTED_RESPONSE = 1;
+  ERRORS_NO_CONNECTION = 2;
+  ERRORS_CONNECTION_TIMEOUT = 3;
+  ERRORS_READ_TIMEOUT = 4;
+  ERRORS_WRITE_TIMEOUT = 5;
+}
+
+message RemoteSignerError {
+  int32 code = 1;
+  string description = 2;
+}
+
+// PubKeyRequest requests the consensus public key from the remote signer.
+message PubKeyRequest {
+  string chain_id = 1;
+}
+
+// PubKeyResponse is a response message containing the public key.
+message PubKeyResponse {
+  tendermint.crypto.PublicKey pub_key = 1 [(gogoproto.nullable) = false];
+  RemoteSignerError error = 2;
+}
+
+// SignVoteRequest is a request to sign a vote.
+message SignVoteRequest {
+  tendermint.types.Vote vote = 1;
+  string chain_id = 2;
+}
+
+// SignedVoteResponse is a response containing a signed vote or an error.
+message SignedVoteResponse {
+  tendermint.types.Vote vote = 1 [(gogoproto.nullable) = false];
+  RemoteSignerError error = 2;
+}
+
+// SignProposalRequest is a request to sign a proposal.
+message SignProposalRequest {
+  tendermint.types.Proposal proposal = 1;
+  string chain_id = 2;
+}
+
+// SignedProposalResponse is a response containing a signed proposal or an error.
+message SignedProposalResponse {
+  tendermint.types.Proposal proposal = 1 [(gogoproto.nullable) = false];
+  RemoteSignerError error = 2;
+}
+
+// PingRequest is a request to confirm that the connection is alive.
+message PingRequest {}
+
+// PingResponse is a response to confirm that the connection is alive.
+message PingResponse {}
+
+message Message {
+  oneof sum {
+    PubKeyRequest pub_key_request = 1;
+    PubKeyResponse pub_key_response = 2;
+    SignVoteRequest sign_vote_request = 3;
+    SignedVoteResponse signed_vote_response = 4;
+    SignProposalRequest sign_proposal_request = 5;
+    SignedProposalResponse signed_proposal_response = 6;
+    PingRequest ping_request = 7;
+    PingResponse ping_response = 8;
+  }
+}
+
+// AuthSigMessage is duplicated from p2p prior to the P2P refactor.
+// It is used for the SecretConnection until we migrate privval to gRPC.
+// https://github.com/tendermint/tendermint/issues/4698
+message AuthSigMessage {
+  tendermint.crypto.PublicKey pub_key = 1 [(gogoproto.nullable) = false];
+  bytes sig = 2;
+}
diff --git a/proto/tendermint/rpc/grpc/types.proto b/proto/tendermint/rpc/grpc/types.proto
new file mode 100644
index 000000000..ee948a406
--- /dev/null
+++ b/proto/tendermint/rpc/grpc/types.proto
@@ -0,0 +1,32 @@
+syntax = "proto3";
+package tendermint.rpc.grpc;
+option go_package = "github.com/tendermint/tendermint/rpc/grpc;coregrpc";
+
+import "tendermint/abci/types.proto";
+
+//----------------------------------------
+// Request types
+
+message RequestPing {}
+
+message RequestBroadcastTx {
+  bytes tx = 1;
+}
+
+//----------------------------------------
+// Response types
+
+message ResponsePing {}
+
+message ResponseBroadcastTx {
+  tendermint.abci.ResponseCheckTx check_tx = 1;
+  tendermint.abci.ResponseDeliverTx deliver_tx = 2;
+}
+
+//----------------------------------------
+// Service Definition
+
+service BroadcastAPI {
+  rpc Ping(RequestPing) returns (ResponsePing);
+  rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx);
+}
diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go
new file mode 100644
index 000000000..d94724fff
--- /dev/null
+++ b/proto/tendermint/state/types.pb.go
@@ -0,0 +1,1973 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
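As an aside for readers of this patch: the privval Message oneof above is the frame that travels between the node and a remote signer, and the BroadcastAPI service pairs the Request/Response types for gRPC clients. Below is a minimal sketch of wrapping a signing request and round-tripping it through the generated code; it assumes the generated packages are imported as privval and types, and all names in it are illustrative, not part of the patch.

	// exampleSignVoteRoundTrip wraps a vote in the Message oneof, encodes it,
	// and decodes it back out, as a remote-signer client would frame traffic.
	func exampleSignVoteRoundTrip(vote *types.Vote) (*privval.SignVoteRequest, error) {
		msg := privval.Message{
			Sum: &privval.Message_SignVoteRequest{
				SignVoteRequest: &privval.SignVoteRequest{
					Vote:    vote,         // vote to be signed
					ChainId: "test-chain", // hypothetical chain ID
				},
			},
		}
		raw, err := msg.Marshal() // generated marshaller
		if err != nil {
			return nil, err
		}
		var got privval.Message
		if err := got.Unmarshal(raw); err != nil {
			return nil, err
		}
		// Generated oneof getters return nil when a different arm is set.
		return got.GetSignVoteRequest(), nil
	}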
+// source: tendermint/state/types.proto + +package state + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + types "github.com/tendermint/tendermint/abci/types" + types1 "github.com/tendermint/tendermint/proto/tendermint/types" + version "github.com/tendermint/tendermint/proto/tendermint/version" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ABCIResponses retains the responses +// of the various ABCI calls during block processing. +// It is persisted to disk for each height before calling Commit. +type ABCIResponses struct { + DeliverTxs []*types.ResponseDeliverTx `protobuf:"bytes,1,rep,name=deliver_txs,json=deliverTxs,proto3" json:"deliver_txs,omitempty"` + EndBlock *types.ResponseEndBlock `protobuf:"bytes,2,opt,name=end_block,json=endBlock,proto3" json:"end_block,omitempty"` + BeginBlock *types.ResponseBeginBlock `protobuf:"bytes,3,opt,name=begin_block,json=beginBlock,proto3" json:"begin_block,omitempty"` +} + +func (m *ABCIResponses) Reset() { *m = ABCIResponses{} } +func (m *ABCIResponses) String() string { return proto.CompactTextString(m) } +func (*ABCIResponses) ProtoMessage() {} +func (*ABCIResponses) Descriptor() ([]byte, []int) { + return fileDescriptor_ccfacf933f22bf93, []int{0} +} +func (m *ABCIResponses) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ABCIResponses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ABCIResponses.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ABCIResponses) XXX_Merge(src proto.Message) { + xxx_messageInfo_ABCIResponses.Merge(m, src) +} +func (m *ABCIResponses) XXX_Size() int { + return m.Size() +} +func (m *ABCIResponses) XXX_DiscardUnknown() { + xxx_messageInfo_ABCIResponses.DiscardUnknown(m) +} + +var xxx_messageInfo_ABCIResponses proto.InternalMessageInfo + +func (m *ABCIResponses) GetDeliverTxs() []*types.ResponseDeliverTx { + if m != nil { + return m.DeliverTxs + } + return nil +} + +func (m *ABCIResponses) GetEndBlock() *types.ResponseEndBlock { + if m != nil { + return m.EndBlock + } + return nil +} + +func (m *ABCIResponses) GetBeginBlock() *types.ResponseBeginBlock { + if m != nil { + return m.BeginBlock + } + return nil +} + +// ValidatorsInfo represents the latest validator set, or the last height it changed +type ValidatorsInfo struct { + ValidatorSet *types1.ValidatorSet `protobuf:"bytes,1,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` + LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` +} + +func (m *ValidatorsInfo) Reset() { *m = ValidatorsInfo{} } +func (m *ValidatorsInfo) String() string { return proto.CompactTextString(m) } +func 
(*ValidatorsInfo) ProtoMessage() {} +func (*ValidatorsInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_ccfacf933f22bf93, []int{1} +} +func (m *ValidatorsInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorsInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorsInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValidatorsInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorsInfo.Merge(m, src) +} +func (m *ValidatorsInfo) XXX_Size() int { + return m.Size() +} +func (m *ValidatorsInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorsInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorsInfo proto.InternalMessageInfo + +func (m *ValidatorsInfo) GetValidatorSet() *types1.ValidatorSet { + if m != nil { + return m.ValidatorSet + } + return nil +} + +func (m *ValidatorsInfo) GetLastHeightChanged() int64 { + if m != nil { + return m.LastHeightChanged + } + return 0 +} + +// ConsensusParamsInfo represents the latest consensus params, or the last height it changed +type ConsensusParamsInfo struct { + ConsensusParams types1.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` + LastHeightChanged int64 `protobuf:"varint,2,opt,name=last_height_changed,json=lastHeightChanged,proto3" json:"last_height_changed,omitempty"` +} + +func (m *ConsensusParamsInfo) Reset() { *m = ConsensusParamsInfo{} } +func (m *ConsensusParamsInfo) String() string { return proto.CompactTextString(m) } +func (*ConsensusParamsInfo) ProtoMessage() {} +func (*ConsensusParamsInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_ccfacf933f22bf93, []int{2} +} +func (m *ConsensusParamsInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusParamsInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusParamsInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusParamsInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusParamsInfo.Merge(m, src) +} +func (m *ConsensusParamsInfo) XXX_Size() int { + return m.Size() +} +func (m *ConsensusParamsInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusParamsInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusParamsInfo proto.InternalMessageInfo + +func (m *ConsensusParamsInfo) GetConsensusParams() types1.ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return types1.ConsensusParams{} +} + +func (m *ConsensusParamsInfo) GetLastHeightChanged() int64 { + if m != nil { + return m.LastHeightChanged + } + return 0 +} + +type Version struct { + Consensus version.Consensus `protobuf:"bytes,1,opt,name=consensus,proto3" json:"consensus"` + Software string `protobuf:"bytes,2,opt,name=software,proto3" json:"software,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { + return fileDescriptor_ccfacf933f22bf93, []int{3} +} +func (m *Version) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + if deterministic { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(m, src) +} +func (m *Version) XXX_Size() int { + return m.Size() +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo + +func (m *Version) GetConsensus() version.Consensus { + if m != nil { + return m.Consensus + } + return version.Consensus{} +} + +func (m *Version) GetSoftware() string { + if m != nil { + return m.Software + } + return "" +} + +type State struct { + Version Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version"` + // immutable + ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + InitialHeight int64 `protobuf:"varint,14,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` + // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) + LastBlockHeight int64 `protobuf:"varint,3,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockID types1.BlockID `protobuf:"bytes,4,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` + LastBlockTime time.Time `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3,stdtime" json:"last_block_time"` + // LastValidators is used to validate block.LastCommit. + // Validators are persisted to the database separately every time they change, + // so we can query for historical validator sets. + // Note that if s.LastBlockHeight causes a valset change, + // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + 1 + // Extra +1 due to nextValSet delay. + NextValidators *types1.ValidatorSet `protobuf:"bytes,6,opt,name=next_validators,json=nextValidators,proto3" json:"next_validators,omitempty"` + Validators *types1.ValidatorSet `protobuf:"bytes,7,opt,name=validators,proto3" json:"validators,omitempty"` + LastValidators *types1.ValidatorSet `protobuf:"bytes,8,opt,name=last_validators,json=lastValidators,proto3" json:"last_validators,omitempty"` + LastHeightValidatorsChanged int64 `protobuf:"varint,9,opt,name=last_height_validators_changed,json=lastHeightValidatorsChanged,proto3" json:"last_height_validators_changed,omitempty"` + // Consensus parameters used for validating blocks. + // Changes returned by EndBlock and updated after Commit. 
+ ConsensusParams types1.ConsensusParams `protobuf:"bytes,10,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` + LastHeightConsensusParamsChanged int64 `protobuf:"varint,11,opt,name=last_height_consensus_params_changed,json=lastHeightConsensusParamsChanged,proto3" json:"last_height_consensus_params_changed,omitempty"` + // Merkle root of the results from executing prev block + LastResultsHash []byte `protobuf:"bytes,12,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` + // the latest AppHash we've received from calling abci.Commit() + AppHash []byte `protobuf:"bytes,13,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` +} + +func (m *State) Reset() { *m = State{} } +func (m *State) String() string { return proto.CompactTextString(m) } +func (*State) ProtoMessage() {} +func (*State) Descriptor() ([]byte, []int) { + return fileDescriptor_ccfacf933f22bf93, []int{4} +} +func (m *State) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *State) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_State.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *State) XXX_Merge(src proto.Message) { + xxx_messageInfo_State.Merge(m, src) +} +func (m *State) XXX_Size() int { + return m.Size() +} +func (m *State) XXX_DiscardUnknown() { + xxx_messageInfo_State.DiscardUnknown(m) +} + +var xxx_messageInfo_State proto.InternalMessageInfo + +func (m *State) GetVersion() Version { + if m != nil { + return m.Version + } + return Version{} +} + +func (m *State) GetChainID() string { + if m != nil { + return m.ChainID + } + return "" +} + +func (m *State) GetInitialHeight() int64 { + if m != nil { + return m.InitialHeight + } + return 0 +} + +func (m *State) GetLastBlockHeight() int64 { + if m != nil { + return m.LastBlockHeight + } + return 0 +} + +func (m *State) GetLastBlockID() types1.BlockID { + if m != nil { + return m.LastBlockID + } + return types1.BlockID{} +} + +func (m *State) GetLastBlockTime() time.Time { + if m != nil { + return m.LastBlockTime + } + return time.Time{} +} + +func (m *State) GetNextValidators() *types1.ValidatorSet { + if m != nil { + return m.NextValidators + } + return nil +} + +func (m *State) GetValidators() *types1.ValidatorSet { + if m != nil { + return m.Validators + } + return nil +} + +func (m *State) GetLastValidators() *types1.ValidatorSet { + if m != nil { + return m.LastValidators + } + return nil +} + +func (m *State) GetLastHeightValidatorsChanged() int64 { + if m != nil { + return m.LastHeightValidatorsChanged + } + return 0 +} + +func (m *State) GetConsensusParams() types1.ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return types1.ConsensusParams{} +} + +func (m *State) GetLastHeightConsensusParamsChanged() int64 { + if m != nil { + return m.LastHeightConsensusParamsChanged + } + return 0 +} + +func (m *State) GetLastResultsHash() []byte { + if m != nil { + return m.LastResultsHash + } + return nil +} + +func (m *State) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +func init() { + proto.RegisterType((*ABCIResponses)(nil), "tendermint.state.ABCIResponses") + proto.RegisterType((*ValidatorsInfo)(nil), "tendermint.state.ValidatorsInfo") + proto.RegisterType((*ConsensusParamsInfo)(nil), "tendermint.state.ConsensusParamsInfo") + 
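// Aside: note how (gogoproto.nullable) = false in types.proto shapes the
// generated surface above. Non-nullable fields come back by value from their
// getters and need no nil check, while ordinary message fields are pointers
// that may be absent on the wire. An illustrative comparison (st is assumed
// to be a *State decoded elsewhere):
//
//	params := st.GetConsensusParams() // always a types1.ConsensusParams value
//	if vals := st.GetValidators(); vals != nil {
//		_ = vals // only present if the field was set
//	}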
proto.RegisterType((*Version)(nil), "tendermint.state.Version") + proto.RegisterType((*State)(nil), "tendermint.state.State") +} + +func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) } + +var fileDescriptor_ccfacf933f22bf93 = []byte{ + // 763 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x6f, 0xd3, 0x30, + 0x14, 0x6e, 0xe8, 0xb6, 0xb6, 0xce, 0xda, 0x0e, 0x8f, 0x43, 0xd6, 0xb1, 0xb4, 0x2b, 0x3f, 0x34, + 0x71, 0x48, 0xa5, 0x71, 0x40, 0x5c, 0x26, 0x2d, 0x2d, 0x62, 0x95, 0x26, 0x04, 0xd9, 0xb4, 0x03, + 0x97, 0xc8, 0x6d, 0xbc, 0x24, 0xa2, 0x4d, 0xa2, 0xd8, 0x2d, 0xe3, 0x0f, 0xe0, 0xbe, 0x2b, 0xff, + 0xd1, 0x8e, 0x3b, 0x22, 0x0e, 0x03, 0xba, 0x7f, 0x04, 0xd9, 0xce, 0x0f, 0xb7, 0x65, 0xd2, 0x10, + 0x37, 0xfb, 0x7d, 0xdf, 0xfb, 0xfc, 0xf9, 0xf9, 0x3d, 0x19, 0x3c, 0xa6, 0x38, 0x70, 0x70, 0x3c, + 0xf6, 0x03, 0xda, 0x21, 0x14, 0x51, 0xdc, 0xa1, 0x5f, 0x22, 0x4c, 0x8c, 0x28, 0x0e, 0x69, 0x08, + 0x37, 0x72, 0xd4, 0xe0, 0x68, 0xe3, 0x91, 0x1b, 0xba, 0x21, 0x07, 0x3b, 0x6c, 0x25, 0x78, 0x8d, + 0x6d, 0x49, 0x05, 0x0d, 0x86, 0xbe, 0x2c, 0xd2, 0x90, 0x8f, 0xe0, 0xf1, 0x39, 0xb4, 0xb5, 0x84, + 0x4e, 0xd1, 0xc8, 0x77, 0x10, 0x0d, 0xe3, 0x84, 0xb1, 0xb3, 0xc4, 0x88, 0x50, 0x8c, 0xc6, 0xa9, + 0x80, 0x2e, 0xc1, 0x53, 0x1c, 0x13, 0x3f, 0x0c, 0xe6, 0x0e, 0x68, 0xba, 0x61, 0xe8, 0x8e, 0x70, + 0x87, 0xef, 0x06, 0x93, 0xf3, 0x0e, 0xf5, 0xc7, 0x98, 0x50, 0x34, 0x8e, 0x04, 0xa1, 0xfd, 0x43, + 0x01, 0xd5, 0x43, 0xb3, 0xdb, 0xb7, 0x30, 0x89, 0xc2, 0x80, 0x60, 0x02, 0xbb, 0x40, 0x75, 0xf0, + 0xc8, 0x9f, 0xe2, 0xd8, 0xa6, 0x17, 0x44, 0x53, 0x5a, 0xc5, 0x3d, 0x75, 0xbf, 0x6d, 0x48, 0xc5, + 0x60, 0x97, 0x34, 0xd2, 0x84, 0x9e, 0xe0, 0x9e, 0x5e, 0x58, 0xc0, 0x49, 0x97, 0x04, 0x1e, 0x80, + 0x0a, 0x0e, 0x1c, 0x7b, 0x30, 0x0a, 0x87, 0x9f, 0xb4, 0x07, 0x2d, 0x65, 0x4f, 0xdd, 0xdf, 0xbd, + 0x53, 0xe2, 0x4d, 0xe0, 0x98, 0x8c, 0x68, 0x95, 0x71, 0xb2, 0x82, 0x3d, 0xa0, 0x0e, 0xb0, 0xeb, + 0x07, 0x89, 0x42, 0x91, 0x2b, 0x3c, 0xb9, 0x53, 0xc1, 0x64, 0x5c, 0xa1, 0x01, 0x06, 0xd9, 0xba, + 0xfd, 0x55, 0x01, 0xb5, 0xb3, 0xb4, 0xa0, 0xa4, 0x1f, 0x9c, 0x87, 0xb0, 0x0b, 0xaa, 0x59, 0x89, + 0x6d, 0x82, 0xa9, 0xa6, 0x70, 0x69, 0x5d, 0x96, 0x16, 0x05, 0xcc, 0x12, 0x4f, 0x30, 0xb5, 0xd6, + 0xa7, 0xd2, 0x0e, 0x1a, 0x60, 0x73, 0x84, 0x08, 0xb5, 0x3d, 0xec, 0xbb, 0x1e, 0xb5, 0x87, 0x1e, + 0x0a, 0x5c, 0xec, 0xf0, 0x7b, 0x16, 0xad, 0x87, 0x0c, 0x3a, 0xe2, 0x48, 0x57, 0x00, 0xed, 0x6f, + 0x0a, 0xd8, 0xec, 0x32, 0x9f, 0x01, 0x99, 0x90, 0xf7, 0xfc, 0xfd, 0xb8, 0x19, 0x0b, 0x6c, 0x0c, + 0xd3, 0xb0, 0x2d, 0xde, 0x35, 0xf1, 0xb3, 0xbb, 0xec, 0x67, 0x41, 0xc0, 0x5c, 0xb9, 0xba, 0x69, + 0x16, 0xac, 0xfa, 0x70, 0x3e, 0xfc, 0xcf, 0xde, 0x3c, 0x50, 0x3a, 0x13, 0x8d, 0x03, 0x0f, 0x41, + 0x25, 0x53, 0x4b, 0x7c, 0xec, 0xc8, 0x3e, 0x92, 0x06, 0xcb, 0x9d, 0x24, 0x1e, 0xf2, 0x2c, 0xd8, + 0x00, 0x65, 0x12, 0x9e, 0xd3, 0xcf, 0x28, 0xc6, 0xfc, 0xc8, 0x8a, 0x95, 0xed, 0xdb, 0xbf, 0xd7, + 0xc0, 0xea, 0x09, 0x9b, 0x23, 0xf8, 0x1a, 0x94, 0x12, 0xad, 0xe4, 0x98, 0x2d, 0x63, 0x71, 0xd6, + 0x8c, 0xc4, 0x54, 0x72, 0x44, 0xca, 0x87, 0xcf, 0x41, 0x79, 0xe8, 0x21, 0x3f, 0xb0, 0x7d, 0x71, + 0xa7, 0x8a, 0xa9, 0xce, 0x6e, 0x9a, 0xa5, 0x2e, 0x8b, 0xf5, 0x7b, 0x56, 0x89, 0x83, 0x7d, 0x07, + 0x3e, 0x03, 0x35, 0x3f, 0xf0, 0xa9, 0x8f, 0x46, 0x49, 0x25, 0xb4, 0x1a, 0xaf, 0x40, 0x35, 0x89, + 0x8a, 0x22, 0xc0, 0x17, 0x80, 0x97, 0x44, 0xb4, 0x59, 0xca, 0x2c, 0x72, 0x66, 0x9d, 0x01, 0xbc, + 0x8f, 0x12, 0xae, 0x05, 0xaa, 0x12, 0xd7, 0x77, 0xb4, 0x95, 0x65, 0xef, 0xe2, 0xa9, 
0x78, 0x56, + 0xbf, 0x67, 0x6e, 0x32, 0xef, 0xb3, 0x9b, 0xa6, 0x7a, 0x9c, 0x4a, 0xf5, 0x7b, 0x96, 0x9a, 0xe9, + 0xf6, 0x1d, 0x78, 0x0c, 0xea, 0x92, 0x26, 0x1b, 0x4e, 0x6d, 0x95, 0xab, 0x36, 0x0c, 0x31, 0xb9, + 0x46, 0x3a, 0xb9, 0xc6, 0x69, 0x3a, 0xb9, 0x66, 0x99, 0xc9, 0x5e, 0xfe, 0x6c, 0x2a, 0x56, 0x35, + 0xd3, 0x62, 0x28, 0x7c, 0x0b, 0xea, 0x01, 0xbe, 0xa0, 0x76, 0xd6, 0xac, 0x44, 0x5b, 0xbb, 0x57, + 0x7b, 0xd7, 0x58, 0x5a, 0x3e, 0x29, 0xf0, 0x00, 0x00, 0x49, 0xa3, 0x74, 0x2f, 0x0d, 0x29, 0x83, + 0x19, 0xe1, 0xd7, 0x92, 0x44, 0xca, 0xf7, 0x33, 0xc2, 0xd2, 0x24, 0x23, 0x5d, 0xa0, 0xcb, 0xdd, + 0x9c, 0xeb, 0x65, 0x8d, 0x5d, 0xe1, 0x8f, 0xb5, 0x9d, 0x37, 0x76, 0x9e, 0x9d, 0xb4, 0xf8, 0x5f, + 0xc7, 0x0c, 0xfc, 0xe7, 0x98, 0xbd, 0x03, 0x4f, 0xe7, 0xc6, 0x6c, 0x41, 0x3f, 0xb3, 0xa7, 0x72, + 0x7b, 0x2d, 0x69, 0xee, 0xe6, 0x85, 0x52, 0x8f, 0x69, 0x23, 0xc6, 0x98, 0x4c, 0x46, 0x94, 0xd8, + 0x1e, 0x22, 0x9e, 0xb6, 0xde, 0x52, 0xf6, 0xd6, 0x45, 0x23, 0x5a, 0x22, 0x7e, 0x84, 0x88, 0x07, + 0xb7, 0x40, 0x19, 0x45, 0x91, 0xa0, 0x54, 0x39, 0xa5, 0x84, 0xa2, 0x88, 0x41, 0xe6, 0x87, 0xab, + 0x99, 0xae, 0x5c, 0xcf, 0x74, 0xe5, 0xd7, 0x4c, 0x57, 0x2e, 0x6f, 0xf5, 0xc2, 0xf5, 0xad, 0x5e, + 0xf8, 0x7e, 0xab, 0x17, 0x3e, 0xbe, 0x72, 0x7d, 0xea, 0x4d, 0x06, 0xc6, 0x30, 0x1c, 0x77, 0xe4, + 0x3f, 0x25, 0x5f, 0x8a, 0x8f, 0x6d, 0xf1, 0x4b, 0x1c, 0xac, 0xf1, 0xf8, 0xcb, 0x3f, 0x01, 0x00, + 0x00, 0xff, 0xff, 0xa5, 0x17, 0xac, 0x23, 0x2d, 0x07, 0x00, 0x00, +} + +func (m *ABCIResponses) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ABCIResponses) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ABCIResponses) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BeginBlock != nil { + { + size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.EndBlock != nil { + { + size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.DeliverTxs) > 0 { + for iNdEx := len(m.DeliverTxs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DeliverTxs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidatorsInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorsInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorsInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastHeightChanged != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastHeightChanged)) + i-- + dAtA[i] = 0x10 + } + if m.ValidatorSet != nil { + { + size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*ConsensusParamsInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusParamsInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusParamsInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastHeightChanged != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastHeightChanged)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Version) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Version) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Version) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Software) > 0 { + i -= len(m.Software) + copy(dAtA[i:], m.Software) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Software))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Consensus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *State) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *State) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InitialHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.InitialHeight)) + i-- + dAtA[i] = 0x70 + } + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x6a + } + if len(m.LastResultsHash) > 0 { + i -= len(m.LastResultsHash) + copy(dAtA[i:], m.LastResultsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) + i-- + dAtA[i] = 0x62 + } + if m.LastHeightConsensusParamsChanged != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastHeightConsensusParamsChanged)) + i-- + dAtA[i] = 0x58 + } + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + if m.LastHeightValidatorsChanged != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastHeightValidatorsChanged)) + i-- + dAtA[i] = 0x48 + } + if m.LastValidators != nil { + { + size, err := m.LastValidators.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Validators != nil { + { + size, err := m.Validators.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.NextValidators != nil { + { + 
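// Aside: MarshalToSizedBuffer fills dAtA from the end toward the front: i
// starts at len(dAtA), each field's payload is written before its tag, and
// fields are emitted in descending field-number order so the finished buffer
// reads front-to-back in ascending order. A hand sketch of the same idea
// (payload and size are stand-in names):
//
//	buf := make([]byte, size)
//	i := len(buf)
//	i -= len(payload)
//	copy(buf[i:], payload)                              // payload first...
//	i = encodeVarintTypes(buf, i, uint64(len(payload))) // ...then its length...
//	i--
//	buf[i] = 0x32 // ...then the tag (field 6, wire type 2)
//
// encodeVarintTypes itself is defined just below in this file.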
size, err := m.NextValidators.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) + if err10 != nil { + return 0, err10 + } + i -= n10 + i = encodeVarintTypes(dAtA, i, uint64(n10)) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastBlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.LastBlockHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockHeight)) + i-- + dAtA[i] = 0x18 + } + if len(m.ChainID) > 0 { + i -= len(m.ChainID) + copy(dAtA[i:], m.ChainID) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ABCIResponses) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DeliverTxs) > 0 { + for _, e := range m.DeliverTxs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.EndBlock != nil { + l = m.EndBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.BeginBlock != nil { + l = m.BeginBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ValidatorsInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidatorSet != nil { + l = m.ValidatorSet.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastHeightChanged != 0 { + n += 1 + sovTypes(uint64(m.LastHeightChanged)) + } + return n +} + +func (m *ConsensusParamsInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.LastHeightChanged != 0 { + n += 1 + sovTypes(uint64(m.LastHeightChanged)) + } + return n +} + +func (m *Version) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Consensus.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Software) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *State) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Version.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.ChainID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastBlockHeight != 0 { + n += 1 + sovTypes(uint64(m.LastBlockHeight)) + } + l = m.LastBlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime) + n += 1 + l + sovTypes(uint64(l)) + if m.NextValidators != nil { + l = m.NextValidators.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Validators != nil { + l = m.Validators.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastValidators != nil { + l = m.LastValidators.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastHeightValidatorsChanged != 0 { + n += 1 + sovTypes(uint64(m.LastHeightValidatorsChanged)) + } + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.LastHeightConsensusParamsChanged != 0 { + 
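// Aside: the Size methods here and sovTypes below compute varint widths as
// (bits.Len64(x|1) + 6) / 7, i.e. one byte per started group of 7
// significant bits. For example: sovTypes(0) == 1, sovTypes(127) == 1,
// sovTypes(128) == 2, sovTypes(300) == 2. Each "n += 1 + ..." term adds one
// byte for the field tag plus the varint-encoded value or length.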
n += 1 + sovTypes(uint64(m.LastHeightConsensusParamsChanged)) + } + l = len(m.LastResultsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.InitialHeight != 0 { + n += 1 + sovTypes(uint64(m.InitialHeight)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ABCIResponses) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ABCIResponses: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ABCIResponses: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTxs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeliverTxs = append(m.DeliverTxs, &types.ResponseDeliverTx{}) + if err := m.DeliverTxs[len(m.DeliverTxs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EndBlock == nil { + m.EndBlock = &types.ResponseEndBlock{} + } + if err := m.EndBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BeginBlock == nil { + m.BeginBlock = &types.ResponseBeginBlock{} + } + if err := m.BeginBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatorsInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorsInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorsInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidatorSet == nil { + m.ValidatorSet = &types1.ValidatorSet{} + } + if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeightChanged", wireType) + } + m.LastHeightChanged = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastHeightChanged |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsensusParamsInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusParamsInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusParamsInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeightChanged", wireType) + } + m.LastHeightChanged = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastHeightChanged |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Version) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Version: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Consensus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Consensus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Software", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Software = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *State) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: State: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: State: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockHeight", wireType) + } + m.LastBlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastBlockHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastBlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.LastBlockTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NextValidators == nil { + m.NextValidators = &types1.ValidatorSet{} + } + if err := m.NextValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Validators == nil { + m.Validators = &types1.ValidatorSet{} + } + if err := m.Validators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastValidators == nil { + m.LastValidators = &types1.ValidatorSet{} + } + if err := m.LastValidators.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeightValidatorsChanged", wireType) + } + m.LastHeightValidatorsChanged = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastHeightValidatorsChanged |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field LastHeightConsensusParamsChanged", wireType) + } + m.LastHeightConsensusParamsChanged = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastHeightConsensusParamsChanged |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastResultsHash = append(m.LastResultsHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastResultsHash == nil { + m.LastResultsHash = []byte{} + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) + if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) + } + m.InitialHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InitialHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 
{ + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/state/types.proto b/proto/tendermint/state/types.proto new file mode 100644 index 000000000..919da91e5 --- /dev/null +++ b/proto/tendermint/state/types.proto @@ -0,0 +1,75 @@ +syntax = "proto3"; +package tendermint.state; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/state"; + +import "gogoproto/gogo.proto"; +import "tendermint/abci/types.proto"; +import "tendermint/types/types.proto"; +import "tendermint/types/validator.proto"; +import "tendermint/types/params.proto"; +import "tendermint/version/types.proto"; +import "google/protobuf/timestamp.proto"; + +// ABCIResponses retains the responses +// of the various ABCI calls during block processing. +// It is persisted to disk for each height before calling Commit. +message ABCIResponses { + repeated tendermint.abci.ResponseDeliverTx deliver_txs = 1; + tendermint.abci.ResponseEndBlock end_block = 2; + tendermint.abci.ResponseBeginBlock begin_block = 3; +} + +// ValidatorsInfo represents the latest validator set, or the last height it changed +message ValidatorsInfo { + tendermint.types.ValidatorSet validator_set = 1; + int64 last_height_changed = 2; +} + +// ConsensusParamsInfo represents the latest consensus params, or the last height it changed +message ConsensusParamsInfo { + tendermint.types.ConsensusParams consensus_params = 1 [(gogoproto.nullable) = false]; + int64 last_height_changed = 2; +} + +message Version { + tendermint.version.Consensus consensus = 1 [(gogoproto.nullable) = false]; + string software = 2; +} + +message State { + Version version = 1 [(gogoproto.nullable) = false]; + + // immutable + string chain_id = 2 [(gogoproto.customname) = "ChainID"]; + int64 initial_height = 14; + + // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) + int64 last_block_height = 3; + tendermint.types.BlockID last_block_id = 4 + [(gogoproto.nullable) = false, (gogoproto.customname) = "LastBlockID"]; + google.protobuf.Timestamp last_block_time = 5 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + + // LastValidators is used to validate block.LastCommit. + // Validators are persisted to the database separately every time they change, + // so we can query for historical validator sets. + // Note that if s.LastBlockHeight causes a valset change, + // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + 1 + // Extra +1 due to nextValSet delay. + tendermint.types.ValidatorSet next_validators = 6; + tendermint.types.ValidatorSet validators = 7; + tendermint.types.ValidatorSet last_validators = 8; + int64 last_height_validators_changed = 9; + + // Consensus parameters used for validating blocks. + // Changes returned by EndBlock and updated after Commit. 
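// (Editor's aside, illustrative only; not part of the .proto file.)
// Unlike the validator set above, consensus params returned by EndBlock at
// height H take effect at H+1 with no extra one-block delay, so a change at
// height H is recorded as last_height_consensus_params_changed = H + 1,
// versus last_height_validators_changed = H + 1 + 1 for validators.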
+ tendermint.types.ConsensusParams consensus_params = 10 [(gogoproto.nullable) = false]; + int64 last_height_consensus_params_changed = 11; + + // Merkle root of the results from executing prev block + bytes last_results_hash = 12; + + // the latest AppHash we've received from calling abci.Commit() + bytes app_hash = 13; +} diff --git a/proto/tendermint/statesync/types.pb.go b/proto/tendermint/statesync/types.pb.go new file mode 100644 index 000000000..4bfe9605d --- /dev/null +++ b/proto/tendermint/statesync/types.pb.go @@ -0,0 +1,1643 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/statesync/types.proto + +package statesync + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Message struct { + // Types that are valid to be assigned to Sum: + // *Message_SnapshotsRequest + // *Message_SnapshotsResponse + // *Message_ChunkRequest + // *Message_ChunkResponse + Sum isMessage_Sum `protobuf_oneof:"sum"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_a1c2869546ca7914, []int{0} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +type isMessage_Sum interface { + isMessage_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Message_SnapshotsRequest struct { + SnapshotsRequest *SnapshotsRequest `protobuf:"bytes,1,opt,name=snapshots_request,json=snapshotsRequest,proto3,oneof" json:"snapshots_request,omitempty"` +} +type Message_SnapshotsResponse struct { + SnapshotsResponse *SnapshotsResponse `protobuf:"bytes,2,opt,name=snapshots_response,json=snapshotsResponse,proto3,oneof" json:"snapshots_response,omitempty"` +} +type Message_ChunkRequest struct { + ChunkRequest *ChunkRequest `protobuf:"bytes,3,opt,name=chunk_request,json=chunkRequest,proto3,oneof" json:"chunk_request,omitempty"` +} +type Message_ChunkResponse struct { + ChunkResponse *ChunkResponse `protobuf:"bytes,4,opt,name=chunk_response,json=chunkResponse,proto3,oneof" json:"chunk_response,omitempty"` +} + +func (*Message_SnapshotsRequest) isMessage_Sum() {} +func (*Message_SnapshotsResponse) isMessage_Sum() {} +func (*Message_ChunkRequest) isMessage_Sum() {} +func (*Message_ChunkResponse) isMessage_Sum() {} + +func (m *Message) GetSum() isMessage_Sum { + if m != nil { + return m.Sum 
+ } + return nil +} + +func (m *Message) GetSnapshotsRequest() *SnapshotsRequest { + if x, ok := m.GetSum().(*Message_SnapshotsRequest); ok { + return x.SnapshotsRequest + } + return nil +} + +func (m *Message) GetSnapshotsResponse() *SnapshotsResponse { + if x, ok := m.GetSum().(*Message_SnapshotsResponse); ok { + return x.SnapshotsResponse + } + return nil +} + +func (m *Message) GetChunkRequest() *ChunkRequest { + if x, ok := m.GetSum().(*Message_ChunkRequest); ok { + return x.ChunkRequest + } + return nil +} + +func (m *Message) GetChunkResponse() *ChunkResponse { + if x, ok := m.GetSum().(*Message_ChunkResponse); ok { + return x.ChunkResponse + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Message) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Message_SnapshotsRequest)(nil), + (*Message_SnapshotsResponse)(nil), + (*Message_ChunkRequest)(nil), + (*Message_ChunkResponse)(nil), + } +} + +type SnapshotsRequest struct { +} + +func (m *SnapshotsRequest) Reset() { *m = SnapshotsRequest{} } +func (m *SnapshotsRequest) String() string { return proto.CompactTextString(m) } +func (*SnapshotsRequest) ProtoMessage() {} +func (*SnapshotsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a1c2869546ca7914, []int{1} +} +func (m *SnapshotsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapshotsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SnapshotsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotsRequest.Merge(m, src) +} +func (m *SnapshotsRequest) XXX_Size() int { + return m.Size() +} +func (m *SnapshotsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SnapshotsRequest proto.InternalMessageInfo + +type SnapshotsResponse struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` + Chunks uint32 `protobuf:"varint,3,opt,name=chunks,proto3" json:"chunks,omitempty"` + Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` + Metadata []byte `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (m *SnapshotsResponse) Reset() { *m = SnapshotsResponse{} } +func (m *SnapshotsResponse) String() string { return proto.CompactTextString(m) } +func (*SnapshotsResponse) ProtoMessage() {} +func (*SnapshotsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a1c2869546ca7914, []int{2} +} +func (m *SnapshotsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SnapshotsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SnapshotsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SnapshotsResponse.Merge(m, src) +} +func (m *SnapshotsResponse) XXX_Size() int { + return m.Size() +} +func (m *SnapshotsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SnapshotsResponse.DiscardUnknown(m) +} + +var 
xxx_messageInfo_SnapshotsResponse proto.InternalMessageInfo + +func (m *SnapshotsResponse) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *SnapshotsResponse) GetFormat() uint32 { + if m != nil { + return m.Format + } + return 0 +} + +func (m *SnapshotsResponse) GetChunks() uint32 { + if m != nil { + return m.Chunks + } + return 0 +} + +func (m *SnapshotsResponse) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *SnapshotsResponse) GetMetadata() []byte { + if m != nil { + return m.Metadata + } + return nil +} + +type ChunkRequest struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` + Index uint32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` +} + +func (m *ChunkRequest) Reset() { *m = ChunkRequest{} } +func (m *ChunkRequest) String() string { return proto.CompactTextString(m) } +func (*ChunkRequest) ProtoMessage() {} +func (*ChunkRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a1c2869546ca7914, []int{3} +} +func (m *ChunkRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChunkRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChunkRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChunkRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChunkRequest.Merge(m, src) +} +func (m *ChunkRequest) XXX_Size() int { + return m.Size() +} +func (m *ChunkRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ChunkRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ChunkRequest proto.InternalMessageInfo + +func (m *ChunkRequest) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *ChunkRequest) GetFormat() uint32 { + if m != nil { + return m.Format + } + return 0 +} + +func (m *ChunkRequest) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +type ChunkResponse struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` + Index uint32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` + Chunk []byte `protobuf:"bytes,4,opt,name=chunk,proto3" json:"chunk,omitempty"` + Missing bool `protobuf:"varint,5,opt,name=missing,proto3" json:"missing,omitempty"` +} + +func (m *ChunkResponse) Reset() { *m = ChunkResponse{} } +func (m *ChunkResponse) String() string { return proto.CompactTextString(m) } +func (*ChunkResponse) ProtoMessage() {} +func (*ChunkResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a1c2869546ca7914, []int{4} +} +func (m *ChunkResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChunkResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChunkResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChunkResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChunkResponse.Merge(m, src) +} +func (m *ChunkResponse) XXX_Size() int { + return m.Size() +} +func (m *ChunkResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_ChunkResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ChunkResponse proto.InternalMessageInfo + +func (m *ChunkResponse) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *ChunkResponse) GetFormat() uint32 { + if m != nil { + return m.Format + } + return 0 +} + +func (m *ChunkResponse) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *ChunkResponse) GetChunk() []byte { + if m != nil { + return m.Chunk + } + return nil +} + +func (m *ChunkResponse) GetMissing() bool { + if m != nil { + return m.Missing + } + return false +} + +func init() { + proto.RegisterType((*Message)(nil), "tendermint.statesync.Message") + proto.RegisterType((*SnapshotsRequest)(nil), "tendermint.statesync.SnapshotsRequest") + proto.RegisterType((*SnapshotsResponse)(nil), "tendermint.statesync.SnapshotsResponse") + proto.RegisterType((*ChunkRequest)(nil), "tendermint.statesync.ChunkRequest") + proto.RegisterType((*ChunkResponse)(nil), "tendermint.statesync.ChunkResponse") +} + +func init() { proto.RegisterFile("tendermint/statesync/types.proto", fileDescriptor_a1c2869546ca7914) } + +var fileDescriptor_a1c2869546ca7914 = []byte{ + // 393 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0xcd, 0x6a, 0xdb, 0x40, + 0x18, 0x94, 0xfc, 0xcf, 0x57, 0xab, 0xd8, 0x8b, 0x29, 0xa2, 0x07, 0x61, 0x54, 0x68, 0x7b, 0x92, + 0xa0, 0x3d, 0xf6, 0xe6, 0x5e, 0x5c, 0x68, 0x2f, 0xdb, 0x18, 0x42, 0x2e, 0x61, 0x2d, 0x6f, 0x24, + 0x11, 0xb4, 0x52, 0xf4, 0xad, 0x20, 0x7e, 0x80, 0x9c, 0x72, 0xc9, 0x63, 0xe5, 0xe8, 0x63, 0xc8, + 0x29, 0xd8, 0x2f, 0x12, 0xb4, 0x92, 0x65, 0xc5, 0x31, 0x09, 0x81, 0xdc, 0x76, 0xc6, 0xe3, 0xd1, + 0xcc, 0xc0, 0x07, 0x63, 0xc9, 0xc5, 0x82, 0xa7, 0x51, 0x28, 0xa4, 0x8b, 0x92, 0x49, 0x8e, 0x4b, + 0xe1, 0xb9, 0x72, 0x99, 0x70, 0x74, 0x92, 0x34, 0x96, 0x31, 0x19, 0xed, 0x14, 0x4e, 0xa5, 0xb0, + 0xef, 0x1b, 0xd0, 0xfd, 0xc7, 0x11, 0x99, 0xcf, 0xc9, 0x0c, 0x86, 0x28, 0x58, 0x82, 0x41, 0x2c, + 0xf1, 0x34, 0xe5, 0x17, 0x19, 0x47, 0x69, 0xea, 0x63, 0xfd, 0xfb, 0x87, 0x1f, 0x5f, 0x9d, 0x43, + 0xff, 0x76, 0xfe, 0x6f, 0xe5, 0xb4, 0x50, 0x4f, 0x35, 0x3a, 0xc0, 0x3d, 0x8e, 0x1c, 0x03, 0xa9, + 0xdb, 0x62, 0x12, 0x0b, 0xe4, 0x66, 0x43, 0xf9, 0x7e, 0x7b, 0xd5, 0xb7, 0x90, 0x4f, 0x35, 0x3a, + 0xc4, 0x7d, 0x92, 0xfc, 0x01, 0xc3, 0x0b, 0x32, 0x71, 0x5e, 0x85, 0x6d, 0x2a, 0x53, 0xfb, 0xb0, + 0xe9, 0xef, 0x5c, 0xba, 0x0b, 0xda, 0xf7, 0x6a, 0x98, 0xfc, 0x85, 0x8f, 0x5b, 0xab, 0x32, 0x60, + 0x4b, 0x79, 0x7d, 0x79, 0xd1, 0xab, 0x0a, 0x67, 0x78, 0x75, 0x62, 0xd2, 0x86, 0x26, 0x66, 0x91, + 0x4d, 0x60, 0xb0, 0xbf, 0x90, 0x7d, 0xad, 0xc3, 0xf0, 0x59, 0x3d, 0xf2, 0x09, 0x3a, 0x01, 0x0f, + 0xfd, 0xa0, 0xd8, 0xbb, 0x45, 0x4b, 0x94, 0xf3, 0x67, 0x71, 0x1a, 0x31, 0xa9, 0xf6, 0x32, 0x68, + 0x89, 0x72, 0x5e, 0x7d, 0x11, 0x55, 0x65, 0x83, 0x96, 0x88, 0x10, 0x68, 0x05, 0x0c, 0x03, 0x15, + 0xbe, 0x4f, 0xd5, 0x9b, 0x7c, 0x86, 0x5e, 0xc4, 0x25, 0x5b, 0x30, 0xc9, 0xcc, 0xb6, 0xe2, 0x2b, + 0x6c, 0x1f, 0x41, 0xbf, 0x3e, 0xcb, 0x9b, 0x73, 0x8c, 0xa0, 0x1d, 0x8a, 0x05, 0xbf, 0x2c, 0x63, + 0x14, 0xc0, 0xbe, 0xd2, 0xc1, 0x78, 0xb2, 0xd0, 0xfb, 0xf8, 0xe6, 0xac, 0xea, 0x59, 0xd6, 0x2b, + 0x00, 0x31, 0xa1, 0x1b, 0x85, 0x88, 0xa1, 0xf0, 0x55, 0xbd, 0x1e, 0xdd, 0xc2, 0xc9, 0xec, 0x76, + 0x6d, 0xe9, 0xab, 0xb5, 0xa5, 0x3f, 0xac, 0x2d, 0xfd, 0x66, 0x63, 0x69, 0xab, 0x8d, 0xa5, 0xdd, + 0x6d, 0x2c, 0xed, 0xe4, 0x97, 0x1f, 0xca, 0x20, 0x9b, 0x3b, 0x5e, 0x1c, 0xb9, 0xb5, 0xcb, 0xa9, + 0x3d, 0xd5, 0xd1, 0xb8, 0x87, 0xae, 0x6a, 0xde, 
0x51, 0xbf, 0xfd, 0x7c, 0x0c, 0x00, 0x00, 0xff, + 0xff, 0xcc, 0x16, 0xc2, 0x8b, 0x74, 0x03, 0x00, 0x00, +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Message_SnapshotsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SnapshotsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SnapshotsRequest != nil { + { + size, err := m.SnapshotsRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Message_SnapshotsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_SnapshotsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SnapshotsResponse != nil { + { + size, err := m.SnapshotsResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Message_ChunkRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_ChunkRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ChunkRequest != nil { + { + size, err := m.ChunkRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Message_ChunkResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_ChunkResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ChunkResponse != nil { + { + size, err := m.ChunkResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *SnapshotsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SnapshotsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *SnapshotsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotsResponse) MarshalTo(dAtA []byte) (int, error) { 
+ size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SnapshotsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Metadata) > 0 { + i -= len(m.Metadata) + copy(dAtA[i:], m.Metadata) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Metadata))) + i-- + dAtA[i] = 0x2a + } + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x22 + } + if m.Chunks != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Chunks)) + i-- + dAtA[i] = 0x18 + } + if m.Format != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ChunkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChunkRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChunkRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x18 + } + if m.Format != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ChunkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChunkResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChunkResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Missing { + i-- + if m.Missing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.Chunk) > 0 { + i -= len(m.Chunk) + copy(dAtA[i:], m.Chunk) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Chunk))) + i-- + dAtA[i] = 0x22 + } + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x18 + } + if m.Format != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *Message_SnapshotsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SnapshotsRequest != nil { + l = m.SnapshotsRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_SnapshotsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SnapshotsResponse != nil { + l = m.SnapshotsResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_ChunkRequest) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ChunkRequest != nil { + l = m.ChunkRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_ChunkResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ChunkResponse != nil { + l = m.ChunkResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *SnapshotsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *SnapshotsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Format != 0 { + n += 1 + sovTypes(uint64(m.Format)) + } + if m.Chunks != 0 { + n += 1 + sovTypes(uint64(m.Chunks)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Metadata) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ChunkRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Format != 0 { + n += 1 + sovTypes(uint64(m.Format)) + } + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + return n +} + +func (m *ChunkResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Format != 0 { + n += 1 + sovTypes(uint64(m.Format)) + } + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + l = len(m.Chunk) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Missing { + n += 2 + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SnapshotsRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SnapshotsRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SnapshotsRequest{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SnapshotsResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SnapshotsResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_SnapshotsResponse{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ChunkRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_ChunkRequest{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ChunkResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_ChunkResponse{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + m.Format = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Format |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + m.Chunks = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Chunks |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metadata = append(m.Metadata[:0], dAtA[iNdEx:postIndex]...) 
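// (Editor's note, added commentary.) The append-into-m.Metadata[:0] pattern
// above reuses the field's existing backing array when it has enough
// capacity, while still copying the payload out of dAtA, so the decoded
// message never aliases the caller's input buffer.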
+ if m.Metadata == nil { + m.Metadata = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChunkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChunkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChunkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + m.Format = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Format |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChunkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChunkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChunkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
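// (Editor's note, added commentary.) This loop is protobuf's base-128 varint
// decoding: each byte contributes its low 7 bits at the current shift, and a
// set high bit (>= 0x80) means another byte follows. For example, 300 decodes
// from the bytes 0xAC 0x02: 0xAC&0x7F = 44 at shift 0, then 0x02 = 2 at
// shift 7, giving 44 + (2 << 7) = 300.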
b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + m.Format = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Format |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunk = append(m.Chunk[:0], dAtA[iNdEx:postIndex]...) + if m.Chunk == nil { + m.Chunk = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Missing", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Missing = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth 
== 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/statesync/types.proto b/proto/tendermint/statesync/types.proto new file mode 100644 index 000000000..8d4a714c1 --- /dev/null +++ b/proto/tendermint/statesync/types.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; +package tendermint.statesync; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/statesync"; + +message Message { + oneof sum { + SnapshotsRequest snapshots_request = 1; + SnapshotsResponse snapshots_response = 2; + ChunkRequest chunk_request = 3; + ChunkResponse chunk_response = 4; + } +} + +message SnapshotsRequest {} + +message SnapshotsResponse { + uint64 height = 1; + uint32 format = 2; + uint32 chunks = 3; + bytes hash = 4; + bytes metadata = 5; +} + +message ChunkRequest { + uint64 height = 1; + uint32 format = 2; + uint32 index = 3; +} + +message ChunkResponse { + uint64 height = 1; + uint32 format = 2; + uint32 index = 3; + bytes chunk = 4; + bool missing = 5; +} diff --git a/proto/tendermint/store/types.pb.go b/proto/tendermint/store/types.pb.go new file mode 100644 index 000000000..c18c88fa4 --- /dev/null +++ b/proto/tendermint/store/types.pb.go @@ -0,0 +1,337 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/store/types.proto + +package store + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
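// (Editor's aside, not part of the generated file.) A minimal round-trip
// sketch with the BlockStoreState message generated below; the variable names
// are illustrative, and only the generated Marshal/Unmarshal in this file are
// assumed:
//
//	bss := BlockStoreState{Base: 1, Height: 10}
//	bz, err := bss.Marshal() // wire bytes: 0x08 0x01 0x10 0x0a
//	if err != nil {
//		panic(err)
//	}
//	var out BlockStoreState
//	if err := out.Unmarshal(bz); err != nil {
//		panic(err)
//	}
//
// out then equals bss. Zero-valued fields (for example Base == 0) are skipped
// on the wire entirely, which is why MarshalToSizedBuffer and Size both guard
// with != 0 checks.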
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type BlockStoreState struct { + Base int64 `protobuf:"varint,1,opt,name=base,proto3" json:"base,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *BlockStoreState) Reset() { *m = BlockStoreState{} } +func (m *BlockStoreState) String() string { return proto.CompactTextString(m) } +func (*BlockStoreState) ProtoMessage() {} +func (*BlockStoreState) Descriptor() ([]byte, []int) { + return fileDescriptor_ff9e53a0a74267f7, []int{0} +} +func (m *BlockStoreState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockStoreState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockStoreState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockStoreState) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockStoreState.Merge(m, src) +} +func (m *BlockStoreState) XXX_Size() int { + return m.Size() +} +func (m *BlockStoreState) XXX_DiscardUnknown() { + xxx_messageInfo_BlockStoreState.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockStoreState proto.InternalMessageInfo + +func (m *BlockStoreState) GetBase() int64 { + if m != nil { + return m.Base + } + return 0 +} + +func (m *BlockStoreState) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func init() { + proto.RegisterType((*BlockStoreState)(nil), "tendermint.store.BlockStoreState") +} + +func init() { proto.RegisterFile("tendermint/store/types.proto", fileDescriptor_ff9e53a0a74267f7) } + +var fileDescriptor_ff9e53a0a74267f7 = []byte{ + // 165 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x29, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0xd5, 0x2f, 0xa9, 0x2c, + 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x40, 0xc8, 0xea, 0x81, 0x65, 0x95, + 0x6c, 0xb9, 0xf8, 0x9d, 0x72, 0xf2, 0x93, 0xb3, 0x83, 0x41, 0xbc, 0xe0, 0x92, 0xc4, 0x92, 0x54, + 0x21, 0x21, 0x2e, 0x96, 0xa4, 0xc4, 0xe2, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xe6, 0x20, 0x30, + 0x5b, 0x48, 0x8c, 0x8b, 0x2d, 0x23, 0x35, 0x33, 0x3d, 0xa3, 0x44, 0x82, 0x09, 0x2c, 0x0a, 0xe5, + 0x39, 0x05, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, + 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x79, 0x7a, 0x66, + 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0x92, 0x9b, 0x90, 0x98, 0x60, 0x27, 0xe9, + 0xa3, 0xbb, 0x37, 0x89, 0x0d, 0x2c, 0x6e, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xef, 0xa6, 0x30, + 0x63, 0xca, 0x00, 0x00, 0x00, +} + +func (m *BlockStoreState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockStoreState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockStoreState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if m.Base != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Base)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func 
encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BlockStoreState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Base != 0 { + n += 1 + sovTypes(uint64(m.Base)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *BlockStoreState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockStoreState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockStoreState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Base", wireType) + } + m.Base = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Base |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx 
+= 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/store/types.proto b/proto/tendermint/store/types.proto new file mode 100644 index 000000000..af2f97ad0 --- /dev/null +++ b/proto/tendermint/store/types.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; +package tendermint.store; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/store"; + +message BlockStoreState { + int64 base = 1; + int64 height = 2; +} diff --git a/proto/tendermint/types/block.pb.go b/proto/tendermint/types/block.pb.go new file mode 100644 index 000000000..aacb90fab --- /dev/null +++ b/proto/tendermint/types/block.pb.go @@ -0,0 +1,493 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/types/block.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Block struct { + Header Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header"` + Data Data `protobuf:"bytes,2,opt,name=data,proto3" json:"data"` + Evidence EvidenceList `protobuf:"bytes,3,opt,name=evidence,proto3" json:"evidence"` + LastCommit *Commit `protobuf:"bytes,4,opt,name=last_commit,json=lastCommit,proto3" json:"last_commit,omitempty"` +} + +func (m *Block) Reset() { *m = Block{} } +func (m *Block) String() string { return proto.CompactTextString(m) } +func (*Block) ProtoMessage() {} +func (*Block) Descriptor() ([]byte, []int) { + return fileDescriptor_70840e82f4357ab1, []int{0} +} +func (m *Block) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Block.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Block.Merge(m, src) +} +func (m *Block) XXX_Size() int { + return m.Size() +} +func (m *Block) XXX_DiscardUnknown() { + xxx_messageInfo_Block.DiscardUnknown(m) +} + +var xxx_messageInfo_Block proto.InternalMessageInfo + +func (m *Block) GetHeader() Header { + if m != nil { + return m.Header + } + return Header{} +} + +func (m *Block) GetData() Data { + if m != nil { + return m.Data + } + return Data{} +} + +func (m *Block) GetEvidence() EvidenceList { + if m != nil { + return m.Evidence + } + return EvidenceList{} +} + +func (m *Block) GetLastCommit() *Commit { + if m != nil { + return m.LastCommit + } + return nil +} + +func init() { + proto.RegisterType((*Block)(nil), 
"tendermint.types.Block") +} + +func init() { proto.RegisterFile("tendermint/types/block.proto", fileDescriptor_70840e82f4357ab1) } + +var fileDescriptor_70840e82f4357ab1 = []byte{ + // 266 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x29, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xca, 0xc9, + 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x40, 0xc8, 0xea, 0x81, 0x65, 0xa5, + 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x14, 0xa6, 0x29, 0x60, + 0x12, 0x2a, 0x2b, 0x8f, 0x21, 0x9b, 0x5a, 0x96, 0x99, 0x92, 0x9a, 0x97, 0x9c, 0x0a, 0x51, 0xa0, + 0xf4, 0x8e, 0x91, 0x8b, 0xd5, 0x09, 0x64, 0xad, 0x90, 0x19, 0x17, 0x5b, 0x46, 0x6a, 0x62, 0x4a, + 0x6a, 0x91, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xb7, 0x91, 0x84, 0x1e, 0xba, 0x0b, 0xf4, 0x3c, 0xc0, + 0xf2, 0x4e, 0x2c, 0x27, 0xee, 0xc9, 0x33, 0x04, 0x41, 0x55, 0x0b, 0x19, 0x70, 0xb1, 0xa4, 0x24, + 0x96, 0x24, 0x4a, 0x30, 0x81, 0x75, 0x89, 0x61, 0xea, 0x72, 0x49, 0x2c, 0x49, 0x84, 0xea, 0x01, + 0xab, 0x14, 0x72, 0xe0, 0xe2, 0x80, 0xb9, 0x42, 0x82, 0x19, 0xac, 0x4b, 0x0e, 0x53, 0x97, 0x2b, + 0x54, 0x85, 0x4f, 0x66, 0x71, 0x09, 0x54, 0x37, 0x5c, 0x97, 0x90, 0x25, 0x17, 0x77, 0x4e, 0x62, + 0x71, 0x49, 0x7c, 0x72, 0x7e, 0x6e, 0x6e, 0x66, 0x89, 0x04, 0x0b, 0x2e, 0x07, 0x3b, 0x83, 0xe5, + 0x83, 0xb8, 0x40, 0x8a, 0x21, 0x6c, 0xa7, 0xc0, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, + 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, + 0x63, 0x88, 0x32, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x47, 0x0e, + 0x36, 0x04, 0x13, 0x12, 0xf8, 0xe8, 0x41, 0x9a, 0xc4, 0x06, 0x16, 0x37, 0x06, 0x04, 0x00, 0x00, + 0xff, 0xff, 0x79, 0x8c, 0xb5, 0x43, 0xd1, 0x01, 0x00, 0x00, +} + +func (m *Block) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Block) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Block) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastCommit != nil { + { + size, err := m.LastCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBlock(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.Evidence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBlock(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBlock(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBlock(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintBlock(dAtA []byte, offset int, v uint64) int { + offset -= sovBlock(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Block) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Header.Size() + n += 1 + l + sovBlock(uint64(l)) + l = m.Data.Size() + n += 1 + l + sovBlock(uint64(l)) + l = 
m.Evidence.Size() + n += 1 + l + sovBlock(uint64(l)) + if m.LastCommit != nil { + l = m.LastCommit.Size() + n += 1 + l + sovBlock(uint64(l)) + } + return n +} + +func sovBlock(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozBlock(x uint64) (n int) { + return sovBlock(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Block) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBlock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Block: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Block: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBlock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBlock + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBlock + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBlock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBlock + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBlock + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBlock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBlock + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBlock + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Evidence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBlock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBlock + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBlock + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.LastCommit == nil { + m.LastCommit = &Commit{} + } + if err := m.LastCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBlock(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthBlock + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthBlock + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipBlock(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBlock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBlock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBlock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthBlock + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupBlock + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthBlock + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthBlock = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBlock = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupBlock = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/types/block.proto b/proto/tendermint/types/block.proto new file mode 100644 index 000000000..84e9bb15d --- /dev/null +++ b/proto/tendermint/types/block.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package tendermint.types; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + +import "gogoproto/gogo.proto"; +import "tendermint/types/types.proto"; +import "tendermint/types/evidence.proto"; + +message Block { + Header header = 1 [(gogoproto.nullable) = false]; + Data data = 2 [(gogoproto.nullable) = false]; + tendermint.types.EvidenceList evidence = 3 [(gogoproto.nullable) = false]; + Commit last_commit = 4; +} diff --git a/proto/tendermint/types/canonical.pb.go b/proto/tendermint/types/canonical.pb.go new file mode 100644 index 000000000..38b17ddb1 --- /dev/null +++ b/proto/tendermint/types/canonical.pb.go @@ -0,0 +1,1390 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/types/canonical.proto + +package types + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. 
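+// The Canonical* types below are the deterministic representations of
+// BlockID, Proposal, and Vote used when producing sign bytes: height and
+// round are written as 8-byte little-endian sfixed64 values (see the
+// encoding_binary.LittleEndian.PutUint64 calls in the marshalers) rather
+// than varints, so the encoded layout never depends on the magnitude of
+// the value being signed.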
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type CanonicalBlockID struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + PartSetHeader CanonicalPartSetHeader `protobuf:"bytes,2,opt,name=part_set_header,json=partSetHeader,proto3" json:"part_set_header"` +} + +func (m *CanonicalBlockID) Reset() { *m = CanonicalBlockID{} } +func (m *CanonicalBlockID) String() string { return proto.CompactTextString(m) } +func (*CanonicalBlockID) ProtoMessage() {} +func (*CanonicalBlockID) Descriptor() ([]byte, []int) { + return fileDescriptor_8d1a1a84ff7267ed, []int{0} +} +func (m *CanonicalBlockID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CanonicalBlockID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CanonicalBlockID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CanonicalBlockID) XXX_Merge(src proto.Message) { + xxx_messageInfo_CanonicalBlockID.Merge(m, src) +} +func (m *CanonicalBlockID) XXX_Size() int { + return m.Size() +} +func (m *CanonicalBlockID) XXX_DiscardUnknown() { + xxx_messageInfo_CanonicalBlockID.DiscardUnknown(m) +} + +var xxx_messageInfo_CanonicalBlockID proto.InternalMessageInfo + +func (m *CanonicalBlockID) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *CanonicalBlockID) GetPartSetHeader() CanonicalPartSetHeader { + if m != nil { + return m.PartSetHeader + } + return CanonicalPartSetHeader{} +} + +type CanonicalPartSetHeader struct { + Total uint32 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *CanonicalPartSetHeader) Reset() { *m = CanonicalPartSetHeader{} } +func (m *CanonicalPartSetHeader) String() string { return proto.CompactTextString(m) } +func (*CanonicalPartSetHeader) ProtoMessage() {} +func (*CanonicalPartSetHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_8d1a1a84ff7267ed, []int{1} +} +func (m *CanonicalPartSetHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CanonicalPartSetHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CanonicalPartSetHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CanonicalPartSetHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_CanonicalPartSetHeader.Merge(m, src) +} +func (m *CanonicalPartSetHeader) XXX_Size() int { + return m.Size() +} +func (m *CanonicalPartSetHeader) XXX_DiscardUnknown() { + xxx_messageInfo_CanonicalPartSetHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_CanonicalPartSetHeader proto.InternalMessageInfo + +func (m *CanonicalPartSetHeader) GetTotal() uint32 { + if m != nil { + return m.Total + } + return 0 +} + +func (m *CanonicalPartSetHeader) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +type CanonicalProposal 
struct { + Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` + Height int64 `protobuf:"fixed64,2,opt,name=height,proto3" json:"height,omitempty"` + Round int64 `protobuf:"fixed64,3,opt,name=round,proto3" json:"round,omitempty"` + POLRound int64 `protobuf:"varint,4,opt,name=pol_round,json=polRound,proto3" json:"pol_round,omitempty"` + BlockID *CanonicalBlockID `protobuf:"bytes,5,opt,name=block_id,json=blockId,proto3" json:"block_id,omitempty"` + Timestamp time.Time `protobuf:"bytes,6,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + ChainID string `protobuf:"bytes,7,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *CanonicalProposal) Reset() { *m = CanonicalProposal{} } +func (m *CanonicalProposal) String() string { return proto.CompactTextString(m) } +func (*CanonicalProposal) ProtoMessage() {} +func (*CanonicalProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_8d1a1a84ff7267ed, []int{2} +} +func (m *CanonicalProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CanonicalProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CanonicalProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CanonicalProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_CanonicalProposal.Merge(m, src) +} +func (m *CanonicalProposal) XXX_Size() int { + return m.Size() +} +func (m *CanonicalProposal) XXX_DiscardUnknown() { + xxx_messageInfo_CanonicalProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_CanonicalProposal proto.InternalMessageInfo + +func (m *CanonicalProposal) GetType() SignedMsgType { + if m != nil { + return m.Type + } + return UnknownType +} + +func (m *CanonicalProposal) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *CanonicalProposal) GetRound() int64 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *CanonicalProposal) GetPOLRound() int64 { + if m != nil { + return m.POLRound + } + return 0 +} + +func (m *CanonicalProposal) GetBlockID() *CanonicalBlockID { + if m != nil { + return m.BlockID + } + return nil +} + +func (m *CanonicalProposal) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *CanonicalProposal) GetChainID() string { + if m != nil { + return m.ChainID + } + return "" +} + +type CanonicalVote struct { + Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` + Height int64 `protobuf:"fixed64,2,opt,name=height,proto3" json:"height,omitempty"` + Round int64 `protobuf:"fixed64,3,opt,name=round,proto3" json:"round,omitempty"` + BlockID *CanonicalBlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id,omitempty"` + Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + ChainID string `protobuf:"bytes,6,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *CanonicalVote) Reset() { *m = CanonicalVote{} } +func (m *CanonicalVote) String() string { return proto.CompactTextString(m) } +func (*CanonicalVote) ProtoMessage() {} +func (*CanonicalVote) Descriptor() ([]byte, []int) { + return fileDescriptor_8d1a1a84ff7267ed, []int{3} +} +func (m *CanonicalVote) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *CanonicalVote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CanonicalVote.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CanonicalVote) XXX_Merge(src proto.Message) { + xxx_messageInfo_CanonicalVote.Merge(m, src) +} +func (m *CanonicalVote) XXX_Size() int { + return m.Size() +} +func (m *CanonicalVote) XXX_DiscardUnknown() { + xxx_messageInfo_CanonicalVote.DiscardUnknown(m) +} + +var xxx_messageInfo_CanonicalVote proto.InternalMessageInfo + +func (m *CanonicalVote) GetType() SignedMsgType { + if m != nil { + return m.Type + } + return UnknownType +} + +func (m *CanonicalVote) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *CanonicalVote) GetRound() int64 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *CanonicalVote) GetBlockID() *CanonicalBlockID { + if m != nil { + return m.BlockID + } + return nil +} + +func (m *CanonicalVote) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *CanonicalVote) GetChainID() string { + if m != nil { + return m.ChainID + } + return "" +} + +func init() { + proto.RegisterType((*CanonicalBlockID)(nil), "tendermint.types.CanonicalBlockID") + proto.RegisterType((*CanonicalPartSetHeader)(nil), "tendermint.types.CanonicalPartSetHeader") + proto.RegisterType((*CanonicalProposal)(nil), "tendermint.types.CanonicalProposal") + proto.RegisterType((*CanonicalVote)(nil), "tendermint.types.CanonicalVote") +} + +func init() { proto.RegisterFile("tendermint/types/canonical.proto", fileDescriptor_8d1a1a84ff7267ed) } + +var fileDescriptor_8d1a1a84ff7267ed = []byte{ + // 487 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x53, 0x3d, 0x6f, 0xd3, 0x40, + 0x18, 0xce, 0xa5, 0x4e, 0xe2, 0x5c, 0x1b, 0x08, 0xa7, 0xaa, 0xb2, 0x22, 0x64, 0x5b, 0x1e, 0x90, + 0x59, 0x6c, 0xa9, 0x1d, 0xd8, 0x5d, 0x06, 0x82, 0x40, 0x94, 0x6b, 0xd5, 0x81, 0x25, 0xba, 0xd8, + 0x87, 0x6d, 0xe1, 0xf8, 0x4e, 0xf6, 0x65, 0xe8, 0xc2, 0x6f, 0xe8, 0xef, 0xe0, 0x97, 0x74, 0xec, + 0x08, 0x4b, 0x40, 0xce, 0x1f, 0x41, 0x77, 0x4e, 0xec, 0xa8, 0x01, 0x16, 0x10, 0xcb, 0xe9, 0xfd, + 0x78, 0xee, 0x79, 0x1f, 0x3d, 0xaf, 0x5e, 0x68, 0x0b, 0x9a, 0x47, 0xb4, 0x58, 0xa4, 0xb9, 0xf0, + 0xc5, 0x0d, 0xa7, 0xa5, 0x1f, 0x92, 0x9c, 0xe5, 0x69, 0x48, 0x32, 0x8f, 0x17, 0x4c, 0x30, 0x34, + 0x6e, 0x11, 0x9e, 0x42, 0x4c, 0x8e, 0x63, 0x16, 0x33, 0xd5, 0xf4, 0x65, 0x54, 0xe3, 0x26, 0x4f, + 0xf7, 0x98, 0xd4, 0xbb, 0xe9, 0x5a, 0x31, 0x63, 0x71, 0x46, 0x7d, 0x95, 0xcd, 0x97, 0x1f, 0x7d, + 0x91, 0x2e, 0x68, 0x29, 0xc8, 0x82, 0xd7, 0x00, 0xe7, 0x33, 0x1c, 0x9f, 0x6f, 0x27, 0x07, 0x19, + 0x0b, 0x3f, 0x4d, 0x5f, 0x22, 0x04, 0xb5, 0x84, 0x94, 0x89, 0x01, 0x6c, 0xe0, 0x1e, 0x61, 0x15, + 0xa3, 0x6b, 0xf8, 0x98, 0x93, 0x42, 0xcc, 0x4a, 0x2a, 0x66, 0x09, 0x25, 0x11, 0x2d, 0x8c, 0xae, + 0x0d, 0xdc, 0xc3, 0x53, 0xd7, 0x7b, 0x28, 0xd4, 0x6b, 0x08, 0x2f, 0x48, 0x21, 0x2e, 0xa9, 0x78, + 0xa5, 0xf0, 0x81, 0x76, 0xb7, 0xb2, 0x3a, 0x78, 0xc4, 0x77, 0x8b, 0x4e, 0x00, 0x4f, 0x7e, 0x0d, + 0x47, 0xc7, 0xb0, 0x27, 0x98, 0x20, 0x99, 0x92, 0x31, 0xc2, 0x75, 0xd2, 0x68, 0xeb, 0xb6, 0xda, + 0x9c, 0x6f, 0x5d, 0xf8, 0xa4, 0x25, 0x29, 0x18, 0x67, 0x25, 0xc9, 0xd0, 0x19, 0xd4, 0xa4, 0x1c, + 0xf5, 0xfd, 0xd1, 0xa9, 0xb5, 0x2f, 0xf3, 0x32, 0x8d, 0x73, 0x1a, 0xbd, 0x2d, 0xe3, 0xab, 0x1b, + 0x4e, 0xb1, 
0x02, 0xa3, 0x13, 0xd8, 0x4f, 0x68, 0x1a, 0x27, 0x42, 0x0d, 0x18, 0xe3, 0x4d, 0x26, + 0xc5, 0x14, 0x6c, 0x99, 0x47, 0xc6, 0x81, 0x2a, 0xd7, 0x09, 0x7a, 0x0e, 0x87, 0x9c, 0x65, 0xb3, + 0xba, 0xa3, 0xd9, 0xc0, 0x3d, 0x08, 0x8e, 0xaa, 0x95, 0xa5, 0x5f, 0xbc, 0x7b, 0x83, 0x65, 0x0d, + 0xeb, 0x9c, 0x65, 0x2a, 0x42, 0xaf, 0xa1, 0x3e, 0x97, 0xf6, 0xce, 0xd2, 0xc8, 0xe8, 0x29, 0xe3, + 0x9c, 0x3f, 0x18, 0xb7, 0xd9, 0x44, 0x70, 0x58, 0xad, 0xac, 0xc1, 0x26, 0xc1, 0x03, 0x45, 0x30, + 0x8d, 0x50, 0x00, 0x87, 0xcd, 0x1a, 0x8d, 0xbe, 0x22, 0x9b, 0x78, 0xf5, 0xa2, 0xbd, 0xed, 0xa2, + 0xbd, 0xab, 0x2d, 0x22, 0xd0, 0xa5, 0xef, 0xb7, 0xdf, 0x2d, 0x80, 0xdb, 0x6f, 0xe8, 0x19, 0xd4, + 0xc3, 0x84, 0xa4, 0xb9, 0xd4, 0x33, 0xb0, 0x81, 0x3b, 0xac, 0x67, 0x9d, 0xcb, 0x9a, 0x9c, 0xa5, + 0x9a, 0xd3, 0xc8, 0xf9, 0xd2, 0x85, 0xa3, 0x46, 0xd6, 0x35, 0x13, 0xf4, 0x7f, 0xf8, 0xba, 0x6b, + 0x96, 0xf6, 0x2f, 0xcd, 0xea, 0xfd, 0xbd, 0x59, 0xfd, 0xdf, 0x9b, 0x15, 0xbc, 0xbf, 0xab, 0x4c, + 0x70, 0x5f, 0x99, 0xe0, 0x47, 0x65, 0x82, 0xdb, 0xb5, 0xd9, 0xb9, 0x5f, 0x9b, 0x9d, 0xaf, 0x6b, + 0xb3, 0xf3, 0xe1, 0x45, 0x9c, 0x8a, 0x64, 0x39, 0xf7, 0x42, 0xb6, 0xf0, 0x77, 0x0f, 0xb6, 0x0d, + 0xeb, 0xc3, 0x7e, 0x78, 0xcc, 0xf3, 0xbe, 0xaa, 0x9f, 0xfd, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x6d, + 0xdd, 0x12, 0x5d, 0x31, 0x04, 0x00, 0x00, +} + +func (m *CanonicalBlockID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CanonicalBlockID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CanonicalBlockID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.PartSetHeader.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCanonical(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintCanonical(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CanonicalPartSetHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CanonicalPartSetHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CanonicalPartSetHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintCanonical(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x12 + } + if m.Total != 0 { + i = encodeVarintCanonical(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CanonicalProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CanonicalProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CanonicalProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainID) > 0 { + i -= len(m.ChainID) + copy(dAtA[i:], m.ChainID) + i = 
encodeVarintCanonical(dAtA, i, uint64(len(m.ChainID))) + i-- + dAtA[i] = 0x3a + } + n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintCanonical(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x32 + if m.BlockID != nil { + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCanonical(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.POLRound != 0 { + i = encodeVarintCanonical(dAtA, i, uint64(m.POLRound)) + i-- + dAtA[i] = 0x20 + } + if m.Round != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Round)) + i-- + dAtA[i] = 0x19 + } + if m.Height != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Height)) + i-- + dAtA[i] = 0x11 + } + if m.Type != 0 { + i = encodeVarintCanonical(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CanonicalVote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CanonicalVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CanonicalVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainID) > 0 { + i -= len(m.ChainID) + copy(dAtA[i:], m.ChainID) + i = encodeVarintCanonical(dAtA, i, uint64(len(m.ChainID))) + i-- + dAtA[i] = 0x32 + } + n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err4 != nil { + return 0, err4 + } + i -= n4 + i = encodeVarintCanonical(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x2a + if m.BlockID != nil { + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCanonical(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Round != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Round)) + i-- + dAtA[i] = 0x19 + } + if m.Height != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Height)) + i-- + dAtA[i] = 0x11 + } + if m.Type != 0 { + i = encodeVarintCanonical(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintCanonical(dAtA []byte, offset int, v uint64) int { + offset -= sovCanonical(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CanonicalBlockID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovCanonical(uint64(l)) + } + l = m.PartSetHeader.Size() + n += 1 + l + sovCanonical(uint64(l)) + return n +} + +func (m *CanonicalPartSetHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Total != 0 { + n += 1 + sovCanonical(uint64(m.Total)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovCanonical(uint64(l)) + } + return n +} + +func (m *CanonicalProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovCanonical(uint64(m.Type)) + } + if m.Height != 0 { + n += 9 + } + if m.Round != 0 { + n += 9 + } + if m.POLRound != 0 { + 
n += 1 + sovCanonical(uint64(m.POLRound)) + } + if m.BlockID != nil { + l = m.BlockID.Size() + n += 1 + l + sovCanonical(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovCanonical(uint64(l)) + l = len(m.ChainID) + if l > 0 { + n += 1 + l + sovCanonical(uint64(l)) + } + return n +} + +func (m *CanonicalVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovCanonical(uint64(m.Type)) + } + if m.Height != 0 { + n += 9 + } + if m.Round != 0 { + n += 9 + } + if m.BlockID != nil { + l = m.BlockID.Size() + n += 1 + l + sovCanonical(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovCanonical(uint64(l)) + l = len(m.ChainID) + if l > 0 { + n += 1 + l + sovCanonical(uint64(l)) + } + return n +} + +func sovCanonical(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCanonical(x uint64) (n int) { + return sovCanonical(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CanonicalBlockID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CanonicalBlockID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CanonicalBlockID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
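+ // append into m.Hash[:0] copies the bytes out of dAtA, so the decoded
+ // message never aliases the caller's input buffer; the nil check below
+ // turns an explicitly encoded empty field into []byte{} rather than
+ // leaving it nil.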
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PartSetHeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PartSetHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCanonical(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CanonicalPartSetHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CanonicalPartSetHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CanonicalPartSetHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCanonical(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CanonicalProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CanonicalProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CanonicalProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Height = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Round = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field POLRound", wireType) + } + m.POLRound = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.POLRound |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlockID == nil { + m.BlockID = &CanonicalBlockID{} + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 
{ + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCanonical(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CanonicalVote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CanonicalVote: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CanonicalVote: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Height = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Round = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.BlockID == nil { + m.BlockID = &CanonicalBlockID{} + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCanonical(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCanonical(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCanonical + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCanonical + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCanonical + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCanonical + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCanonical + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCanonical + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCanonical = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCanonical = fmt.Errorf("proto: integer overflow") + 
ErrUnexpectedEndOfGroupCanonical = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/types/canonical.proto b/proto/tendermint/types/canonical.proto new file mode 100644 index 000000000..e88fd6ffe --- /dev/null +++ b/proto/tendermint/types/canonical.proto @@ -0,0 +1,37 @@ +syntax = "proto3"; +package tendermint.types; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + +import "gogoproto/gogo.proto"; +import "tendermint/types/types.proto"; +import "google/protobuf/timestamp.proto"; + +message CanonicalBlockID { + bytes hash = 1; + CanonicalPartSetHeader part_set_header = 2 [(gogoproto.nullable) = false]; +} + +message CanonicalPartSetHeader { + uint32 total = 1; + bytes hash = 2; +} + +message CanonicalProposal { + SignedMsgType type = 1; // type alias for byte + sfixed64 height = 2; // canonicalization requires fixed size encoding here + sfixed64 round = 3; // canonicalization requires fixed size encoding here + int64 pol_round = 4 [(gogoproto.customname) = "POLRound"]; + CanonicalBlockID block_id = 5 [(gogoproto.customname) = "BlockID"]; + google.protobuf.Timestamp timestamp = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + string chain_id = 7 [(gogoproto.customname) = "ChainID"]; +} + +message CanonicalVote { + SignedMsgType type = 1; // type alias for byte + sfixed64 height = 2; // canonicalization requires fixed size encoding here + sfixed64 round = 3; // canonicalization requires fixed size encoding here + CanonicalBlockID block_id = 4 [(gogoproto.customname) = "BlockID"]; + google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + string chain_id = 6 [(gogoproto.customname) = "ChainID"]; +} diff --git a/proto/tendermint/types/events.pb.go b/proto/tendermint/types/events.pb.go new file mode 100644 index 000000000..1c49aef64 --- /dev/null +++ b/proto/tendermint/types/events.pb.go @@ -0,0 +1,389 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/types/events.proto + +package types + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
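+// Note on canonical.proto above: height and round are declared sfixed64
+// because, as the file's own comments say, canonicalization requires fixed
+// size encoding. Illustration using sovCanonical, the varint width helper
+// generated in canonical.pb.go:
+//
+//	sovCanonical(1)       // 1 byte as a varint
+//	sovCanonical(1 << 62) // 9 bytes as a varint
+//
+// whereas an sfixed64 field always costs exactly 9 bytes (1 tag byte plus
+// 8 data bytes), which is why Size() adds a flat n += 9 for Height and Round.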
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type EventDataRoundState struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + Step string `protobuf:"bytes,3,opt,name=step,proto3" json:"step,omitempty"` +} + +func (m *EventDataRoundState) Reset() { *m = EventDataRoundState{} } +func (m *EventDataRoundState) String() string { return proto.CompactTextString(m) } +func (*EventDataRoundState) ProtoMessage() {} +func (*EventDataRoundState) Descriptor() ([]byte, []int) { + return fileDescriptor_72cfafd446dedf7c, []int{0} +} +func (m *EventDataRoundState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventDataRoundState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventDataRoundState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventDataRoundState) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventDataRoundState.Merge(m, src) +} +func (m *EventDataRoundState) XXX_Size() int { + return m.Size() +} +func (m *EventDataRoundState) XXX_DiscardUnknown() { + xxx_messageInfo_EventDataRoundState.DiscardUnknown(m) +} + +var xxx_messageInfo_EventDataRoundState proto.InternalMessageInfo + +func (m *EventDataRoundState) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *EventDataRoundState) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *EventDataRoundState) GetStep() string { + if m != nil { + return m.Step + } + return "" +} + +func init() { + proto.RegisterType((*EventDataRoundState)(nil), "tendermint.types.EventDataRoundState") +} + +func init() { proto.RegisterFile("tendermint/types/events.proto", fileDescriptor_72cfafd446dedf7c) } + +var fileDescriptor_72cfafd446dedf7c = []byte{ + // 189 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2d, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x2d, 0x4b, + 0xcd, 0x2b, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x40, 0x48, 0xeb, 0x81, 0xa5, + 0x95, 0xc2, 0xb9, 0x84, 0x5d, 0x41, 0x2a, 0x5c, 0x12, 0x4b, 0x12, 0x83, 0xf2, 0x4b, 0xf3, 0x52, + 0x82, 0x4b, 0x12, 0x4b, 0x52, 0x85, 0xc4, 0xb8, 0xd8, 0x32, 0x52, 0x33, 0xd3, 0x33, 0x4a, 0x24, + 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0xa0, 0x3c, 0x21, 0x11, 0x2e, 0xd6, 0x22, 0x90, 0x2a, 0x09, + 0x26, 0x05, 0x46, 0x0d, 0xd6, 0x20, 0x08, 0x47, 0x48, 0x88, 0x8b, 0xa5, 0xb8, 0x24, 0xb5, 0x40, + 0x82, 0x59, 0x81, 0x51, 0x83, 0x33, 0x08, 0xcc, 0x76, 0x0a, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, + 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, + 0xc6, 0x63, 0x39, 0x86, 0x28, 0xf3, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, + 0x7d, 0x64, 0xe7, 0x22, 0x98, 0x60, 0xc7, 0xea, 0xa3, 0x7b, 0x25, 0x89, 0x0d, 0x2c, 0x6e, 0x0c, + 0x08, 0x00, 0x00, 0xff, 0xff, 0xc3, 0xe9, 0x14, 0x02, 0xe5, 0x00, 0x00, 0x00, +} + +func (m *EventDataRoundState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventDataRoundState) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventDataRoundState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Step) > 0 { + i -= len(m.Step) + copy(dAtA[i:], m.Step) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Step))) + i-- + dAtA[i] = 0x1a + } + if m.Round != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { + offset -= sovEvents(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EventDataRoundState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovEvents(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovEvents(uint64(m.Round)) + } + l = len(m.Step) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func sovEvents(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvents(x uint64) (n int) { + return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *EventDataRoundState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventDataRoundState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventDataRoundState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Step = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvents 
+ } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvents(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvents + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvents + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvents + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/types/events.proto b/proto/tendermint/types/events.proto new file mode 100644 index 000000000..a1e5cc498 --- /dev/null +++ b/proto/tendermint/types/events.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; +package tendermint.types; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + +message EventDataRoundState { + int64 height = 1; + int32 round = 2; + string step = 3; +} diff --git a/proto/tendermint/types/evidence.pb.go b/proto/tendermint/types/evidence.pb.go new file mode 100644 index 000000000..3d9e8f2c5 --- /dev/null +++ b/proto/tendermint/types/evidence.pb.go @@ -0,0 +1,1406 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/types/evidence.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + _ "github.com/gogo/protobuf/types" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
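Note: EventDataRoundState above is a plain three-field message, and the generated Marshal/Unmarshal pair round-trips it without any registry setup. A small usage sketch, assuming the generated package is importable at the go_package path declared in events.proto; the field values are arbitrary samples.

package main

import (
	"fmt"

	tmtypes "github.com/tendermint/tendermint/proto/tendermint/types"
)

func main() {
	// Sample values; Step is a free-form string in this message.
	ev := tmtypes.EventDataRoundState{Height: 10, Round: 1, Step: "RoundStepPropose"}

	// Marshal and Unmarshal are the generated methods shown in events.pb.go above.
	bz, err := ev.Marshal()
	if err != nil {
		panic(err)
	}

	var out tmtypes.EventDataRoundState
	if err := out.Unmarshal(bz); err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes; height=%d round=%d step=%q\n",
		len(bz), out.GetHeight(), out.GetRound(), out.GetStep())
}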
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Evidence struct { + // Types that are valid to be assigned to Sum: + // *Evidence_DuplicateVoteEvidence + // *Evidence_LightClientAttackEvidence + Sum isEvidence_Sum `protobuf_oneof:"sum"` +} + +func (m *Evidence) Reset() { *m = Evidence{} } +func (m *Evidence) String() string { return proto.CompactTextString(m) } +func (*Evidence) ProtoMessage() {} +func (*Evidence) Descriptor() ([]byte, []int) { + return fileDescriptor_6825fabc78e0a168, []int{0} +} +func (m *Evidence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Evidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Evidence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Evidence) XXX_Merge(src proto.Message) { + xxx_messageInfo_Evidence.Merge(m, src) +} +func (m *Evidence) XXX_Size() int { + return m.Size() +} +func (m *Evidence) XXX_DiscardUnknown() { + xxx_messageInfo_Evidence.DiscardUnknown(m) +} + +var xxx_messageInfo_Evidence proto.InternalMessageInfo + +type isEvidence_Sum interface { + isEvidence_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Evidence_DuplicateVoteEvidence struct { + DuplicateVoteEvidence *DuplicateVoteEvidence `protobuf:"bytes,1,opt,name=duplicate_vote_evidence,json=duplicateVoteEvidence,proto3,oneof" json:"duplicate_vote_evidence,omitempty"` +} +type Evidence_LightClientAttackEvidence struct { + LightClientAttackEvidence *LightClientAttackEvidence `protobuf:"bytes,2,opt,name=light_client_attack_evidence,json=lightClientAttackEvidence,proto3,oneof" json:"light_client_attack_evidence,omitempty"` +} + +func (*Evidence_DuplicateVoteEvidence) isEvidence_Sum() {} +func (*Evidence_LightClientAttackEvidence) isEvidence_Sum() {} + +func (m *Evidence) GetSum() isEvidence_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *Evidence) GetDuplicateVoteEvidence() *DuplicateVoteEvidence { + if x, ok := m.GetSum().(*Evidence_DuplicateVoteEvidence); ok { + return x.DuplicateVoteEvidence + } + return nil +} + +func (m *Evidence) GetLightClientAttackEvidence() *LightClientAttackEvidence { + if x, ok := m.GetSum().(*Evidence_LightClientAttackEvidence); ok { + return x.LightClientAttackEvidence + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Evidence) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Evidence_DuplicateVoteEvidence)(nil), + (*Evidence_LightClientAttackEvidence)(nil), + } +} + +// DuplicateVoteEvidence contains evidence that a validator signed two conflicting votes.
+type DuplicateVoteEvidence struct { + VoteA *Vote `protobuf:"bytes,1,opt,name=vote_a,json=voteA,proto3" json:"vote_a,omitempty"` + VoteB *Vote `protobuf:"bytes,2,opt,name=vote_b,json=voteB,proto3" json:"vote_b,omitempty"` + TotalVotingPower int64 `protobuf:"varint,3,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` + ValidatorPower int64 `protobuf:"varint,4,opt,name=validator_power,json=validatorPower,proto3" json:"validator_power,omitempty"` + Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` +} + +func (m *DuplicateVoteEvidence) Reset() { *m = DuplicateVoteEvidence{} } +func (m *DuplicateVoteEvidence) String() string { return proto.CompactTextString(m) } +func (*DuplicateVoteEvidence) ProtoMessage() {} +func (*DuplicateVoteEvidence) Descriptor() ([]byte, []int) { + return fileDescriptor_6825fabc78e0a168, []int{1} +} +func (m *DuplicateVoteEvidence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DuplicateVoteEvidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DuplicateVoteEvidence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DuplicateVoteEvidence) XXX_Merge(src proto.Message) { + xxx_messageInfo_DuplicateVoteEvidence.Merge(m, src) +} +func (m *DuplicateVoteEvidence) XXX_Size() int { + return m.Size() +} +func (m *DuplicateVoteEvidence) XXX_DiscardUnknown() { + xxx_messageInfo_DuplicateVoteEvidence.DiscardUnknown(m) +} + +var xxx_messageInfo_DuplicateVoteEvidence proto.InternalMessageInfo + +func (m *DuplicateVoteEvidence) GetVoteA() *Vote { + if m != nil { + return m.VoteA + } + return nil +} + +func (m *DuplicateVoteEvidence) GetVoteB() *Vote { + if m != nil { + return m.VoteB + } + return nil +} + +func (m *DuplicateVoteEvidence) GetTotalVotingPower() int64 { + if m != nil { + return m.TotalVotingPower + } + return 0 +} + +func (m *DuplicateVoteEvidence) GetValidatorPower() int64 { + if m != nil { + return m.ValidatorPower + } + return 0 +} + +func (m *DuplicateVoteEvidence) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client. 
+type LightClientAttackEvidence struct { + ConflictingBlock *LightBlock `protobuf:"bytes,1,opt,name=conflicting_block,json=conflictingBlock,proto3" json:"conflicting_block,omitempty"` + CommonHeight int64 `protobuf:"varint,2,opt,name=common_height,json=commonHeight,proto3" json:"common_height,omitempty"` + ByzantineValidators []*Validator `protobuf:"bytes,3,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators,omitempty"` + TotalVotingPower int64 `protobuf:"varint,4,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` + Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` +} + +func (m *LightClientAttackEvidence) Reset() { *m = LightClientAttackEvidence{} } +func (m *LightClientAttackEvidence) String() string { return proto.CompactTextString(m) } +func (*LightClientAttackEvidence) ProtoMessage() {} +func (*LightClientAttackEvidence) Descriptor() ([]byte, []int) { + return fileDescriptor_6825fabc78e0a168, []int{2} +} +func (m *LightClientAttackEvidence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LightClientAttackEvidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LightClientAttackEvidence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LightClientAttackEvidence) XXX_Merge(src proto.Message) { + xxx_messageInfo_LightClientAttackEvidence.Merge(m, src) +} +func (m *LightClientAttackEvidence) XXX_Size() int { + return m.Size() +} +func (m *LightClientAttackEvidence) XXX_DiscardUnknown() { + xxx_messageInfo_LightClientAttackEvidence.DiscardUnknown(m) +} + +var xxx_messageInfo_LightClientAttackEvidence proto.InternalMessageInfo + +func (m *LightClientAttackEvidence) GetConflictingBlock() *LightBlock { + if m != nil { + return m.ConflictingBlock + } + return nil +} + +func (m *LightClientAttackEvidence) GetCommonHeight() int64 { + if m != nil { + return m.CommonHeight + } + return 0 +} + +func (m *LightClientAttackEvidence) GetByzantineValidators() []*Validator { + if m != nil { + return m.ByzantineValidators + } + return nil +} + +func (m *LightClientAttackEvidence) GetTotalVotingPower() int64 { + if m != nil { + return m.TotalVotingPower + } + return 0 +} + +func (m *LightClientAttackEvidence) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +type EvidenceList struct { + Evidence []Evidence `protobuf:"bytes,1,rep,name=evidence,proto3" json:"evidence"` +} + +func (m *EvidenceList) Reset() { *m = EvidenceList{} } +func (m *EvidenceList) String() string { return proto.CompactTextString(m) } +func (*EvidenceList) ProtoMessage() {} +func (*EvidenceList) Descriptor() ([]byte, []int) { + return fileDescriptor_6825fabc78e0a168, []int{3} +} +func (m *EvidenceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvidenceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvidenceList.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvidenceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvidenceList.Merge(m, src) +} +func (m *EvidenceList) XXX_Size() int { + return m.Size() +} +func (m *EvidenceList) XXX_DiscardUnknown() { + 
xxx_messageInfo_EvidenceList.DiscardUnknown(m) +} + +var xxx_messageInfo_EvidenceList proto.InternalMessageInfo + +func (m *EvidenceList) GetEvidence() []Evidence { + if m != nil { + return m.Evidence + } + return nil +} + +func init() { + proto.RegisterType((*Evidence)(nil), "tendermint.types.Evidence") + proto.RegisterType((*DuplicateVoteEvidence)(nil), "tendermint.types.DuplicateVoteEvidence") + proto.RegisterType((*LightClientAttackEvidence)(nil), "tendermint.types.LightClientAttackEvidence") + proto.RegisterType((*EvidenceList)(nil), "tendermint.types.EvidenceList") +} + +func init() { proto.RegisterFile("tendermint/types/evidence.proto", fileDescriptor_6825fabc78e0a168) } + +var fileDescriptor_6825fabc78e0a168 = []byte{ + // 532 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xcf, 0x6e, 0xd3, 0x40, + 0x10, 0xc6, 0xed, 0x3a, 0xa9, 0xc2, 0xb6, 0x40, 0x58, 0x5a, 0x48, 0x43, 0xe4, 0x44, 0xe1, 0xd0, + 0x48, 0x80, 0x2d, 0x95, 0x03, 0x17, 0x2e, 0x35, 0x20, 0x15, 0x29, 0x42, 0x60, 0xa1, 0x1e, 0xb8, + 0x58, 0x6b, 0x7b, 0xeb, 0xac, 0x6a, 0xef, 0x5a, 0xf1, 0x24, 0xa8, 0x3c, 0x45, 0x1e, 0xab, 0x17, + 0xa4, 0x1e, 0x39, 0x01, 0x4a, 0x78, 0x10, 0xe4, 0xf5, 0x9f, 0x44, 0x75, 0xcc, 0x89, 0x4b, 0xe4, + 0xcc, 0xfc, 0xbe, 0x9d, 0x99, 0xcf, 0xb3, 0x46, 0x7d, 0xa0, 0xdc, 0xa7, 0xd3, 0x88, 0x71, 0x30, + 0xe1, 0x2a, 0xa6, 0x89, 0x49, 0xe7, 0xcc, 0xa7, 0xdc, 0xa3, 0x46, 0x3c, 0x15, 0x20, 0x70, 0x7b, + 0x0d, 0x18, 0x12, 0xe8, 0x1e, 0x04, 0x22, 0x10, 0x32, 0x69, 0xa6, 0x4f, 0x19, 0xd7, 0xed, 0x07, + 0x42, 0x04, 0x21, 0x35, 0xe5, 0x3f, 0x77, 0x76, 0x61, 0x02, 0x8b, 0x68, 0x02, 0x24, 0x8a, 0x73, + 0xa0, 0x57, 0xa9, 0x24, 0x7f, 0xf3, 0xec, 0xa0, 0x92, 0x9d, 0x93, 0x90, 0xf9, 0x04, 0xc4, 0x34, + 0x23, 0x86, 0x7f, 0x54, 0xd4, 0x7a, 0x97, 0xf7, 0x86, 0x09, 0x7a, 0xec, 0xcf, 0xe2, 0x90, 0x79, + 0x04, 0xa8, 0x33, 0x17, 0x40, 0x9d, 0xa2, 0xed, 0x8e, 0x3a, 0x50, 0x47, 0x7b, 0x27, 0xc7, 0xc6, + 0xed, 0xbe, 0x8d, 0xb7, 0x85, 0xe0, 0x5c, 0x00, 0x2d, 0x4e, 0x3a, 0x53, 0xec, 0x43, 0x7f, 0x5b, + 0x02, 0x73, 0xd4, 0x0b, 0x59, 0x30, 0x01, 0xc7, 0x0b, 0x19, 0xe5, 0xe0, 0x10, 0x00, 0xe2, 0x5d, + 0xae, 0xeb, 0xec, 0xc8, 0x3a, 0xcf, 0xaa, 0x75, 0xc6, 0xa9, 0xea, 0x8d, 0x14, 0x9d, 0x4a, 0xcd, + 0x46, 0xad, 0xa3, 0xb0, 0x2e, 0x69, 0x35, 0x91, 0x96, 0xcc, 0xa2, 0xe1, 0x62, 0x07, 0x1d, 0x6e, + 0xed, 0x14, 0xbf, 0x40, 0xbb, 0x72, 0x52, 0x92, 0x8f, 0xf8, 0xa8, 0x5a, 0x3a, 0xe5, 0xed, 0x66, + 0x4a, 0x9d, 0x96, 0xb8, 0x9b, 0x77, 0xfa, 0x4f, 0xdc, 0xc2, 0xcf, 0x11, 0x06, 0x01, 0x24, 0x4c, + 0xdd, 0x64, 0x3c, 0x70, 0x62, 0xf1, 0x95, 0x4e, 0x3b, 0xda, 0x40, 0x1d, 0x69, 0x76, 0x5b, 0x66, + 0xce, 0x65, 0xe2, 0x63, 0x1a, 0xc7, 0xc7, 0xe8, 0x7e, 0xf9, 0x7e, 0x72, 0xb4, 0x21, 0xd1, 0x7b, + 0x65, 0x38, 0x03, 0x2d, 0x74, 0xa7, 0x5c, 0x84, 0x4e, 0x53, 0x36, 0xd2, 0x35, 0xb2, 0x55, 0x31, + 0x8a, 0x55, 0x31, 0x3e, 0x17, 0x84, 0xd5, 0xba, 0xfe, 0xd9, 0x57, 0x16, 0xbf, 0xfa, 0xaa, 0xbd, + 0x96, 0x0d, 0xbf, 0xef, 0xa0, 0xa3, 0x5a, 0x53, 0xf1, 0x7b, 0xf4, 0xc0, 0x13, 0xfc, 0x22, 0x64, + 0x9e, 0xec, 0xdb, 0x0d, 0x85, 0x77, 0x99, 0x3b, 0xd4, 0xab, 0x79, 0x39, 0x56, 0xca, 0xd8, 0xed, + 0x0d, 0x99, 0x8c, 0xe0, 0xa7, 0xe8, 0xae, 0x27, 0xa2, 0x48, 0x70, 0x67, 0x42, 0x53, 0x4e, 0x3a, + 0xa7, 0xd9, 0xfb, 0x59, 0xf0, 0x4c, 0xc6, 0xf0, 0x07, 0x74, 0xe0, 0x5e, 0x7d, 0x23, 0x1c, 0x18, + 0xa7, 0x4e, 0x39, 0x6d, 0xd2, 0xd1, 0x06, 0xda, 0x68, 0xef, 0xe4, 0xc9, 0x16, 0x97, 0x0b, 0xc6, + 0x7e, 0x58, 0x0a, 0xcb, 0x58, 0x52, 0x63, 0x7c, 0xa3, 0xc6, 0xf8, 0xff, 0xe1, 0xe7, 0x18, 0xed, + 0x17, 0xee, 0x8d, 0x59, 
0x02, 0xf8, 0x35, 0x6a, 0x6d, 0xdc, 0x1e, 0x4d, 0x1e, 0x59, 0x99, 0xa2, + 0xdc, 0xd3, 0x46, 0x7a, 0xa4, 0x5d, 0x2a, 0xac, 0x4f, 0xd7, 0x4b, 0x5d, 0xbd, 0x59, 0xea, 0xea, + 0xef, 0xa5, 0xae, 0x2e, 0x56, 0xba, 0x72, 0xb3, 0xd2, 0x95, 0x1f, 0x2b, 0x5d, 0xf9, 0xf2, 0x2a, + 0x60, 0x30, 0x99, 0xb9, 0x86, 0x27, 0x22, 0x73, 0xf3, 0x7a, 0xaf, 0x1f, 0xb3, 0xaf, 0xc8, 0xed, + 0xab, 0xef, 0xee, 0xca, 0xf8, 0xcb, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa6, 0x21, 0x16, 0x68, + 0x9d, 0x04, 0x00, 0x00, +} + +func (m *Evidence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Evidence_DuplicateVoteEvidence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Evidence_DuplicateVoteEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.DuplicateVoteEvidence != nil { + { + size, err := m.DuplicateVoteEvidence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvidence(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Evidence_LightClientAttackEvidence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Evidence_LightClientAttackEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LightClientAttackEvidence != nil { + { + size, err := m.LightClientAttackEvidence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvidence(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *DuplicateVoteEvidence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DuplicateVoteEvidence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DuplicateVoteEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err3 != nil { + return 0, err3 + } + i -= n3 + i = encodeVarintEvidence(dAtA, i, uint64(n3)) + i-- + dAtA[i] = 0x2a + if m.ValidatorPower != 0 { + i = encodeVarintEvidence(dAtA, i, uint64(m.ValidatorPower)) + i-- + dAtA[i] = 0x20 + } + if m.TotalVotingPower != 0 { + i = encodeVarintEvidence(dAtA, i, uint64(m.TotalVotingPower)) + i-- + dAtA[i] = 0x18 + } + if m.VoteB != nil { + { + size, err := m.VoteB.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvidence(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.VoteA != nil { + { + size, err := m.VoteA.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + 
i = encodeVarintEvidence(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LightClientAttackEvidence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LightClientAttackEvidence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LightClientAttackEvidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err6 != nil { + return 0, err6 + } + i -= n6 + i = encodeVarintEvidence(dAtA, i, uint64(n6)) + i-- + dAtA[i] = 0x2a + if m.TotalVotingPower != 0 { + i = encodeVarintEvidence(dAtA, i, uint64(m.TotalVotingPower)) + i-- + dAtA[i] = 0x20 + } + if len(m.ByzantineValidators) > 0 { + for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvidence(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.CommonHeight != 0 { + i = encodeVarintEvidence(dAtA, i, uint64(m.CommonHeight)) + i-- + dAtA[i] = 0x10 + } + if m.ConflictingBlock != nil { + { + size, err := m.ConflictingBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvidence(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EvidenceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvidenceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvidenceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Evidence) > 0 { + for iNdEx := len(m.Evidence) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Evidence[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvidence(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintEvidence(dAtA []byte, offset int, v uint64) int { + offset -= sovEvidence(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Evidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *Evidence_DuplicateVoteEvidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DuplicateVoteEvidence != nil { + l = m.DuplicateVoteEvidence.Size() + n += 1 + l + sovEvidence(uint64(l)) + } + return n +} +func (m *Evidence_LightClientAttackEvidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LightClientAttackEvidence != nil { + l = m.LightClientAttackEvidence.Size() + n += 1 + l + sovEvidence(uint64(l)) + } + return n +} +func (m *DuplicateVoteEvidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VoteA != nil { + l = m.VoteA.Size() + n += 1 + l + 
sovEvidence(uint64(l)) + } + if m.VoteB != nil { + l = m.VoteB.Size() + n += 1 + l + sovEvidence(uint64(l)) + } + if m.TotalVotingPower != 0 { + n += 1 + sovEvidence(uint64(m.TotalVotingPower)) + } + if m.ValidatorPower != 0 { + n += 1 + sovEvidence(uint64(m.ValidatorPower)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovEvidence(uint64(l)) + return n +} + +func (m *LightClientAttackEvidence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConflictingBlock != nil { + l = m.ConflictingBlock.Size() + n += 1 + l + sovEvidence(uint64(l)) + } + if m.CommonHeight != 0 { + n += 1 + sovEvidence(uint64(m.CommonHeight)) + } + if len(m.ByzantineValidators) > 0 { + for _, e := range m.ByzantineValidators { + l = e.Size() + n += 1 + l + sovEvidence(uint64(l)) + } + } + if m.TotalVotingPower != 0 { + n += 1 + sovEvidence(uint64(m.TotalVotingPower)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovEvidence(uint64(l)) + return n +} + +func (m *EvidenceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Evidence) > 0 { + for _, e := range m.Evidence { + l = e.Size() + n += 1 + l + sovEvidence(uint64(l)) + } + } + return n +} + +func sovEvidence(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvidence(x uint64) (n int) { + return sovEvidence(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Evidence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Evidence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Evidence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DuplicateVoteEvidence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DuplicateVoteEvidence{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Evidence_DuplicateVoteEvidence{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LightClientAttackEvidence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &LightClientAttackEvidence{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + m.Sum = &Evidence_LightClientAttackEvidence{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvidence(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DuplicateVoteEvidence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DuplicateVoteEvidence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DuplicateVoteEvidence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteA", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VoteA == nil { + m.VoteA = &Vote{} + } + if err := m.VoteA.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteB", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VoteB == nil { + m.VoteB = &Vote{} + } + if err := m.VoteB.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) + } + m.TotalVotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalVotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorPower", wireType) + } + m.ValidatorPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ValidatorPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvidence(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LightClientAttackEvidence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LightClientAttackEvidence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LightClientAttackEvidence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConflictingBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConflictingBlock == nil { + m.ConflictingBlock = &LightBlock{} + } + if err := m.ConflictingBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonHeight", wireType) + } + m.CommonHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CommonHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ByzantineValidators = append(m.ByzantineValidators, &Validator{}) + if err := 
m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) + } + m.TotalVotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalVotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvidence(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EvidenceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvidenceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvidenceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvidence + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvidence + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvidence + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Evidence = append(m.Evidence, Evidence{}) + if err := m.Evidence[len(m.Evidence)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvidence(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEvidence + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvidence(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx 
< l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvidence + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvidence + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvidence + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvidence + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvidence + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvidence + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvidence = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvidence = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvidence = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/types/evidence.proto b/proto/tendermint/types/evidence.proto new file mode 100644 index 000000000..451b8dca3 --- /dev/null +++ b/proto/tendermint/types/evidence.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; +package tendermint.types; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "tendermint/types/types.proto"; +import "tendermint/types/validator.proto"; + +message Evidence { + oneof sum { + DuplicateVoteEvidence duplicate_vote_evidence = 1; + LightClientAttackEvidence light_client_attack_evidence = 2; + } +} + +// DuplicateVoteEvidence contains evidence that a validator signed two conflicting votes. +message DuplicateVoteEvidence { + tendermint.types.Vote vote_a = 1; + tendermint.types.Vote vote_b = 2; + int64 total_voting_power = 3; + int64 validator_power = 4; + google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; +} + +// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client. +message LightClientAttackEvidence { + tendermint.types.LightBlock conflicting_block = 1; + int64 common_height = 2; + repeated tendermint.types.Validator byzantine_validators = 3; + int64 total_voting_power = 4; + google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; +} + +message EvidenceList { + repeated Evidence evidence = 1 [(gogoproto.nullable) = false]; +} diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go new file mode 100644 index 000000000..5c9eff877 --- /dev/null +++ b/proto/tendermint/types/params.pb.go @@ -0,0 +1,1798 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT.
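Note: the Evidence oneof above is populated through its generated wrapper structs (Evidence_DuplicateVoteEvidence, Evidence_LightClientAttackEvidence) rather than by assigning a variant message directly. A hedged sketch of constructing and round-tripping the duplicate-vote variant; the Vote message comes from tendermint/types/types.proto, outside this hunk, so its fields are left zero here.

package main

import (
	"fmt"
	"time"

	tmtypes "github.com/tendermint/tendermint/proto/tendermint/types"
)

func main() {
	dve := &tmtypes.DuplicateVoteEvidence{
		// Real evidence would carry the two conflicting signed votes;
		// Vote is defined in types.proto, outside this hunk, so it is
		// left empty in this sketch.
		VoteA:            &tmtypes.Vote{},
		VoteB:            &tmtypes.Vote{},
		TotalVotingPower: 100,
		ValidatorPower:   10,
		Timestamp:        time.Now().UTC(),
	}

	// The oneof field Sum takes the generated wrapper type.
	ev := tmtypes.Evidence{Sum: &tmtypes.Evidence_DuplicateVoteEvidence{DuplicateVoteEvidence: dve}}

	bz, err := ev.Marshal()
	if err != nil {
		panic(err)
	}

	var out tmtypes.Evidence
	if err := out.Unmarshal(bz); err != nil {
		panic(err)
	}
	fmt.Println("round-tripped duplicate-vote evidence:", out.GetDuplicateVoteEvidence() != nil)
}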
+// source: tendermint/types/params.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + _ "github.com/golang/protobuf/ptypes/duration" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ConsensusParams contains consensus critical parameters that determine the +// validity of blocks. +type ConsensusParams struct { + Block BlockParams `protobuf:"bytes,1,opt,name=block,proto3" json:"block"` + Evidence EvidenceParams `protobuf:"bytes,2,opt,name=evidence,proto3" json:"evidence"` + Validator ValidatorParams `protobuf:"bytes,3,opt,name=validator,proto3" json:"validator"` + Version VersionParams `protobuf:"bytes,4,opt,name=version,proto3" json:"version"` +} + +func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } +func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } +func (*ConsensusParams) ProtoMessage() {} +func (*ConsensusParams) Descriptor() ([]byte, []int) { + return fileDescriptor_e12598271a686f57, []int{0} +} +func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusParams.Merge(m, src) +} +func (m *ConsensusParams) XXX_Size() int { + return m.Size() +} +func (m *ConsensusParams) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusParams.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusParams proto.InternalMessageInfo + +func (m *ConsensusParams) GetBlock() BlockParams { + if m != nil { + return m.Block + } + return BlockParams{} +} + +func (m *ConsensusParams) GetEvidence() EvidenceParams { + if m != nil { + return m.Evidence + } + return EvidenceParams{} +} + +func (m *ConsensusParams) GetValidator() ValidatorParams { + if m != nil { + return m.Validator + } + return ValidatorParams{} +} + +func (m *ConsensusParams) GetVersion() VersionParams { + if m != nil { + return m.Version + } + return VersionParams{} +} + +// BlockParams contains limits on the block size. +type BlockParams struct { + // Max block size, in bytes. + // Note: must be greater than 0 + MaxBytes int64 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` + // Max gas per block. + // Note: must be greater than or equal to -1 + MaxGas int64 `protobuf:"varint,2,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` + // Minimum time increment between consecutive blocks (in milliseconds). If the + // block header timestamp is ahead of the system clock, decrease this value. + // + // Not exposed to the application.
+ TimeIotaMs int64 `protobuf:"varint,3,opt,name=time_iota_ms,json=timeIotaMs,proto3" json:"time_iota_ms,omitempty"` +} + +func (m *BlockParams) Reset() { *m = BlockParams{} } +func (m *BlockParams) String() string { return proto.CompactTextString(m) } +func (*BlockParams) ProtoMessage() {} +func (*BlockParams) Descriptor() ([]byte, []int) { + return fileDescriptor_e12598271a686f57, []int{1} +} +func (m *BlockParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockParams.Merge(m, src) +} +func (m *BlockParams) XXX_Size() int { + return m.Size() +} +func (m *BlockParams) XXX_DiscardUnknown() { + xxx_messageInfo_BlockParams.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockParams proto.InternalMessageInfo + +func (m *BlockParams) GetMaxBytes() int64 { + if m != nil { + return m.MaxBytes + } + return 0 +} + +func (m *BlockParams) GetMaxGas() int64 { + if m != nil { + return m.MaxGas + } + return 0 +} + +func (m *BlockParams) GetTimeIotaMs() int64 { + if m != nil { + return m.TimeIotaMs + } + return 0 +} + +// EvidenceParams determine how we handle evidence of malfeasance. +type EvidenceParams struct { + // Max age of evidence, in blocks. + // + // The basic formula for calculating this is: MaxAgeDuration / {average block + // time}. + MaxAgeNumBlocks int64 `protobuf:"varint,1,opt,name=max_age_num_blocks,json=maxAgeNumBlocks,proto3" json:"max_age_num_blocks,omitempty"` + // Max age of evidence, in time. + // + // It should correspond with an app's "unbonding period" or other similar + // mechanism for handling [Nothing-At-Stake + // attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). + MaxAgeDuration time.Duration `protobuf:"bytes,2,opt,name=max_age_duration,json=maxAgeDuration,proto3,stdduration" json:"max_age_duration"` + // This sets the maximum size of total evidence in bytes that can be committed in a single block + // and should fall comfortably under the max block bytes.
+ // Default is 1048576 or 1MB + MaxBytes int64 `protobuf:"varint,3,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` +} + +func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } +func (m *EvidenceParams) String() string { return proto.CompactTextString(m) } +func (*EvidenceParams) ProtoMessage() {} +func (*EvidenceParams) Descriptor() ([]byte, []int) { + return fileDescriptor_e12598271a686f57, []int{2} +} +func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvidenceParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvidenceParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvidenceParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvidenceParams.Merge(m, src) +} +func (m *EvidenceParams) XXX_Size() int { + return m.Size() +} +func (m *EvidenceParams) XXX_DiscardUnknown() { + xxx_messageInfo_EvidenceParams.DiscardUnknown(m) +} + +var xxx_messageInfo_EvidenceParams proto.InternalMessageInfo + +func (m *EvidenceParams) GetMaxAgeNumBlocks() int64 { + if m != nil { + return m.MaxAgeNumBlocks + } + return 0 +} + +func (m *EvidenceParams) GetMaxAgeDuration() time.Duration { + if m != nil { + return m.MaxAgeDuration + } + return 0 +} + +func (m *EvidenceParams) GetMaxBytes() int64 { + if m != nil { + return m.MaxBytes + } + return 0 +} + +// ValidatorParams restrict the public key types validators can use. +// NOTE: uses ABCI pubkey naming, not Amino names. +type ValidatorParams struct { + PubKeyTypes []string `protobuf:"bytes,1,rep,name=pub_key_types,json=pubKeyTypes,proto3" json:"pub_key_types,omitempty"` +} + +func (m *ValidatorParams) Reset() { *m = ValidatorParams{} } +func (m *ValidatorParams) String() string { return proto.CompactTextString(m) } +func (*ValidatorParams) ProtoMessage() {} +func (*ValidatorParams) Descriptor() ([]byte, []int) { + return fileDescriptor_e12598271a686f57, []int{3} +} +func (m *ValidatorParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValidatorParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorParams.Merge(m, src) +} +func (m *ValidatorParams) XXX_Size() int { + return m.Size() +} +func (m *ValidatorParams) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorParams.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorParams proto.InternalMessageInfo + +func (m *ValidatorParams) GetPubKeyTypes() []string { + if m != nil { + return m.PubKeyTypes + } + return nil +} + +// VersionParams contains the ABCI application version. 
+type VersionParams struct { + AppVersion uint64 `protobuf:"varint,1,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` +} + +func (m *VersionParams) Reset() { *m = VersionParams{} } +func (m *VersionParams) String() string { return proto.CompactTextString(m) } +func (*VersionParams) ProtoMessage() {} +func (*VersionParams) Descriptor() ([]byte, []int) { + return fileDescriptor_e12598271a686f57, []int{4} +} +func (m *VersionParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VersionParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VersionParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VersionParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionParams.Merge(m, src) +} +func (m *VersionParams) XXX_Size() int { + return m.Size() +} +func (m *VersionParams) XXX_DiscardUnknown() { + xxx_messageInfo_VersionParams.DiscardUnknown(m) +} + +var xxx_messageInfo_VersionParams proto.InternalMessageInfo + +func (m *VersionParams) GetAppVersion() uint64 { + if m != nil { + return m.AppVersion + } + return 0 +} + +// HashedParams is a subset of ConsensusParams. +// +// It is hashed into the Header.ConsensusHash. +type HashedParams struct { + BlockMaxBytes int64 `protobuf:"varint,1,opt,name=block_max_bytes,json=blockMaxBytes,proto3" json:"block_max_bytes,omitempty"` + BlockMaxGas int64 `protobuf:"varint,2,opt,name=block_max_gas,json=blockMaxGas,proto3" json:"block_max_gas,omitempty"` +} + +func (m *HashedParams) Reset() { *m = HashedParams{} } +func (m *HashedParams) String() string { return proto.CompactTextString(m) } +func (*HashedParams) ProtoMessage() {} +func (*HashedParams) Descriptor() ([]byte, []int) { + return fileDescriptor_e12598271a686f57, []int{5} +} +func (m *HashedParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HashedParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HashedParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HashedParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_HashedParams.Merge(m, src) +} +func (m *HashedParams) XXX_Size() int { + return m.Size() +} +func (m *HashedParams) XXX_DiscardUnknown() { + xxx_messageInfo_HashedParams.DiscardUnknown(m) +} + +var xxx_messageInfo_HashedParams proto.InternalMessageInfo + +func (m *HashedParams) GetBlockMaxBytes() int64 { + if m != nil { + return m.BlockMaxBytes + } + return 0 +} + +func (m *HashedParams) GetBlockMaxGas() int64 { + if m != nil { + return m.BlockMaxGas + } + return 0 +} + +func init() { + proto.RegisterType((*ConsensusParams)(nil), "tendermint.types.ConsensusParams") + proto.RegisterType((*BlockParams)(nil), "tendermint.types.BlockParams") + proto.RegisterType((*EvidenceParams)(nil), "tendermint.types.EvidenceParams") + proto.RegisterType((*ValidatorParams)(nil), "tendermint.types.ValidatorParams") + proto.RegisterType((*VersionParams)(nil), "tendermint.types.VersionParams") + proto.RegisterType((*HashedParams)(nil), "tendermint.types.HashedParams") +} + +func init() { proto.RegisterFile("tendermint/types/params.proto", fileDescriptor_e12598271a686f57) } + +var fileDescriptor_e12598271a686f57 = []byte{ + // 537 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x31, 0x6f, 0xd3, 0x40, + 0x18, 0xcd, 0xd5, 0xa5, 0x4d, 0xbe, 0x34, 0x4d, 0x75, 0x42, 0x22, 0x14, 0xd5, 0x0e, 0x1e, 0x50, + 0x25, 0x24, 0x5b, 0x82, 0x01, 0xd1, 0xa5, 0xc2, 0x50, 0x15, 0x84, 0x82, 0x90, 0x05, 0x0c, 0x5d, + 0xac, 0x73, 0x72, 0xb8, 0x56, 0x73, 0x3e, 0xcb, 0x77, 0x8e, 0x92, 0x7f, 0xc1, 0xd8, 0xb1, 0x23, + 0xfc, 0x03, 0x7e, 0x42, 0xc7, 0x8e, 0x4c, 0x80, 0x92, 0x85, 0x9f, 0x81, 0x7c, 0xce, 0xe1, 0x38, + 0x65, 0xf3, 0x7d, 0xdf, 0x7b, 0xef, 0xfc, 0xde, 0xd3, 0xc1, 0x81, 0xa4, 0xc9, 0x88, 0x66, 0x2c, + 0x4e, 0xa4, 0x2b, 0x67, 0x29, 0x15, 0x6e, 0x4a, 0x32, 0xc2, 0x84, 0x93, 0x66, 0x5c, 0x72, 0xbc, + 0x57, 0xad, 0x1d, 0xb5, 0xde, 0xbf, 0x1b, 0xf1, 0x88, 0xab, 0xa5, 0x5b, 0x7c, 0x95, 0xb8, 0x7d, + 0x33, 0xe2, 0x3c, 0x1a, 0x53, 0x57, 0x9d, 0xc2, 0xfc, 0xb3, 0x3b, 0xca, 0x33, 0x22, 0x63, 0x9e, + 0x94, 0x7b, 0xfb, 0x72, 0x03, 0xba, 0x2f, 0x79, 0x22, 0x68, 0x22, 0x72, 0xf1, 0x5e, 0xdd, 0x80, + 0x9f, 0xc3, 0x9d, 0x70, 0xcc, 0x87, 0x17, 0x3d, 0xd4, 0x47, 0x87, 0xed, 0x27, 0x07, 0xce, 0xfa, + 0x5d, 0x8e, 0x57, 0xac, 0x4b, 0xb4, 0xb7, 0x79, 0xfd, 0xd3, 0x6a, 0xf8, 0x25, 0x03, 0x7b, 0xd0, + 0xa4, 0x93, 0x78, 0x44, 0x93, 0x21, 0xed, 0x6d, 0x28, 0x76, 0xff, 0x36, 0xfb, 0x64, 0x89, 0xa8, + 0x09, 0xfc, 0xe3, 0xe1, 0x13, 0x68, 0x4d, 0xc8, 0x38, 0x1e, 0x11, 0xc9, 0xb3, 0x9e, 0xa1, 0x44, + 0x1e, 0xde, 0x16, 0xf9, 0xa4, 0x21, 0x35, 0x95, 0x8a, 0x89, 0x8f, 0x61, 0x7b, 0x42, 0x33, 0x11, + 0xf3, 0xa4, 0xb7, 0xa9, 0x44, 0xac, 0xff, 0x88, 0x94, 0x80, 0x9a, 0x84, 0x66, 0xd9, 0x14, 0xda, + 0x2b, 0x3e, 0xf1, 0x03, 0x68, 0x31, 0x32, 0x0d, 0xc2, 0x99, 0xa4, 0x42, 0x25, 0x63, 0xf8, 0x4d, + 0x46, 0xa6, 0x5e, 0x71, 0xc6, 0xf7, 0x60, 0xbb, 0x58, 0x46, 0x44, 0x28, 0xdb, 0x86, 0xbf, 0xc5, + 0xc8, 0xf4, 0x94, 0x08, 0xdc, 0x87, 0x1d, 0x19, 0x33, 0x1a, 0xc4, 0x5c, 0x92, 0x80, 0x09, 0xe5, + 0xc7, 0xf0, 0xa1, 0x98, 0xbd, 0xe1, 0x92, 0x0c, 0x84, 0xfd, 0x0d, 0xc1, 0x6e, 0x3d, 0x11, 0xfc, + 0x18, 0x70, 0xa1, 0x46, 0x22, 0x1a, 0x24, 0x39, 0x0b, 0x54, 0xb4, 0xfa, 0xce, 0x2e, 0x23, 0xd3, + 0x17, 0x11, 0x7d, 0x97, 0x33, 0xf5, 0x73, 0x02, 0x0f, 0x60, 0x4f, 0x83, 0x75, 0xb7, 0xcb, 0xe8, + 0xef, 0x3b, 0x65, 0xf9, 0x8e, 0x2e, 0xdf, 0x79, 0xb5, 0x04, 0x78, 0xcd, 0xc2, 0xea, 0xe5, 0x2f, + 0x0b, 0xf9, 0xbb, 0xa5, 0x9e, 0xde, 0xd4, 0x6d, 0x1a, 0x75, 0x9b, 0xf6, 0x31, 0x74, 0xd7, 0x72, + 0xc7, 0x36, 0x74, 0xd2, 0x3c, 0x0c, 0x2e, 0xe8, 0x2c, 0x50, 0x99, 0xf6, 0x50, 0xdf, 0x38, 0x6c, + 0xf9, 0xed, 0x34, 0x0f, 0xdf, 0xd2, 0xd9, 0x87, 0x62, 0x74, 0xd4, 0xfc, 0x7e, 0x65, 0xa1, 0x3f, + 0x57, 0x16, 0xb2, 0x8f, 0xa0, 0x53, 0xcb, 0x1c, 0x5b, 0xd0, 0x26, 0x69, 0x1a, 0xe8, 0xa6, 0x0a, + 0x8f, 0x9b, 0x3e, 0x90, 0x34, 0x5d, 0xc2, 0x56, 0xb8, 0x67, 0xb0, 0xf3, 0x9a, 0x88, 0x73, 0x3a, + 0x5a, 0x52, 0x1f, 0x41, 0x57, 0x25, 0x13, 0xac, 0xd7, 0xd2, 0x51, 0xe3, 0x81, 0xee, 0xc6, 0x86, + 0x4e, 0x85, 0xab, 0x1a, 0x6a, 0x6b, 0xd4, 0x29, 0x11, 0xde, 0xc7, 0xaf, 0x73, 0x13, 0x5d, 0xcf, + 0x4d, 0x74, 0x33, 0x37, 0xd1, 0xef, 0xb9, 0x89, 0xbe, 0x2c, 0xcc, 0xc6, 0xcd, 0xc2, 0x6c, 0xfc, + 0x58, 0x98, 0x8d, 0xb3, 0x67, 0x51, 0x2c, 0xcf, 0xf3, 0xd0, 0x19, 0x72, 0xe6, 0xae, 0x3e, 0xcb, + 0xea, 0xb3, 0x7c, 0x77, 0xeb, 0x4f, 0x36, 0xdc, 0x52, 0xf3, 0xa7, 0x7f, 0x03, 0x00, 0x00, 0xff, + 0xff, 0xfe, 0xe0, 0x3d, 0x9c, 0xcd, 0x03, 0x00, 0x00, +} + +func (this *ConsensusParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ConsensusParams) + if !ok { + that2, ok := that.(ConsensusParams) + if ok { + that1 = &that2 + } else { + 
return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Block.Equal(&that1.Block) { + return false + } + if !this.Evidence.Equal(&that1.Evidence) { + return false + } + if !this.Validator.Equal(&that1.Validator) { + return false + } + if !this.Version.Equal(&that1.Version) { + return false + } + return true +} +func (this *BlockParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BlockParams) + if !ok { + that2, ok := that.(BlockParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.MaxBytes != that1.MaxBytes { + return false + } + if this.MaxGas != that1.MaxGas { + return false + } + if this.TimeIotaMs != that1.TimeIotaMs { + return false + } + return true +} +func (this *EvidenceParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EvidenceParams) + if !ok { + that2, ok := that.(EvidenceParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.MaxAgeNumBlocks != that1.MaxAgeNumBlocks { + return false + } + if this.MaxAgeDuration != that1.MaxAgeDuration { + return false + } + if this.MaxBytes != that1.MaxBytes { + return false + } + return true +} +func (this *ValidatorParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ValidatorParams) + if !ok { + that2, ok := that.(ValidatorParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.PubKeyTypes) != len(that1.PubKeyTypes) { + return false + } + for i := range this.PubKeyTypes { + if this.PubKeyTypes[i] != that1.PubKeyTypes[i] { + return false + } + } + return true +} +func (this *VersionParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*VersionParams) + if !ok { + that2, ok := that.(VersionParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.AppVersion != that1.AppVersion { + return false + } + return true +} +func (this *HashedParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*HashedParams) + if !ok { + that2, ok := that.(HashedParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.BlockMaxBytes != that1.BlockMaxBytes { + return false + } + if this.BlockMaxGas != that1.BlockMaxGas { + return false + } + return true +} +func (m *ConsensusParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0x22 + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Evidence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Block.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BlockParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TimeIotaMs != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.TimeIotaMs)) + i-- + dAtA[i] = 0x18 + } + if m.MaxGas != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.MaxGas)) + i-- + dAtA[i] = 0x10 + } + if m.MaxBytes != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.MaxBytes)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EvidenceParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvidenceParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvidenceParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxBytes != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.MaxBytes)) + i-- + dAtA[i] = 0x18 + } + n5, err5 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration):]) + if err5 != nil { + return 0, err5 + } + i -= n5 + i = encodeVarintParams(dAtA, i, uint64(n5)) + i-- + dAtA[i] = 0x12 + if m.MaxAgeNumBlocks != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.MaxAgeNumBlocks)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ValidatorParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PubKeyTypes) > 0 { + for iNdEx := len(m.PubKeyTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PubKeyTypes[iNdEx]) + copy(dAtA[i:], m.PubKeyTypes[iNdEx]) + i = encodeVarintParams(dAtA, i, uint64(len(m.PubKeyTypes[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *VersionParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VersionParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + 
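+// NOTE: the sketch below is editorial, not generator output. gogoproto's fast
+// path marshals messages backwards: MarshalToSizedBuffer starts at the end of
+// a buffer sized by Size(), writes each field's varint value and then its tag
+// byte, and returns how many bytes it wrote. On the wire, the tag byte 0x8 is
+// (field number 1)<<3 | (wire type 0, varint), so VersionParams{AppVersion: 1}
+// encodes to exactly two bytes: 0x08 0x01. A minimal forward-encoding
+// equivalent (hypothetical helper, not part of the generated API):
+//
+//	func appendVersionParams(dst []byte, appVersion uint64) []byte {
+//		if appVersion == 0 {
+//			return dst // proto3 omits zero-valued scalar fields
+//		}
+//		dst = append(dst, 0x08) // tag: field 1, wire type 0 (varint)
+//		for appVersion >= 1<<7 {
+//			dst = append(dst, byte(appVersion&0x7f|0x80)) // 7 payload bits + continuation bit
+//			appVersion >>= 7
+//		}
+//		return append(dst, byte(appVersion))
+//	}
+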
+func (m *VersionParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AppVersion != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.AppVersion)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HashedParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashedParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HashedParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BlockMaxGas != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.BlockMaxGas)) + i-- + dAtA[i] = 0x10 + } + if m.BlockMaxBytes != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.BlockMaxBytes)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedValidatorParams(r randyParams, easy bool) *ValidatorParams { + this := &ValidatorParams{} + v1 := r.Intn(10) + this.PubKeyTypes = make([]string, v1) + for i := 0; i < v1; i++ { + this.PubKeyTypes[i] = string(randStringParams(r)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedVersionParams(r randyParams, easy bool) *VersionParams { + this := &VersionParams{} + this.AppVersion = uint64(uint64(r.Uint32())) + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyParams interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneParams(r randyParams) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringParams(r randyParams) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneParams(r) + } + return string(tmps) +} +func randUnrecognizedParams(r randyParams, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldParams(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldParams(dAtA []byte, r randyParams, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateParams(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateParams(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateParams(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateParams(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateParams(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateParams(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateParams(dAtA []byte, v uint64) []byte 
{ + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *ConsensusParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Block.Size() + n += 1 + l + sovParams(uint64(l)) + l = m.Evidence.Size() + n += 1 + l + sovParams(uint64(l)) + l = m.Validator.Size() + n += 1 + l + sovParams(uint64(l)) + l = m.Version.Size() + n += 1 + l + sovParams(uint64(l)) + return n +} + +func (m *BlockParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxBytes != 0 { + n += 1 + sovParams(uint64(m.MaxBytes)) + } + if m.MaxGas != 0 { + n += 1 + sovParams(uint64(m.MaxGas)) + } + if m.TimeIotaMs != 0 { + n += 1 + sovParams(uint64(m.TimeIotaMs)) + } + return n +} + +func (m *EvidenceParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxAgeNumBlocks != 0 { + n += 1 + sovParams(uint64(m.MaxAgeNumBlocks)) + } + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration) + n += 1 + l + sovParams(uint64(l)) + if m.MaxBytes != 0 { + n += 1 + sovParams(uint64(m.MaxBytes)) + } + return n +} + +func (m *ValidatorParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PubKeyTypes) > 0 { + for _, s := range m.PubKeyTypes { + l = len(s) + n += 1 + l + sovParams(uint64(l)) + } + } + return n +} + +func (m *VersionParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AppVersion != 0 { + n += 1 + sovParams(uint64(m.AppVersion)) + } + return n +} + +func (m *HashedParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockMaxBytes != 0 { + n += 1 + sovParams(uint64(m.BlockMaxBytes)) + } + if m.BlockMaxGas != 0 { + n += 1 + sovParams(uint64(m.BlockMaxGas)) + } + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ConsensusParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Evidence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxBytes", wireType) + } + m.MaxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxGas", wireType) + } + m.MaxGas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.MaxGas |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeIotaMs", wireType) + } + m.TimeIotaMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeIotaMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EvidenceParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvidenceParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvidenceParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxAgeNumBlocks", wireType) + } + m.MaxAgeNumBlocks = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxAgeNumBlocks |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxAgeDuration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.MaxAgeDuration, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxBytes", wireType) + } + m.MaxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatorParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubKeyTypes = append(m.PubKeyTypes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VersionParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VersionParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VersionParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AppVersion", wireType) + } + m.AppVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AppVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashedParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashedParams: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashedParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockMaxBytes", wireType) + } + m.BlockMaxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockMaxBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockMaxGas", wireType) + } + m.BlockMaxGas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockMaxGas |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/types/params.proto b/proto/tendermint/types/params.proto new file mode 100644 index 000000000..0de7d846f --- /dev/null +++ b/proto/tendermint/types/params.proto @@ -0,0 +1,80 @@ +syntax = "proto3"; +package tendermint.types; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + +import "gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; + +option (gogoproto.equal_all) = true; + +// ConsensusParams contains consensus critical parameters that determine the +// validity of blocks. 
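+//
+// An illustrative configuration (example values only; these are editorial
+// assumptions for orientation, not normative defaults):
+//
+//   block:     {max_bytes: 22020096, max_gas: -1, time_iota_ms: 1000}
+//   evidence:  {max_age_num_blocks: 100000, max_age_duration: {seconds: 172800}, max_bytes: 1048576}
+//   validator: {pub_key_types: ["ed25519"]}
+//   version:   {app_version: 0}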
+message ConsensusParams {
+  BlockParams block = 1 [(gogoproto.nullable) = false];
+  EvidenceParams evidence = 2 [(gogoproto.nullable) = false];
+  ValidatorParams validator = 3 [(gogoproto.nullable) = false];
+  VersionParams version = 4 [(gogoproto.nullable) = false];
+}
+
+// BlockParams contains limits on the block size.
+message BlockParams {
+  // Max block size, in bytes.
+  // Note: must be greater than 0
+  int64 max_bytes = 1;
+  // Max gas per block.
+  // Note: must be greater than or equal to -1
+  int64 max_gas = 2;
+  // Minimum time increment between consecutive blocks (in milliseconds). If the
+  // block header timestamp is ahead of the system clock, decrease this value.
+  //
+  // Not exposed to the application.
+  int64 time_iota_ms = 3;
+}
+
+// EvidenceParams determine how we handle evidence of malfeasance.
+message EvidenceParams {
+  // Max age of evidence, in blocks.
+  //
+  // The basic formula for calculating this is: MaxAgeDuration / {average block
+  // time}. For example, a 48 h (172800 s) MaxAgeDuration with ~2 s blocks
+  // gives roughly 86400 blocks.
+  int64 max_age_num_blocks = 1;
+
+  // Max age of evidence, in time.
+  //
+  // It should correspond with an app's "unbonding period" or other similar
+  // mechanism for handling [Nothing-At-Stake
+  // attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed).
+  google.protobuf.Duration max_age_duration = 2
+      [(gogoproto.nullable) = false, (gogoproto.stdduration) = true];
+
+  // This sets the maximum size, in bytes, of total evidence that can be committed in a single block,
+  // and it should fall comfortably under the max block bytes.
+  // The default is 1048576 (1 MB).
+  int64 max_bytes = 3;
+}
+
+// ValidatorParams restrict the public key types validators can use.
+// NOTE: uses ABCI pubkey naming, not Amino names.
+message ValidatorParams {
+  option (gogoproto.populate) = true;
+  option (gogoproto.equal) = true;
+
+  repeated string pub_key_types = 1;
+}
+
+// VersionParams contains the ABCI application version.
+message VersionParams {
+  option (gogoproto.populate) = true;
+  option (gogoproto.equal) = true;
+
+  uint64 app_version = 1;
+}
+
+// HashedParams is a subset of ConsensusParams.
+//
+// It is hashed into the Header.ConsensusHash.
+message HashedParams {
+  int64 block_max_bytes = 1;
+  int64 block_max_gas = 2;
+}
diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go
new file mode 100644
index 000000000..27a936097
--- /dev/null
+++ b/proto/tendermint/types/types.pb.go
@@ -0,0 +1,4611 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: tendermint/types/types.proto
+
+package types
+
+import (
+	fmt "fmt"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	_ "github.com/gogo/protobuf/types"
+	github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+	crypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
+	version "github.com/tendermint/tendermint/proto/tendermint/version"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+	time "time"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// BlockIDFlag indicates which BlockID the signature is for.
+type BlockIDFlag int32
+
+const (
+	BlockIDFlagUnknown BlockIDFlag = 0
+	BlockIDFlagAbsent  BlockIDFlag = 1
+	BlockIDFlagCommit  BlockIDFlag = 2
+	BlockIDFlagNil     BlockIDFlag = 3
+)
+
+var BlockIDFlag_name = map[int32]string{
+	0: "BLOCK_ID_FLAG_UNKNOWN",
+	1: "BLOCK_ID_FLAG_ABSENT",
+	2: "BLOCK_ID_FLAG_COMMIT",
+	3: "BLOCK_ID_FLAG_NIL",
+}
+
+var BlockIDFlag_value = map[string]int32{
+	"BLOCK_ID_FLAG_UNKNOWN": 0,
+	"BLOCK_ID_FLAG_ABSENT":  1,
+	"BLOCK_ID_FLAG_COMMIT":  2,
+	"BLOCK_ID_FLAG_NIL":     3,
+}
+
+func (x BlockIDFlag) String() string {
+	return proto.EnumName(BlockIDFlag_name, int32(x))
+}
+
+func (BlockIDFlag) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_d3a6e55e2345de56, []int{0}
+}
+
+// SignedMsgType is a type of signed message in the consensus.
+type SignedMsgType int32
+
+const (
+	UnknownType SignedMsgType = 0
+	// Votes
+	PrevoteType   SignedMsgType = 1
+	PrecommitType SignedMsgType = 2
+	// Proposals
+	ProposalType SignedMsgType = 32
+)
+
+var SignedMsgType_name = map[int32]string{
+	0:  "SIGNED_MSG_TYPE_UNKNOWN",
+	1:  "SIGNED_MSG_TYPE_PREVOTE",
+	2:  "SIGNED_MSG_TYPE_PRECOMMIT",
+	32: "SIGNED_MSG_TYPE_PROPOSAL",
+}
+
+var SignedMsgType_value = map[string]int32{
+	"SIGNED_MSG_TYPE_UNKNOWN":   0,
+	"SIGNED_MSG_TYPE_PREVOTE":   1,
+	"SIGNED_MSG_TYPE_PRECOMMIT": 2,
+	"SIGNED_MSG_TYPE_PROPOSAL":  32,
+}
+
+func (x SignedMsgType) String() string {
+	return proto.EnumName(SignedMsgType_name, int32(x))
+}
+
+func (SignedMsgType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_d3a6e55e2345de56, []int{1}
+}
+
+// PartSetHeader
+type PartSetHeader struct {
+	Total uint32 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"`
+	Hash  []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"`
+}
+
+func (m *PartSetHeader) Reset()         { *m = PartSetHeader{} }
+func (m *PartSetHeader) String() string { return proto.CompactTextString(m) }
+func (*PartSetHeader) ProtoMessage()    {}
+func (*PartSetHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d3a6e55e2345de56, []int{0}
+}
+func (m *PartSetHeader) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PartSetHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PartSetHeader.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PartSetHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PartSetHeader.Merge(m, src)
+}
+func (m *PartSetHeader) XXX_Size() int {
+	return m.Size()
+}
+func (m *PartSetHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_PartSetHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PartSetHeader proto.InternalMessageInfo
+
+func (m *PartSetHeader) GetTotal() uint32 {
+	if m != nil {
+		return m.Total
+	}
+	return 0
+}
+
+func (m *PartSetHeader) GetHash() []byte {
+	if m != nil {
+		return m.Hash
+	}
+	return nil
+}
+
+type Part struct {
+	Index uint32       `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
+	Bytes []byte       `protobuf:"bytes,2,opt,name=bytes,proto3" json:"bytes,omitempty"`
+	Proof crypto.Proof `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof"`
+}
+
+func (m *Part) Reset()         { *m = Part{} }
+func (m *Part) String() string { return proto.CompactTextString(m) }
+func (*Part) ProtoMessage()    {}
+func (*Part) Descriptor() 
([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{1} +} +func (m *Part) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Part) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Part.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Part) XXX_Merge(src proto.Message) { + xxx_messageInfo_Part.Merge(m, src) +} +func (m *Part) XXX_Size() int { + return m.Size() +} +func (m *Part) XXX_DiscardUnknown() { + xxx_messageInfo_Part.DiscardUnknown(m) +} + +var xxx_messageInfo_Part proto.InternalMessageInfo + +func (m *Part) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *Part) GetBytes() []byte { + if m != nil { + return m.Bytes + } + return nil +} + +func (m *Part) GetProof() crypto.Proof { + if m != nil { + return m.Proof + } + return crypto.Proof{} +} + +// BlockID +type BlockID struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + PartSetHeader PartSetHeader `protobuf:"bytes,2,opt,name=part_set_header,json=partSetHeader,proto3" json:"part_set_header"` +} + +func (m *BlockID) Reset() { *m = BlockID{} } +func (m *BlockID) String() string { return proto.CompactTextString(m) } +func (*BlockID) ProtoMessage() {} +func (*BlockID) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{2} +} +func (m *BlockID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockID) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockID.Merge(m, src) +} +func (m *BlockID) XXX_Size() int { + return m.Size() +} +func (m *BlockID) XXX_DiscardUnknown() { + xxx_messageInfo_BlockID.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockID proto.InternalMessageInfo + +func (m *BlockID) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *BlockID) GetPartSetHeader() PartSetHeader { + if m != nil { + return m.PartSetHeader + } + return PartSetHeader{} +} + +// Header defines the structure of a Tendermint block header. 
+type Header struct { + // basic block info + Version version.Consensus `protobuf:"bytes,1,opt,name=version,proto3" json:"version"` + ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"` + // prev block info + LastBlockId BlockID `protobuf:"bytes,5,opt,name=last_block_id,json=lastBlockId,proto3" json:"last_block_id"` + // hashes of block data + LastCommitHash []byte `protobuf:"bytes,6,opt,name=last_commit_hash,json=lastCommitHash,proto3" json:"last_commit_hash,omitempty"` + DataHash []byte `protobuf:"bytes,7,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + // hashes from the app output from the prev block + ValidatorsHash []byte `protobuf:"bytes,8,opt,name=validators_hash,json=validatorsHash,proto3" json:"validators_hash,omitempty"` + NextValidatorsHash []byte `protobuf:"bytes,9,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + ConsensusHash []byte `protobuf:"bytes,10,opt,name=consensus_hash,json=consensusHash,proto3" json:"consensus_hash,omitempty"` + AppHash []byte `protobuf:"bytes,11,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + LastResultsHash []byte `protobuf:"bytes,12,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` + // consensus info + EvidenceHash []byte `protobuf:"bytes,13,opt,name=evidence_hash,json=evidenceHash,proto3" json:"evidence_hash,omitempty"` + ProposerAddress []byte `protobuf:"bytes,14,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{3} +} +func (m *Header) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Header.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(m, src) +} +func (m *Header) XXX_Size() int { + return m.Size() +} +func (m *Header) XXX_DiscardUnknown() { + xxx_messageInfo_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_Header proto.InternalMessageInfo + +func (m *Header) GetVersion() version.Consensus { + if m != nil { + return m.Version + } + return version.Consensus{} +} + +func (m *Header) GetChainID() string { + if m != nil { + return m.ChainID + } + return "" +} + +func (m *Header) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Header) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} +} + +func (m *Header) GetLastBlockId() BlockID { + if m != nil { + return m.LastBlockId + } + return BlockID{} +} + +func (m *Header) GetLastCommitHash() []byte { + if m != nil { + return m.LastCommitHash + } + return nil +} + +func (m *Header) GetDataHash() []byte { + if m != nil { + return m.DataHash + } + return nil +} + +func (m *Header) GetValidatorsHash() []byte { + if m != nil { + return m.ValidatorsHash + } + return nil +} + +func (m *Header) 
GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash + } + return nil +} + +func (m *Header) GetConsensusHash() []byte { + if m != nil { + return m.ConsensusHash + } + return nil +} + +func (m *Header) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +func (m *Header) GetLastResultsHash() []byte { + if m != nil { + return m.LastResultsHash + } + return nil +} + +func (m *Header) GetEvidenceHash() []byte { + if m != nil { + return m.EvidenceHash + } + return nil +} + +func (m *Header) GetProposerAddress() []byte { + if m != nil { + return m.ProposerAddress + } + return nil +} + +// Data contains the set of transactions included in the block +type Data struct { + // Txs that will be applied by state @ block.Height+1. + // NOTE: not all txs here are valid. We're just agreeing on the order first. + // This means that block.AppHash does not include these txs. + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` +} + +func (m *Data) Reset() { *m = Data{} } +func (m *Data) String() string { return proto.CompactTextString(m) } +func (*Data) ProtoMessage() {} +func (*Data) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{4} +} +func (m *Data) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Data) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Data.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Data) XXX_Merge(src proto.Message) { + xxx_messageInfo_Data.Merge(m, src) +} +func (m *Data) XXX_Size() int { + return m.Size() +} +func (m *Data) XXX_DiscardUnknown() { + xxx_messageInfo_Data.DiscardUnknown(m) +} + +var xxx_messageInfo_Data proto.InternalMessageInfo + +func (m *Data) GetTxs() [][]byte { + if m != nil { + return m.Txs + } + return nil +} + +// Vote represents a prevote, precommit, or commit vote from validators for +// consensus. 
+type Vote struct { + Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,3,opt,name=round,proto3" json:"round,omitempty"` + BlockID BlockID `protobuf:"bytes,4,opt,name=block_id,json=blockId,proto3" json:"block_id"` + Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + ValidatorAddress []byte `protobuf:"bytes,6,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"` + ValidatorIndex int32 `protobuf:"varint,7,opt,name=validator_index,json=validatorIndex,proto3" json:"validator_index,omitempty"` + Signature []byte `protobuf:"bytes,8,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (m *Vote) Reset() { *m = Vote{} } +func (m *Vote) String() string { return proto.CompactTextString(m) } +func (*Vote) ProtoMessage() {} +func (*Vote) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{5} +} +func (m *Vote) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Vote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Vote.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Vote) XXX_Merge(src proto.Message) { + xxx_messageInfo_Vote.Merge(m, src) +} +func (m *Vote) XXX_Size() int { + return m.Size() +} +func (m *Vote) XXX_DiscardUnknown() { + xxx_messageInfo_Vote.DiscardUnknown(m) +} + +var xxx_messageInfo_Vote proto.InternalMessageInfo + +func (m *Vote) GetType() SignedMsgType { + if m != nil { + return m.Type + } + return UnknownType +} + +func (m *Vote) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Vote) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *Vote) GetBlockID() BlockID { + if m != nil { + return m.BlockID + } + return BlockID{} +} + +func (m *Vote) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *Vote) GetValidatorAddress() []byte { + if m != nil { + return m.ValidatorAddress + } + return nil +} + +func (m *Vote) GetValidatorIndex() int32 { + if m != nil { + return m.ValidatorIndex + } + return 0 +} + +func (m *Vote) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +// Commit contains the evidence that a block was committed by a set of validators. 
+type Commit struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` + BlockID BlockID `protobuf:"bytes,3,opt,name=block_id,json=blockId,proto3" json:"block_id"` + Signatures []CommitSig `protobuf:"bytes,4,rep,name=signatures,proto3" json:"signatures"` +} + +func (m *Commit) Reset() { *m = Commit{} } +func (m *Commit) String() string { return proto.CompactTextString(m) } +func (*Commit) ProtoMessage() {} +func (*Commit) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{6} +} +func (m *Commit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Commit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Commit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Commit) XXX_Merge(src proto.Message) { + xxx_messageInfo_Commit.Merge(m, src) +} +func (m *Commit) XXX_Size() int { + return m.Size() +} +func (m *Commit) XXX_DiscardUnknown() { + xxx_messageInfo_Commit.DiscardUnknown(m) +} + +var xxx_messageInfo_Commit proto.InternalMessageInfo + +func (m *Commit) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Commit) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *Commit) GetBlockID() BlockID { + if m != nil { + return m.BlockID + } + return BlockID{} +} + +func (m *Commit) GetSignatures() []CommitSig { + if m != nil { + return m.Signatures + } + return nil +} + +// CommitSig is a part of the Vote included in a Commit. +type CommitSig struct { + BlockIdFlag BlockIDFlag `protobuf:"varint,1,opt,name=block_id_flag,json=blockIdFlag,proto3,enum=tendermint.types.BlockIDFlag" json:"block_id_flag,omitempty"` + ValidatorAddress []byte `protobuf:"bytes,2,opt,name=validator_address,json=validatorAddress,proto3" json:"validator_address,omitempty"` + Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (m *CommitSig) Reset() { *m = CommitSig{} } +func (m *CommitSig) String() string { return proto.CompactTextString(m) } +func (*CommitSig) ProtoMessage() {} +func (*CommitSig) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{7} +} +func (m *CommitSig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommitSig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommitSig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CommitSig) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitSig.Merge(m, src) +} +func (m *CommitSig) XXX_Size() int { + return m.Size() +} +func (m *CommitSig) XXX_DiscardUnknown() { + xxx_messageInfo_CommitSig.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitSig proto.InternalMessageInfo + +func (m *CommitSig) GetBlockIdFlag() BlockIDFlag { + if m != nil { + return m.BlockIdFlag + } + return BlockIDFlagUnknown +} + +func (m *CommitSig) GetValidatorAddress() []byte { + if m != nil { + return m.ValidatorAddress + } + return nil +} + +func (m *CommitSig) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return 
time.Time{} +} + +func (m *CommitSig) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +type Proposal struct { + Type SignedMsgType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.SignedMsgType" json:"type,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + Round int32 `protobuf:"varint,3,opt,name=round,proto3" json:"round,omitempty"` + PolRound int32 `protobuf:"varint,4,opt,name=pol_round,json=polRound,proto3" json:"pol_round,omitempty"` + BlockID BlockID `protobuf:"bytes,5,opt,name=block_id,json=blockId,proto3" json:"block_id"` + Timestamp time.Time `protobuf:"bytes,6,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + Signature []byte `protobuf:"bytes,7,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (m *Proposal) Reset() { *m = Proposal{} } +func (m *Proposal) String() string { return proto.CompactTextString(m) } +func (*Proposal) ProtoMessage() {} +func (*Proposal) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{8} +} +func (m *Proposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Proposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Proposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Proposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_Proposal.Merge(m, src) +} +func (m *Proposal) XXX_Size() int { + return m.Size() +} +func (m *Proposal) XXX_DiscardUnknown() { + xxx_messageInfo_Proposal.DiscardUnknown(m) +} + +var xxx_messageInfo_Proposal proto.InternalMessageInfo + +func (m *Proposal) GetType() SignedMsgType { + if m != nil { + return m.Type + } + return UnknownType +} + +func (m *Proposal) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Proposal) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *Proposal) GetPolRound() int32 { + if m != nil { + return m.PolRound + } + return 0 +} + +func (m *Proposal) GetBlockID() BlockID { + if m != nil { + return m.BlockID + } + return BlockID{} +} + +func (m *Proposal) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *Proposal) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +type SignedHeader struct { + Header *Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Commit *Commit `protobuf:"bytes,2,opt,name=commit,proto3" json:"commit,omitempty"` +} + +func (m *SignedHeader) Reset() { *m = SignedHeader{} } +func (m *SignedHeader) String() string { return proto.CompactTextString(m) } +func (*SignedHeader) ProtoMessage() {} +func (*SignedHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{9} +} +func (m *SignedHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignedHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignedHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignedHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignedHeader.Merge(m, src) +} +func (m *SignedHeader) XXX_Size() int { + return m.Size() +} +func (m *SignedHeader) 
XXX_DiscardUnknown() { + xxx_messageInfo_SignedHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_SignedHeader proto.InternalMessageInfo + +func (m *SignedHeader) GetHeader() *Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *SignedHeader) GetCommit() *Commit { + if m != nil { + return m.Commit + } + return nil +} + +type LightBlock struct { + SignedHeader *SignedHeader `protobuf:"bytes,1,opt,name=signed_header,json=signedHeader,proto3" json:"signed_header,omitempty"` + ValidatorSet *ValidatorSet `protobuf:"bytes,2,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` +} + +func (m *LightBlock) Reset() { *m = LightBlock{} } +func (m *LightBlock) String() string { return proto.CompactTextString(m) } +func (*LightBlock) ProtoMessage() {} +func (*LightBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{10} +} +func (m *LightBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LightBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LightBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LightBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_LightBlock.Merge(m, src) +} +func (m *LightBlock) XXX_Size() int { + return m.Size() +} +func (m *LightBlock) XXX_DiscardUnknown() { + xxx_messageInfo_LightBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_LightBlock proto.InternalMessageInfo + +func (m *LightBlock) GetSignedHeader() *SignedHeader { + if m != nil { + return m.SignedHeader + } + return nil +} + +func (m *LightBlock) GetValidatorSet() *ValidatorSet { + if m != nil { + return m.ValidatorSet + } + return nil +} + +type BlockMeta struct { + BlockID BlockID `protobuf:"bytes,1,opt,name=block_id,json=blockId,proto3" json:"block_id"` + BlockSize int64 `protobuf:"varint,2,opt,name=block_size,json=blockSize,proto3" json:"block_size,omitempty"` + Header Header `protobuf:"bytes,3,opt,name=header,proto3" json:"header"` + NumTxs int64 `protobuf:"varint,4,opt,name=num_txs,json=numTxs,proto3" json:"num_txs,omitempty"` +} + +func (m *BlockMeta) Reset() { *m = BlockMeta{} } +func (m *BlockMeta) String() string { return proto.CompactTextString(m) } +func (*BlockMeta) ProtoMessage() {} +func (*BlockMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{11} +} +func (m *BlockMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockMeta.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockMeta.Merge(m, src) +} +func (m *BlockMeta) XXX_Size() int { + return m.Size() +} +func (m *BlockMeta) XXX_DiscardUnknown() { + xxx_messageInfo_BlockMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockMeta proto.InternalMessageInfo + +func (m *BlockMeta) GetBlockID() BlockID { + if m != nil { + return m.BlockID + } + return BlockID{} +} + +func (m *BlockMeta) GetBlockSize() int64 { + if m != nil { + return m.BlockSize + } + return 0 +} + +func (m *BlockMeta) GetHeader() Header { + if m != nil { + return m.Header + } + return Header{} +} + +func (m *BlockMeta) GetNumTxs() int64 { + 
if m != nil { + return m.NumTxs + } + return 0 +} + +// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. +type TxProof struct { + RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Proof *crypto.Proof `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof,omitempty"` +} + +func (m *TxProof) Reset() { *m = TxProof{} } +func (m *TxProof) String() string { return proto.CompactTextString(m) } +func (*TxProof) ProtoMessage() {} +func (*TxProof) Descriptor() ([]byte, []int) { + return fileDescriptor_d3a6e55e2345de56, []int{12} +} +func (m *TxProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TxProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TxProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TxProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_TxProof.Merge(m, src) +} +func (m *TxProof) XXX_Size() int { + return m.Size() +} +func (m *TxProof) XXX_DiscardUnknown() { + xxx_messageInfo_TxProof.DiscardUnknown(m) +} + +var xxx_messageInfo_TxProof proto.InternalMessageInfo + +func (m *TxProof) GetRootHash() []byte { + if m != nil { + return m.RootHash + } + return nil +} + +func (m *TxProof) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *TxProof) GetProof() *crypto.Proof { + if m != nil { + return m.Proof + } + return nil +} + +func init() { + proto.RegisterEnum("tendermint.types.BlockIDFlag", BlockIDFlag_name, BlockIDFlag_value) + proto.RegisterEnum("tendermint.types.SignedMsgType", SignedMsgType_name, SignedMsgType_value) + proto.RegisterType((*PartSetHeader)(nil), "tendermint.types.PartSetHeader") + proto.RegisterType((*Part)(nil), "tendermint.types.Part") + proto.RegisterType((*BlockID)(nil), "tendermint.types.BlockID") + proto.RegisterType((*Header)(nil), "tendermint.types.Header") + proto.RegisterType((*Data)(nil), "tendermint.types.Data") + proto.RegisterType((*Vote)(nil), "tendermint.types.Vote") + proto.RegisterType((*Commit)(nil), "tendermint.types.Commit") + proto.RegisterType((*CommitSig)(nil), "tendermint.types.CommitSig") + proto.RegisterType((*Proposal)(nil), "tendermint.types.Proposal") + proto.RegisterType((*SignedHeader)(nil), "tendermint.types.SignedHeader") + proto.RegisterType((*LightBlock)(nil), "tendermint.types.LightBlock") + proto.RegisterType((*BlockMeta)(nil), "tendermint.types.BlockMeta") + proto.RegisterType((*TxProof)(nil), "tendermint.types.TxProof") +} + +func init() { proto.RegisterFile("tendermint/types/types.proto", fileDescriptor_d3a6e55e2345de56) } + +var fileDescriptor_d3a6e55e2345de56 = []byte{ + // 1314 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xda, 0x9b, 0xd8, 0x7e, 0xb6, 0x13, 0x67, 0x95, 0xb6, 0xae, 0xdb, 0x38, 0x2b, 0x23, + 0x20, 0x2d, 0x68, 0x53, 0x52, 0xc4, 0x9f, 0x03, 0x07, 0xdb, 0x49, 0x5b, 0xab, 0x89, 0x63, 0xd6, + 0x6e, 0x11, 0x5c, 0x56, 0x6b, 0xef, 0xd4, 0x5e, 0xba, 0xde, 0x59, 0xed, 0x8c, 0x43, 0xd2, 0x4f, + 0x80, 0x72, 0xea, 0x89, 0x5b, 0x4e, 0x70, 0xe0, 0xce, 0x17, 0x40, 0x9c, 0x7a, 0xec, 0x0d, 0x2e, + 0x14, 0x94, 0x4a, 0x88, 0x8f, 0x81, 0xe6, 0x8f, 0xd7, 0xeb, 0x38, 0x86, 0xaa, 0xaa, 0xb8, 0x58, + 0x3b, 0xef, 0xfd, 
0xde, 0xcc, 0x7b, 0xbf, 0xf7, 0x9b, 0x3f, 0x86, 0xeb, 0x14, 0xf9, 0x0e, 0x0a, + 0x87, 0xae, 0x4f, 0xb7, 0xe8, 0x71, 0x80, 0x88, 0xf8, 0x35, 0x82, 0x10, 0x53, 0xac, 0x15, 0x26, + 0x5e, 0x83, 0xdb, 0x4b, 0x6b, 0x7d, 0xdc, 0xc7, 0xdc, 0xb9, 0xc5, 0xbe, 0x04, 0xae, 0xb4, 0xd1, + 0xc7, 0xb8, 0xef, 0xa1, 0x2d, 0x3e, 0xea, 0x8e, 0x1e, 0x6d, 0x51, 0x77, 0x88, 0x08, 0xb5, 0x87, + 0x81, 0x04, 0xac, 0xc7, 0x96, 0xe9, 0x85, 0xc7, 0x01, 0xc5, 0x0c, 0x8b, 0x1f, 0x49, 0x77, 0x39, + 0xe6, 0x3e, 0x44, 0x21, 0x71, 0xb1, 0x1f, 0xcf, 0xa3, 0xa4, 0xcf, 0x64, 0x79, 0x68, 0x7b, 0xae, + 0x63, 0x53, 0x1c, 0x0a, 0x44, 0xe5, 0x53, 0xc8, 0xb7, 0xec, 0x90, 0xb6, 0x11, 0xbd, 0x87, 0x6c, + 0x07, 0x85, 0xda, 0x1a, 0x2c, 0x52, 0x4c, 0x6d, 0xaf, 0xa8, 0xe8, 0xca, 0x66, 0xde, 0x14, 0x03, + 0x4d, 0x03, 0x75, 0x60, 0x93, 0x41, 0x31, 0xa1, 0x2b, 0x9b, 0x39, 0x93, 0x7f, 0x57, 0x06, 0xa0, + 0xb2, 0x50, 0x16, 0xe1, 0xfa, 0x0e, 0x3a, 0x1a, 0x47, 0xf0, 0x01, 0xb3, 0x76, 0x8f, 0x29, 0x22, + 0x32, 0x44, 0x0c, 0xb4, 0x0f, 0x61, 0x91, 0xe7, 0x5f, 0x4c, 0xea, 0xca, 0x66, 0x76, 0xbb, 0x68, + 0xc4, 0x88, 0x12, 0xf5, 0x19, 0x2d, 0xe6, 0xaf, 0xa9, 0xcf, 0x5e, 0x6c, 0x2c, 0x98, 0x02, 0x5c, + 0xf1, 0x20, 0x55, 0xf3, 0x70, 0xef, 0x71, 0x63, 0x27, 0x4a, 0x44, 0x99, 0x24, 0xa2, 0xed, 0xc3, + 0x4a, 0x60, 0x87, 0xd4, 0x22, 0x88, 0x5a, 0x03, 0x5e, 0x05, 0x5f, 0x34, 0xbb, 0xbd, 0x61, 0x9c, + 0xef, 0x83, 0x31, 0x55, 0xac, 0x5c, 0x25, 0x1f, 0xc4, 0x8d, 0x95, 0xbf, 0x54, 0x58, 0x92, 0x64, + 0x7c, 0x06, 0x29, 0x49, 0x2b, 0x5f, 0x30, 0xbb, 0xbd, 0x1e, 0x9f, 0x51, 0xba, 0x8c, 0x3a, 0xf6, + 0x09, 0xf2, 0xc9, 0x88, 0xc8, 0xf9, 0xc6, 0x31, 0xda, 0x3b, 0x90, 0xee, 0x0d, 0x6c, 0xd7, 0xb7, + 0x5c, 0x87, 0x67, 0x94, 0xa9, 0x65, 0xcf, 0x5e, 0x6c, 0xa4, 0xea, 0xcc, 0xd6, 0xd8, 0x31, 0x53, + 0xdc, 0xd9, 0x70, 0xb4, 0xcb, 0xb0, 0x34, 0x40, 0x6e, 0x7f, 0x40, 0x39, 0x2d, 0x49, 0x53, 0x8e, + 0xb4, 0x4f, 0x40, 0x65, 0x82, 0x28, 0xaa, 0x7c, 0xed, 0x92, 0x21, 0xd4, 0x62, 0x8c, 0xd5, 0x62, + 0x74, 0xc6, 0x6a, 0xa9, 0xa5, 0xd9, 0xc2, 0x4f, 0xff, 0xd8, 0x50, 0x4c, 0x1e, 0xa1, 0xd5, 0x21, + 0xef, 0xd9, 0x84, 0x5a, 0x5d, 0x46, 0x1b, 0x5b, 0x7e, 0x91, 0x4f, 0x71, 0x75, 0x96, 0x10, 0x49, + 0xac, 0x4c, 0x3d, 0xcb, 0xa2, 0x84, 0xc9, 0xd1, 0x36, 0xa1, 0xc0, 0x27, 0xe9, 0xe1, 0xe1, 0xd0, + 0xa5, 0x16, 0xe7, 0x7d, 0x89, 0xf3, 0xbe, 0xcc, 0xec, 0x75, 0x6e, 0xbe, 0xc7, 0x3a, 0x70, 0x0d, + 0x32, 0x8e, 0x4d, 0x6d, 0x01, 0x49, 0x71, 0x48, 0x9a, 0x19, 0xb8, 0xf3, 0x5d, 0x58, 0x89, 0x54, + 0x47, 0x04, 0x24, 0x2d, 0x66, 0x99, 0x98, 0x39, 0xf0, 0x16, 0xac, 0xf9, 0xe8, 0x88, 0x5a, 0xe7, + 0xd1, 0x19, 0x8e, 0xd6, 0x98, 0xef, 0xe1, 0x74, 0xc4, 0xdb, 0xb0, 0xdc, 0x1b, 0x93, 0x2f, 0xb0, + 0xc0, 0xb1, 0xf9, 0xc8, 0xca, 0x61, 0x57, 0x21, 0x6d, 0x07, 0x81, 0x00, 0x64, 0x39, 0x20, 0x65, + 0x07, 0x01, 0x77, 0xdd, 0x84, 0x55, 0x5e, 0x63, 0x88, 0xc8, 0xc8, 0xa3, 0x72, 0x92, 0x1c, 0xc7, + 0xac, 0x30, 0x87, 0x29, 0xec, 0x1c, 0xfb, 0x16, 0xe4, 0xd1, 0xa1, 0xeb, 0x20, 0xbf, 0x87, 0x04, + 0x2e, 0xcf, 0x71, 0xb9, 0xb1, 0x91, 0x83, 0x6e, 0x40, 0x21, 0x08, 0x71, 0x80, 0x09, 0x0a, 0x2d, + 0xdb, 0x71, 0x42, 0x44, 0x48, 0x71, 0x59, 0xcc, 0x37, 0xb6, 0x57, 0x85, 0xb9, 0x52, 0x04, 0x75, + 0xc7, 0xa6, 0xb6, 0x56, 0x80, 0x24, 0x3d, 0x22, 0x45, 0x45, 0x4f, 0x6e, 0xe6, 0x4c, 0xf6, 0x59, + 0xf9, 0x3b, 0x01, 0xea, 0x43, 0x4c, 0x91, 0x76, 0x1b, 0x54, 0xd6, 0x26, 0xae, 0xbe, 0xe5, 0x8b, + 0xf4, 0xdc, 0x76, 0xfb, 0x3e, 0x72, 0xf6, 0x49, 0xbf, 0x73, 0x1c, 0x20, 0x93, 0x83, 0x63, 0x72, + 0x4a, 0x4c, 0xc9, 0x69, 0x0d, 0x16, 0x43, 0x3c, 0xf2, 0x1d, 0xae, 0xb2, 0x45, 0x53, 0x0c, 0xb4, + 0x5d, 0x48, 0x47, 0x2a, 0x51, 0xff, 0x4b, 
0x25, 0x2b, 0x4c, 0x25, 0x4c, 0xc3, 0xd2, 0x60, 0xa6, + 0xba, 0x52, 0x2c, 0x35, 0xc8, 0x44, 0x87, 0x97, 0x54, 0xdb, 0xab, 0x09, 0x76, 0x12, 0xa6, 0xbd, + 0x07, 0xab, 0x51, 0xef, 0x23, 0xf2, 0x84, 0xe2, 0x0a, 0x91, 0x43, 0xb2, 0x37, 0x25, 0x2b, 0x4b, + 0x1c, 0x40, 0x29, 0x5e, 0xd7, 0x44, 0x56, 0x0d, 0x7e, 0x12, 0x5d, 0x87, 0x0c, 0x71, 0xfb, 0xbe, + 0x4d, 0x47, 0x21, 0x92, 0xca, 0x9b, 0x18, 0x2a, 0x3f, 0x2b, 0xb0, 0x24, 0x94, 0x1c, 0xe3, 0x4d, + 0xb9, 0x98, 0xb7, 0xc4, 0x3c, 0xde, 0x92, 0xaf, 0xcf, 0x5b, 0x15, 0x20, 0x4a, 0x86, 0x14, 0x55, + 0x3d, 0xb9, 0x99, 0xdd, 0xbe, 0x36, 0x3b, 0x91, 0x48, 0xb1, 0xed, 0xf6, 0xe5, 0x46, 0x8d, 0x05, + 0x55, 0x7e, 0x57, 0x20, 0x13, 0xf9, 0xb5, 0x2a, 0xe4, 0xc7, 0x79, 0x59, 0x8f, 0x3c, 0xbb, 0x2f, + 0xb5, 0xb3, 0x3e, 0x37, 0xb9, 0x3b, 0x9e, 0xdd, 0x37, 0xb3, 0x32, 0x1f, 0x36, 0xb8, 0xb8, 0x0f, + 0x89, 0x39, 0x7d, 0x98, 0x6a, 0x7c, 0xf2, 0xf5, 0x1a, 0x3f, 0xd5, 0x22, 0xf5, 0x7c, 0x8b, 0x7e, + 0x4a, 0x40, 0xba, 0xc5, 0xf7, 0x8e, 0xed, 0xfd, 0x1f, 0x3b, 0xe2, 0x1a, 0x64, 0x02, 0xec, 0x59, + 0xc2, 0xa3, 0x72, 0x4f, 0x3a, 0xc0, 0x9e, 0x39, 0xd3, 0xf6, 0xc5, 0x37, 0xb4, 0x5d, 0x96, 0xde, + 0x00, 0x6b, 0xa9, 0xf3, 0xac, 0x85, 0x90, 0x13, 0x54, 0xc8, 0xbb, 0xec, 0x16, 0xe3, 0x80, 0x5f, + 0x8e, 0xca, 0xec, 0xdd, 0x2b, 0xd2, 0x16, 0x48, 0x53, 0xe2, 0x58, 0x84, 0x38, 0xfa, 0xe5, 0x75, + 0x5a, 0x9c, 0x27, 0x4b, 0x53, 0xe2, 0x2a, 0xdf, 0x29, 0x00, 0x7b, 0x8c, 0x59, 0x5e, 0x2f, 0xbb, + 0x85, 0x08, 0x4f, 0xc1, 0x9a, 0x5a, 0xb9, 0x3c, 0xaf, 0x69, 0x72, 0xfd, 0x1c, 0x89, 0xe7, 0x5d, + 0x87, 0xfc, 0x44, 0x8c, 0x04, 0x8d, 0x93, 0xb9, 0x60, 0x92, 0xe8, 0x72, 0x68, 0x23, 0x6a, 0xe6, + 0x0e, 0x63, 0xa3, 0xca, 0x2f, 0x0a, 0x64, 0x78, 0x4e, 0xfb, 0x88, 0xda, 0x53, 0x3d, 0x54, 0x5e, + 0xbf, 0x87, 0xeb, 0x00, 0x62, 0x1a, 0xe2, 0x3e, 0x41, 0x52, 0x59, 0x19, 0x6e, 0x69, 0xbb, 0x4f, + 0x90, 0xf6, 0x51, 0x44, 0x78, 0xf2, 0xdf, 0x09, 0x97, 0x5b, 0x7a, 0x4c, 0xfb, 0x15, 0x48, 0xf9, + 0xa3, 0xa1, 0xc5, 0xae, 0x04, 0x55, 0xa8, 0xd5, 0x1f, 0x0d, 0x3b, 0x47, 0xa4, 0xf2, 0x35, 0xa4, + 0x3a, 0x47, 0xfc, 0x79, 0xc4, 0x24, 0x1a, 0x62, 0x2c, 0xef, 0x64, 0xf1, 0x16, 0x4a, 0x33, 0x03, + 0xbf, 0x82, 0x34, 0x50, 0xd9, 0xe5, 0x3b, 0x7e, 0xac, 0xb1, 0x6f, 0xcd, 0x78, 0xc5, 0x87, 0x97, + 0x7c, 0x72, 0xdd, 0xfc, 0x55, 0x81, 0x6c, 0xec, 0x7c, 0xd0, 0x3e, 0x80, 0x4b, 0xb5, 0xbd, 0x83, + 0xfa, 0x7d, 0xab, 0xb1, 0x63, 0xdd, 0xd9, 0xab, 0xde, 0xb5, 0x1e, 0x34, 0xef, 0x37, 0x0f, 0xbe, + 0x68, 0x16, 0x16, 0x4a, 0x97, 0x4f, 0x4e, 0x75, 0x2d, 0x86, 0x7d, 0xe0, 0x3f, 0xf6, 0xf1, 0x37, + 0xbe, 0xb6, 0x05, 0x6b, 0xd3, 0x21, 0xd5, 0x5a, 0x7b, 0xb7, 0xd9, 0x29, 0x28, 0xa5, 0x4b, 0x27, + 0xa7, 0xfa, 0x6a, 0x2c, 0xa2, 0xda, 0x25, 0xc8, 0xa7, 0xb3, 0x01, 0xf5, 0x83, 0xfd, 0xfd, 0x46, + 0xa7, 0x90, 0x98, 0x09, 0x90, 0x07, 0xf6, 0x0d, 0x58, 0x9d, 0x0e, 0x68, 0x36, 0xf6, 0x0a, 0xc9, + 0x92, 0x76, 0x72, 0xaa, 0x2f, 0xc7, 0xd0, 0x4d, 0xd7, 0x2b, 0xa5, 0xbf, 0xfd, 0xbe, 0xbc, 0xf0, + 0xe3, 0x0f, 0x65, 0x85, 0x55, 0x96, 0x9f, 0x3a, 0x23, 0xb4, 0xf7, 0xe1, 0x4a, 0xbb, 0x71, 0xb7, + 0xb9, 0xbb, 0x63, 0xed, 0xb7, 0xef, 0x5a, 0x9d, 0x2f, 0x5b, 0xbb, 0xb1, 0xea, 0x56, 0x4e, 0x4e, + 0xf5, 0xac, 0x2c, 0x69, 0x1e, 0xba, 0x65, 0xee, 0x3e, 0x3c, 0xe8, 0xec, 0x16, 0x14, 0x81, 0x6e, + 0x85, 0xe8, 0x10, 0x53, 0xc4, 0xd1, 0xb7, 0xe0, 0xea, 0x05, 0xe8, 0xa8, 0xb0, 0xd5, 0x93, 0x53, + 0x3d, 0xdf, 0x0a, 0x91, 0xd8, 0x3f, 0x3c, 0xc2, 0x80, 0xe2, 0x6c, 0xc4, 0x41, 0xeb, 0xa0, 0x5d, + 0xdd, 0x2b, 0xe8, 0xa5, 0xc2, 0xc9, 0xa9, 0x9e, 0x1b, 0x1f, 0x86, 0x0c, 0x3f, 0xa9, 0xac, 0xf6, + 0xf9, 0xb3, 0xb3, 0xb2, 0xf2, 0xfc, 0xac, 0xac, 0xfc, 0x79, 0x56, 
0x56, 0x9e, 0xbe, 0x2c, 0x2f, + 0x3c, 0x7f, 0x59, 0x5e, 0xf8, 0xed, 0x65, 0x79, 0xe1, 0xab, 0x8f, 0xfb, 0x2e, 0x1d, 0x8c, 0xba, + 0x46, 0x0f, 0x0f, 0xb7, 0xe2, 0x7f, 0x09, 0x26, 0x9f, 0xe2, 0xaf, 0xc9, 0xf9, 0xbf, 0x0b, 0xdd, + 0x25, 0x6e, 0xbf, 0xfd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4c, 0x78, 0x43, 0xdf, 0xef, 0x0c, + 0x00, 0x00, +} + +func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartSetHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartSetHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x12 + } + if m.Total != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Part) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Part) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Part) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Bytes) > 0 { + i -= len(m.Bytes) + copy(dAtA[i:], m.Bytes) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Bytes))) + i-- + dAtA[i] = 0x12 + } + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BlockID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.PartSetHeader.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Header) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Header) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProposerAddress) > 0 { + i -= len(m.ProposerAddress) + copy(dAtA[i:], m.ProposerAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) + i-- + dAtA[i] = 0x72 + } + if len(m.EvidenceHash) > 0 { + i -= len(m.EvidenceHash) + 
copy(dAtA[i:], m.EvidenceHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.EvidenceHash))) + i-- + dAtA[i] = 0x6a + } + if len(m.LastResultsHash) > 0 { + i -= len(m.LastResultsHash) + copy(dAtA[i:], m.LastResultsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) + i-- + dAtA[i] = 0x62 + } + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x5a + } + if len(m.ConsensusHash) > 0 { + i -= len(m.ConsensusHash) + copy(dAtA[i:], m.ConsensusHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ConsensusHash))) + i-- + dAtA[i] = 0x52 + } + if len(m.NextValidatorsHash) > 0 { + i -= len(m.NextValidatorsHash) + copy(dAtA[i:], m.NextValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i-- + dAtA[i] = 0x4a + } + if len(m.ValidatorsHash) > 0 { + i -= len(m.ValidatorsHash) + copy(dAtA[i:], m.ValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorsHash))) + i-- + dAtA[i] = 0x42 + } + if len(m.DataHash) > 0 { + i -= len(m.DataHash) + copy(dAtA[i:], m.DataHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.DataHash))) + i-- + dAtA[i] = 0x3a + } + if len(m.LastCommitHash) > 0 { + i -= len(m.LastCommitHash) + copy(dAtA[i:], m.LastCommitHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastCommitHash))) + i-- + dAtA[i] = 0x32 + } + { + size, err := m.LastBlockId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err4 != nil { + return 0, err4 + } + i -= n4 + i = encodeVarintTypes(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x22 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x18 + } + if len(m.ChainID) > 0 { + i -= len(m.ChainID) + copy(dAtA[i:], m.ChainID) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Data) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Data) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Data) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Vote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Vote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = 
encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0x42 + } + if m.ValidatorIndex != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ValidatorIndex)) + i-- + dAtA[i] = 0x38 + } + if len(m.ValidatorAddress) > 0 { + i -= len(m.ValidatorAddress) + copy(dAtA[i:], m.ValidatorAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) + i-- + dAtA[i] = 0x32 + } + n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err6 != nil { + return 0, err6 + } + i -= n6 + i = encodeVarintTypes(dAtA, i, uint64(n6)) + i-- + dAtA[i] = 0x2a + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x18 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Commit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Commit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signatures) > 0 { + for iNdEx := len(m.Signatures) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Signatures[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CommitSig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommitSig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitSig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0x22 + } + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err9 != nil { + return 0, err9 + } + i -= n9 + i = encodeVarintTypes(dAtA, i, uint64(n9)) + i-- + dAtA[i] = 0x1a + if len(m.ValidatorAddress) > 0 { + i -= len(m.ValidatorAddress) + copy(dAtA[i:], m.ValidatorAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorAddress))) + i-- + dAtA[i] = 0x12 + } + if m.BlockIdFlag != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockIdFlag)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func 
(m *Proposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Proposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Proposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0x3a + } + n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err10 != nil { + return 0, err10 + } + i -= n10 + i = encodeVarintTypes(dAtA, i, uint64(n10)) + i-- + dAtA[i] = 0x32 + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if m.PolRound != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.PolRound)) + i-- + dAtA[i] = 0x20 + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x18 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SignedHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignedHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignedHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Header != nil { + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LightBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LightBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LightBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ValidatorSet != nil { + { + size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.SignedHeader != nil { + { + size, err := m.SignedHeader.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BlockMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, 
err + } + return dAtA[:n], nil +} + +func (m *BlockMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NumTxs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.NumTxs)) + i-- + dAtA[i] = 0x20 + } + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.BlockSize != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockSize)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.BlockID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TxProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TxProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + if len(m.RootHash) > 0 { + i -= len(m.RootHash) + copy(dAtA[i:], m.RootHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.RootHash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PartSetHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Total != 0 { + n += 1 + sovTypes(uint64(m.Total)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Part) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + l = len(m.Bytes) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.Proof.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *BlockID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.PartSetHeader.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *Header) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Version.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.ChainID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + l = m.LastBlockId.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.LastCommitHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.DataHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + 
} + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ConsensusHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.LastResultsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.EvidenceHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ProposerAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Data) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *Vote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.ValidatorAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ValidatorIndex != 0 { + n += 1 + sovTypes(uint64(m.ValidatorIndex)) + } + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Commit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.Signatures) > 0 { + for _, e := range m.Signatures { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *CommitSig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BlockIdFlag != 0 { + n += 1 + sovTypes(uint64(m.BlockIdFlag)) + } + l = len(m.ValidatorAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Proposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if m.PolRound != 0 { + n += 1 + sovTypes(uint64(m.PolRound)) + } + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *SignedHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *LightBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignedHeader != nil { + l = m.SignedHeader.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.ValidatorSet != nil { + l = m.ValidatorSet.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *BlockMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.BlockID.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.BlockSize != 0 { + n += 
1 + sovTypes(uint64(m.BlockSize)) + } + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.NumTxs != 0 { + n += 1 + sovTypes(uint64(m.NumTxs)) + } + return n +} + +func (m *TxProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RootHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Proof != nil { + l = m.Proof.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PartSetHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartSetHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartSetHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Part) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Part: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Part: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bytes = append(m.Bytes[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Bytes == nil { + m.Bytes = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PartSetHeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PartSetHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Header) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Header: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastBlockId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastCommitHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastCommitHash = append(m.LastCommitHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastCommitHash == nil { + m.LastCommitHash = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) + if m.DataHash == nil { + m.DataHash = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorsHash = append(m.ValidatorsHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ValidatorsHash == nil { + m.ValidatorsHash = []byte{} + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) + if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConsensusHash = append(m.ConsensusHash[:0], dAtA[iNdEx:postIndex]...) + if m.ConsensusHash == nil { + m.ConsensusHash = []byte{} + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) + if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastResultsHash = append(m.LastResultsHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.LastResultsHash == nil { + m.LastResultsHash = []byte{} + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvidenceHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvidenceHash = append(m.EvidenceHash[:0], dAtA[iNdEx:postIndex]...) + if m.EvidenceHash == nil { + m.EvidenceHash = []byte{} + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) + if m.ProposerAddress == nil { + m.ProposerAddress = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Data) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Data: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Data: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Vote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vote: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vote: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) + if m.ValidatorAddress == nil { + m.ValidatorAddress = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorIndex", wireType) + } + m.ValidatorIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ValidatorIndex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) + if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Commit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Commit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Commit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signatures", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signatures = append(m.Signatures, CommitSig{}) + if err := m.Signatures[len(m.Signatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommitSig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommitSig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommitSig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockIdFlag", wireType) + } + m.BlockIdFlag = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockIdFlag |= BlockIDFlag(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorAddress = append(m.ValidatorAddress[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ValidatorAddress == nil { + m.ValidatorAddress = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) + if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Proposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Proposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Proposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SignedMsgType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PolRound", wireType) + } + m.PolRound = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PolRound |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
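+ // NOTE: tags this message does not recognize fall through to the default clause below, where skipTypes walks past their bytes; unknown fields are tolerated but dropped, not preserved.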
+ if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignedHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignedHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignedHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &Header{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Commit == nil { + m.Commit = &Commit{} + } + if err := m.Commit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LightBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LightBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LightBlock: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedHeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SignedHeader == nil { + m.SignedHeader = &SignedHeader{} + } + if err := m.SignedHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidatorSet == nil { + m.ValidatorSet = &ValidatorSet{} + } + if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BlockID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockSize", wireType) + } + m.BlockSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.BlockSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumTxs", wireType) + } + m.NumTxs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumTxs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TxProof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootHash = append(m.RootHash[:0], dAtA[iNdEx:postIndex]...) + if m.RootHash == nil { + m.RootHash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
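+ // NOTE: the byteLen prefix decoded above is a standard protobuf varint: seven payload bits per byte, high bit set on every byte but the last, so e.g. a length of 300 arrives as 0xAC 0x02 (44 + 2*128).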
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proof == nil { + m.Proof = &crypto.Proof{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/types/types.proto b/proto/tendermint/types/types.proto new file mode 100644 index 000000000..8d4f00972 --- /dev/null +++ b/proto/tendermint/types/types.proto @@ -0,0 +1,157 @@ +syntax = "proto3"; +package tendermint.types; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "tendermint/crypto/proof.proto"; +import "tendermint/version/types.proto"; +import "tendermint/types/validator.proto"; + +// BlockIDFlag indicates which BlockID the signature is for +enum BlockIDFlag { + option (gogoproto.goproto_enum_stringer) = true; + option (gogoproto.goproto_enum_prefix) = false; + + BLOCK_ID_FLAG_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = 
"BlockIDFlagUnknown"]; + BLOCK_ID_FLAG_ABSENT = 1 [(gogoproto.enumvalue_customname) = "BlockIDFlagAbsent"]; + BLOCK_ID_FLAG_COMMIT = 2 [(gogoproto.enumvalue_customname) = "BlockIDFlagCommit"]; + BLOCK_ID_FLAG_NIL = 3 [(gogoproto.enumvalue_customname) = "BlockIDFlagNil"]; +} + +// SignedMsgType is a type of signed message in the consensus. +enum SignedMsgType { + option (gogoproto.goproto_enum_stringer) = true; + option (gogoproto.goproto_enum_prefix) = false; + + SIGNED_MSG_TYPE_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "UnknownType"]; + // Votes + SIGNED_MSG_TYPE_PREVOTE = 1 [(gogoproto.enumvalue_customname) = "PrevoteType"]; + SIGNED_MSG_TYPE_PRECOMMIT = 2 [(gogoproto.enumvalue_customname) = "PrecommitType"]; + + // Proposals + SIGNED_MSG_TYPE_PROPOSAL = 32 [(gogoproto.enumvalue_customname) = "ProposalType"]; +} + +// PartSetHeader +message PartSetHeader { + uint32 total = 1; + bytes hash = 2; +} + +message Part { + uint32 index = 1; + bytes bytes = 2; + tendermint.crypto.Proof proof = 3 [(gogoproto.nullable) = false]; +} + +// BlockID +message BlockID { + bytes hash = 1; + PartSetHeader part_set_header = 2 [(gogoproto.nullable) = false]; +} + +// -------------------------------- + +// Header defines the structure of a Tendermint block header. +message Header { + // basic block info + tendermint.version.Consensus version = 1 [(gogoproto.nullable) = false]; + string chain_id = 2 [(gogoproto.customname) = "ChainID"]; + int64 height = 3; + google.protobuf.Timestamp time = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + + // prev block info + BlockID last_block_id = 5 [(gogoproto.nullable) = false]; + + // hashes of block data + bytes last_commit_hash = 6; // commit from validators from the last block + bytes data_hash = 7; // transactions + + // hashes from the app output from the prev block + bytes validators_hash = 8; // validators for the current block + bytes next_validators_hash = 9; // validators for the next block + bytes consensus_hash = 10; // consensus params for current block + bytes app_hash = 11; // state after txs from the previous block + bytes last_results_hash = 12; // root hash of all results from the txs from the previous block + + // consensus info + bytes evidence_hash = 13; // evidence included in the block + bytes proposer_address = 14; // original proposer of the block +} + +// Data contains the set of transactions included in the block +message Data { + // Txs that will be applied by state @ block.Height+1. + // NOTE: not all txs here are valid. We're just agreeing on the order first. + // This means that block.AppHash does not include these txs. + repeated bytes txs = 1; +} + +// Vote represents a prevote, precommit, or commit vote from validators for +// consensus. +message Vote { + SignedMsgType type = 1; + int64 height = 2; + int32 round = 3; + BlockID block_id = 4 + [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; // zero if vote is nil. + google.protobuf.Timestamp timestamp = 5 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes validator_address = 6; + int32 validator_index = 7; + bytes signature = 8; +} + +// Commit contains the evidence that a block was committed by a set of validators. +message Commit { + int64 height = 1; + int32 round = 2; + BlockID block_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; + repeated CommitSig signatures = 4 [(gogoproto.nullable) = false]; +} + +// CommitSig is a part of the Vote included in a Commit. 
+message CommitSig { + BlockIDFlag block_id_flag = 1; + bytes validator_address = 2; + google.protobuf.Timestamp timestamp = 3 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes signature = 4; +} + +message Proposal { + SignedMsgType type = 1; + int64 height = 2; + int32 round = 3; + int32 pol_round = 4; + BlockID block_id = 5 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; + google.protobuf.Timestamp timestamp = 6 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes signature = 7; +} + +message SignedHeader { + Header header = 1; + Commit commit = 2; +} + +message LightBlock { + SignedHeader signed_header = 1; + tendermint.types.ValidatorSet validator_set = 2; +} + +message BlockMeta { + BlockID block_id = 1 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; + int64 block_size = 2; + Header header = 3 [(gogoproto.nullable) = false]; + int64 num_txs = 4; +} + +// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. +message TxProof { + bytes root_hash = 1; + bytes data = 2; + tendermint.crypto.Proof proof = 3; +} diff --git a/proto/tendermint/types/validator.pb.go b/proto/tendermint/types/validator.pb.go new file mode 100644 index 000000000..2c3468b83 --- /dev/null +++ b/proto/tendermint/types/validator.pb.go @@ -0,0 +1,953 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: tendermint/types/validator.proto + +package types + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + crypto "github.com/tendermint/tendermint/proto/tendermint/crypto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ValidatorSet struct { + Validators []*Validator `protobuf:"bytes,1,rep,name=validators,proto3" json:"validators,omitempty"` + Proposer *Validator `protobuf:"bytes,2,opt,name=proposer,proto3" json:"proposer,omitempty"` + TotalVotingPower int64 `protobuf:"varint,3,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` +} + +func (m *ValidatorSet) Reset() { *m = ValidatorSet{} } +func (m *ValidatorSet) String() string { return proto.CompactTextString(m) } +func (*ValidatorSet) ProtoMessage() {} +func (*ValidatorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_4e92274df03d3088, []int{0} +} +func (m *ValidatorSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorSet.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValidatorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorSet.Merge(m, src) +} +func (m *ValidatorSet) XXX_Size() int { + return m.Size() +} +func (m *ValidatorSet) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorSet proto.InternalMessageInfo + +func (m *ValidatorSet) GetValidators() []*Validator { + if m != nil { + return m.Validators + } + return nil +} + +func (m *ValidatorSet) GetProposer() *Validator { + if m != nil { + return m.Proposer + } + return nil +} + +func (m *ValidatorSet) GetTotalVotingPower() int64 { + if m != nil { + return m.TotalVotingPower + } + return 0 +} + +type Validator struct { + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + PubKey crypto.PublicKey `protobuf:"bytes,2,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` + VotingPower int64 `protobuf:"varint,3,opt,name=voting_power,json=votingPower,proto3" json:"voting_power,omitempty"` + ProposerPriority int64 `protobuf:"varint,4,opt,name=proposer_priority,json=proposerPriority,proto3" json:"proposer_priority,omitempty"` +} + +func (m *Validator) Reset() { *m = Validator{} } +func (m *Validator) String() string { return proto.CompactTextString(m) } +func (*Validator) ProtoMessage() {} +func (*Validator) Descriptor() ([]byte, []int) { + return fileDescriptor_4e92274df03d3088, []int{1} +} +func (m *Validator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Validator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Validator.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Validator) XXX_Merge(src proto.Message) { + xxx_messageInfo_Validator.Merge(m, src) +} +func (m *Validator) XXX_Size() int { + return m.Size() +} +func (m *Validator) XXX_DiscardUnknown() { + xxx_messageInfo_Validator.DiscardUnknown(m) +} + +var xxx_messageInfo_Validator proto.InternalMessageInfo + +func (m *Validator) GetAddress() []byte { + if m != nil { + return m.Address + } + return nil +} + +func (m *Validator) GetPubKey() crypto.PublicKey { + if m != nil { + return m.PubKey + } + return crypto.PublicKey{} +} + +func (m *Validator) GetVotingPower() int64 { + if m != nil { + return m.VotingPower + } + return 0 +} + +func (m 
*Validator) GetProposerPriority() int64 { + if m != nil { + return m.ProposerPriority + } + return 0 +} + +type SimpleValidator struct { + PubKey *crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` + VotingPower int64 `protobuf:"varint,2,opt,name=voting_power,json=votingPower,proto3" json:"voting_power,omitempty"` +} + +func (m *SimpleValidator) Reset() { *m = SimpleValidator{} } +func (m *SimpleValidator) String() string { return proto.CompactTextString(m) } +func (*SimpleValidator) ProtoMessage() {} +func (*SimpleValidator) Descriptor() ([]byte, []int) { + return fileDescriptor_4e92274df03d3088, []int{2} +} +func (m *SimpleValidator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SimpleValidator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SimpleValidator.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SimpleValidator) XXX_Merge(src proto.Message) { + xxx_messageInfo_SimpleValidator.Merge(m, src) +} +func (m *SimpleValidator) XXX_Size() int { + return m.Size() +} +func (m *SimpleValidator) XXX_DiscardUnknown() { + xxx_messageInfo_SimpleValidator.DiscardUnknown(m) +} + +var xxx_messageInfo_SimpleValidator proto.InternalMessageInfo + +func (m *SimpleValidator) GetPubKey() *crypto.PublicKey { + if m != nil { + return m.PubKey + } + return nil +} + +func (m *SimpleValidator) GetVotingPower() int64 { + if m != nil { + return m.VotingPower + } + return 0 +} + +func init() { + proto.RegisterType((*ValidatorSet)(nil), "tendermint.types.ValidatorSet") + proto.RegisterType((*Validator)(nil), "tendermint.types.Validator") + proto.RegisterType((*SimpleValidator)(nil), "tendermint.types.SimpleValidator") +} + +func init() { proto.RegisterFile("tendermint/types/validator.proto", fileDescriptor_4e92274df03d3088) } + +var fileDescriptor_4e92274df03d3088 = []byte{ + // 361 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xcf, 0x4e, 0xc2, 0x40, + 0x10, 0xc6, 0xbb, 0x40, 0x40, 0x17, 0x12, 0x71, 0xe3, 0xa1, 0x41, 0x52, 0x2b, 0x27, 0x12, 0x4d, + 0x9b, 0x68, 0x0c, 0x07, 0x6e, 0x5c, 0xb9, 0x60, 0x49, 0x38, 0x78, 0x69, 0x5a, 0xba, 0xa9, 0x1b, + 0x0a, 0xbb, 0xd9, 0x6e, 0x31, 0xfb, 0x16, 0x3e, 0x8b, 0x4f, 0xc1, 0x91, 0xa3, 0x27, 0x63, 0xe0, + 0x45, 0x4c, 0x5b, 0xfa, 0x27, 0xa8, 0xe1, 0x36, 0x9d, 0xef, 0x9b, 0x99, 0x5f, 0x37, 0x1f, 0xd4, + 0x05, 0x5e, 0x79, 0x98, 0x2f, 0xc9, 0x4a, 0x98, 0x42, 0x32, 0x1c, 0x9a, 0x6b, 0x27, 0x20, 0x9e, + 0x23, 0x28, 0x37, 0x18, 0xa7, 0x82, 0xa2, 0x76, 0xe1, 0x30, 0x12, 0x47, 0xe7, 0xca, 0xa7, 0x3e, + 0x4d, 0x44, 0x33, 0xae, 0x52, 0x5f, 0xa7, 0x5b, 0xda, 0x34, 0xe7, 0x92, 0x09, 0x6a, 0x2e, 0xb0, + 0x0c, 0x53, 0xb5, 0xf7, 0x01, 0x60, 0x6b, 0x96, 0x6d, 0x9e, 0x62, 0x81, 0x86, 0x10, 0xe6, 0x97, + 0x42, 0x15, 0xe8, 0xd5, 0x7e, 0xf3, 0xe1, 0xda, 0x38, 0xbe, 0x65, 0xe4, 0x33, 0x56, 0xc9, 0x8e, + 0x06, 0xf0, 0x8c, 0x71, 0xca, 0x68, 0x88, 0xb9, 0x5a, 0xd1, 0xc1, 0xa9, 0xd1, 0xdc, 0x8c, 0xee, + 0x21, 0x12, 0x54, 0x38, 0x81, 0xbd, 0xa6, 0x82, 0xac, 0x7c, 0x9b, 0xd1, 0x37, 0xcc, 0xd5, 0xaa, + 0x0e, 0xfa, 0x55, 0xab, 0x9d, 0x28, 0xb3, 0x44, 0x98, 0xc4, 0xfd, 0x18, 0xfa, 0x3c, 0xdf, 0x82, + 0x54, 0xd8, 0x70, 0x3c, 0x8f, 0xe3, 0x30, 0xc6, 0x05, 0xfd, 0x96, 0x95, 0x7d, 0xa2, 0x21, 0x6c, + 0xb0, 0xc8, 0xb5, 0x17, 0x58, 0x1e, 0x68, 0xba, 0x65, 0x9a, 0xf4, 0x31, 0x8c, 0x49, 0xe4, 0x06, 
+ 0x64, 0x3e, 0xc6, 0x72, 0x54, 0xdb, 0x7c, 0xdd, 0x28, 0x56, 0x9d, 0x45, 0xee, 0x18, 0x4b, 0x74, + 0x0b, 0x5b, 0x7f, 0xc0, 0x34, 0xd7, 0x05, 0x07, 0xba, 0x83, 0x97, 0xd9, 0x1f, 0xd8, 0x8c, 0x13, + 0xca, 0x89, 0x90, 0x6a, 0x2d, 0x85, 0xce, 0x84, 0xc9, 0xa1, 0xdf, 0x5b, 0xc0, 0x8b, 0x29, 0x59, + 0xb2, 0x00, 0x17, 0xe4, 0x4f, 0x05, 0x1f, 0x38, 0xcd, 0xf7, 0x2f, 0x59, 0xe5, 0x17, 0xd9, 0xe8, + 0x79, 0xb3, 0xd3, 0xc0, 0x76, 0xa7, 0x81, 0xef, 0x9d, 0x06, 0xde, 0xf7, 0x9a, 0xb2, 0xdd, 0x6b, + 0xca, 0xe7, 0x5e, 0x53, 0x5e, 0x06, 0x3e, 0x11, 0xaf, 0x91, 0x6b, 0xcc, 0xe9, 0xd2, 0x2c, 0x67, + 0xac, 0x28, 0xd3, 0x04, 0x1d, 0xe7, 0xcf, 0xad, 0x27, 0xfd, 0xc7, 0x9f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x48, 0xbf, 0x34, 0x35, 0x9a, 0x02, 0x00, 0x00, +} + +func (m *ValidatorSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TotalVotingPower != 0 { + i = encodeVarintValidator(dAtA, i, uint64(m.TotalVotingPower)) + i-- + dAtA[i] = 0x18 + } + if m.Proposer != nil { + { + size, err := m.Proposer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintValidator(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Validators) > 0 { + for iNdEx := len(m.Validators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Validators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintValidator(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Validator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProposerPriority != 0 { + i = encodeVarintValidator(dAtA, i, uint64(m.ProposerPriority)) + i-- + dAtA[i] = 0x20 + } + if m.VotingPower != 0 { + i = encodeVarintValidator(dAtA, i, uint64(m.VotingPower)) + i-- + dAtA[i] = 0x18 + } + { + size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintValidator(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintValidator(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SimpleValidator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SimpleValidator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SimpleValidator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VotingPower != 0 { + i = encodeVarintValidator(dAtA, i, uint64(m.VotingPower)) + i-- + dAtA[i] 
= 0x10 + } + if m.PubKey != nil { + { + size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintValidator(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintValidator(dAtA []byte, offset int, v uint64) int { + offset -= sovValidator(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ValidatorSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Validators) > 0 { + for _, e := range m.Validators { + l = e.Size() + n += 1 + l + sovValidator(uint64(l)) + } + } + if m.Proposer != nil { + l = m.Proposer.Size() + n += 1 + l + sovValidator(uint64(l)) + } + if m.TotalVotingPower != 0 { + n += 1 + sovValidator(uint64(m.TotalVotingPower)) + } + return n +} + +func (m *Validator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovValidator(uint64(l)) + } + l = m.PubKey.Size() + n += 1 + l + sovValidator(uint64(l)) + if m.VotingPower != 0 { + n += 1 + sovValidator(uint64(m.VotingPower)) + } + if m.ProposerPriority != 0 { + n += 1 + sovValidator(uint64(m.ProposerPriority)) + } + return n +} + +func (m *SimpleValidator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PubKey != nil { + l = m.PubKey.Size() + n += 1 + l + sovValidator(uint64(l)) + } + if m.VotingPower != 0 { + n += 1 + sovValidator(uint64(m.VotingPower)) + } + return n +} + +func sovValidator(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozValidator(x uint64) (n int) { + return sovValidator(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ValidatorSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthValidator + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthValidator + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validators = append(m.Validators, &Validator{}) + if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proposer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthValidator + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthValidator + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proposer == nil { + m.Proposer = &Validator{} + } + if err := m.Proposer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalVotingPower", wireType) + } + m.TotalVotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalVotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipValidator(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Validator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Validator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Validator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthValidator + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthValidator + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = append(m.Address[:0], dAtA[iNdEx:postIndex]...) 
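+ // NOTE: pub_key is declared (gogoproto.nullable) = false in validator.proto, so case 2 below unmarshals directly into the embedded crypto.PublicKey value instead of allocating a pointer first.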
+ if m.Address == nil { + m.Address = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthValidator + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthValidator + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VotingPower", wireType) + } + m.VotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.VotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposerPriority", wireType) + } + m.ProposerPriority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProposerPriority |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipValidator(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SimpleValidator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SimpleValidator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SimpleValidator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthValidator + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthValidator + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PubKey == nil { + m.PubKey = &crypto.PublicKey{} + } + if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VotingPower", wireType) + } + m.VotingPower = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowValidator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.VotingPower |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipValidator(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthValidator + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipValidator(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowValidator + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowValidator + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowValidator + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthValidator + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupValidator + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthValidator + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthValidator = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowValidator = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupValidator = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/types/validator.proto b/proto/tendermint/types/validator.proto new file mode 100644 index 000000000..49860b96d --- /dev/null +++ b/proto/tendermint/types/validator.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; +package tendermint.types; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; + +import "gogoproto/gogo.proto"; +import "tendermint/crypto/keys.proto"; + +message ValidatorSet { + repeated Validator validators = 1; + Validator proposer = 2; + int64 total_voting_power = 3; +} + +message Validator { + bytes address = 1; + tendermint.crypto.PublicKey pub_key = 2 [(gogoproto.nullable) = false]; + int64 voting_power = 3; + int64 proposer_priority = 4; +} + +message SimpleValidator { + tendermint.crypto.PublicKey pub_key = 1; + int64 voting_power = 2; +} diff --git a/proto/tendermint/version/types.pb.go b/proto/tendermint/version/types.pb.go new file mode 100644 index 000000000..9aeb3ae1a --- /dev/null +++ b/proto/tendermint/version/types.pb.go @@ -0,0 +1,369 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: tendermint/version/types.proto + +package version + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Consensus captures the consensus rules for processing a block in the blockchain, +// including all blockchain data structures and the rules of the application's +// state transition machine. +type Consensus struct { + Block uint64 `protobuf:"varint,1,opt,name=block,proto3" json:"block,omitempty"` + App uint64 `protobuf:"varint,2,opt,name=app,proto3" json:"app,omitempty"` +} + +func (m *Consensus) Reset() { *m = Consensus{} } +func (m *Consensus) String() string { return proto.CompactTextString(m) } +func (*Consensus) ProtoMessage() {} +func (*Consensus) Descriptor() ([]byte, []int) { + return fileDescriptor_f9b42966edc5edad, []int{0} +} +func (m *Consensus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Consensus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Consensus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Consensus) XXX_Merge(src proto.Message) { + xxx_messageInfo_Consensus.Merge(m, src) +} +func (m *Consensus) XXX_Size() int { + return m.Size() +} +func (m *Consensus) XXX_DiscardUnknown() { + xxx_messageInfo_Consensus.DiscardUnknown(m) +} + +var xxx_messageInfo_Consensus proto.InternalMessageInfo + +func (m *Consensus) GetBlock() uint64 { + if m != nil { + return m.Block + } + return 0 +} + +func (m *Consensus) GetApp() uint64 { + if m != nil { + return m.App + } + return 0 +} + +func init() { + proto.RegisterType((*Consensus)(nil), "tendermint.version.Consensus") +} + +func init() { proto.RegisterFile("tendermint/version/types.proto", fileDescriptor_f9b42966edc5edad) } + +var fileDescriptor_f9b42966edc5edad = []byte{ + // 179 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2b, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0xd3, 0x2f, + 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x42, 0xc8, 0xeb, 0x41, + 0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xd2, 0xfa, 0x20, 0x16, 0x44, 0xa5, 0x92, 0x25, + 0x17, 0xa7, 0x73, 0x7e, 0x5e, 0x71, 0x6a, 0x5e, 0x71, 0x69, 0xb1, 0x90, 0x08, 0x17, 0x6b, 0x52, + 0x4e, 0x7e, 0x72, 0xb6, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x4b, 0x10, 0x84, 0x23, 0x24, 0xc0, 0xc5, + 0x9c, 0x58, 0x50, 0x20, 0xc1, 0x04, 0x16, 0x03, 0x31, 0xad, 0x58, 0x5e, 0x2c, 0x90, 0x67, 0x74, + 0x0a, 0x3e, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, + 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xcb, 0xf4, 0xcc, 0x92, + 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0x24, 0x97, 0x22, 0x31, 0x21, 0xee, 0xc0, 0xf4, + 0x45, 0x12, 0x1b, 0x58, 0xc6, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 
0xf8, 0xf0, 0x65, 0xd2, 0xe2, + 0x00, 0x00, 0x00, +} + +func (this *Consensus) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Consensus) + if !ok { + that2, ok := that.(Consensus) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Block != that1.Block { + return false + } + if this.App != that1.App { + return false + } + return true +} +func (m *Consensus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Consensus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Consensus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.App != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.App)) + i-- + dAtA[i] = 0x10 + } + if m.Block != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Block)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Consensus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Block != 0 { + n += 1 + sovTypes(uint64(m.Block)) + } + if m.App != 0 { + n += 1 + sovTypes(uint64(m.App)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Consensus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Consensus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Consensus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + } + m.Block = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Block |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field App", wireType) + } + m.App = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.App |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/proto/tendermint/version/types.proto b/proto/tendermint/version/types.proto new file mode 100644 index 000000000..3c4e4cc53 --- /dev/null +++ b/proto/tendermint/version/types.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package tendermint.version; + +option go_package = "github.com/tendermint/tendermint/proto/tendermint/version"; + +import "gogoproto/gogo.proto"; + +// Consensus captures the consensus rules for processing a block in the blockchain, +// including all blockchain data structures and the rules of the application's +// state transition machine. 
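+// The block version tracks Tendermint's own block protocol and data structures; +// the app version is opaque to Tendermint and is set and interpreted by the +// application.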
+message Consensus { + option (gogoproto.equal) = true; + + uint64 block = 1; + uint64 app = 2; +} diff --git a/proxy/app_conn.go b/proxy/app_conn.go index 066d17295..61652b30b 100644 --- a/proxy/app_conn.go +++ b/proxy/app_conn.go @@ -5,6 +5,8 @@ import ( "github.com/tendermint/tendermint/abci/types" ) +//go:generate mockery --case underscore --name AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot + //---------------------------------------------------------------------------------------- // Enforce which abci msgs can be sent on a connection at the type level @@ -25,6 +27,7 @@ type AppConnMempool interface { Error() error CheckTxAsync(types.RequestCheckTx) *abcicli.ReqRes + CheckTxSync(types.RequestCheckTx) (*types.ResponseCheckTx, error) FlushAsync() *abcicli.ReqRes FlushSync() error @@ -36,8 +39,15 @@ type AppConnQuery interface { EchoSync(string) (*types.ResponseEcho, error) InfoSync(types.RequestInfo) (*types.ResponseInfo, error) QuerySync(types.RequestQuery) (*types.ResponseQuery, error) +} + +type AppConnSnapshot interface { + Error() error - // SetOptionSync(key string, value string) (res types.Result) + ListSnapshotsSync(types.RequestListSnapshots) (*types.ResponseListSnapshots, error) + OfferSnapshotSync(types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) + LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) + ApplySnapshotChunkSync(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) } //----------------------------------------------------------------------------------------- @@ -114,6 +124,10 @@ func (app *appConnMempool) CheckTxAsync(req types.RequestCheckTx) *abcicli.ReqRe return app.appConn.CheckTxAsync(req) } +func (app *appConnMempool) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) { + return app.appConn.CheckTxSync(req) +} + //------------------------------------------------ // Implements AppConnQuery (subset of abcicli.Client) @@ -142,3 +156,38 @@ func (app *appConnQuery) InfoSync(req types.RequestInfo) (*types.ResponseInfo, e func (app *appConnQuery) QuerySync(reqQuery types.RequestQuery) (*types.ResponseQuery, error) { return app.appConn.QuerySync(reqQuery) } + +//------------------------------------------------ +// Implements AppConnSnapshot (subset of abcicli.Client) + +type appConnSnapshot struct { + appConn abcicli.Client +} + +func NewAppConnSnapshot(appConn abcicli.Client) AppConnSnapshot { + return &appConnSnapshot{ + appConn: appConn, + } +} + +func (app *appConnSnapshot) Error() error { + return app.appConn.Error() +} + +func (app *appConnSnapshot) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + return app.appConn.ListSnapshotsSync(req) +} + +func (app *appConnSnapshot) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + return app.appConn.OfferSnapshotSync(req) +} + +func (app *appConnSnapshot) LoadSnapshotChunkSync( + req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + return app.appConn.LoadSnapshotChunkSync(req) +} + +func (app *appConnSnapshot) ApplySnapshotChunkSync( + req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + return app.appConn.ApplySnapshotChunkSync(req) +} diff --git a/proxy/app_conn_test.go b/proxy/app_conn_test.go index ca15f8977..683fe47e0 100644 --- a/proxy/app_conn_test.go +++ b/proxy/app_conn_test.go @@ -55,7 +55,11 @@ func TestEcho(t *testing.T) { if err := s.Start(); err != 
nil { t.Fatalf("Error starting socket server: %v", err.Error()) } - defer s.Stop() + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) // Start client cli, err := clientCreator.NewABCIClient() @@ -89,7 +93,11 @@ func BenchmarkEcho(b *testing.B) { if err := s.Start(); err != nil { b.Fatalf("Error starting socket server: %v", err.Error()) } - defer s.Stop() + b.Cleanup(func() { + if err := s.Stop(); err != nil { + b.Error(err) + } + }) // Start client cli, err := clientCreator.NewABCIClient() @@ -115,7 +123,7 @@ func BenchmarkEcho(b *testing.B) { b.StopTimer() // info := proxy.InfoSync(types.RequestInfo{""}) - //b.Log("N: ", b.N, info) + // b.Log("N: ", b.N, info) } func TestInfo(t *testing.T) { @@ -128,7 +136,11 @@ func TestInfo(t *testing.T) { if err := s.Start(); err != nil { t.Fatalf("Error starting socket server: %v", err.Error()) } - defer s.Stop() + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) // Start client cli, err := clientCreator.NewABCIClient() diff --git a/proxy/client.go b/proxy/client.go index ed48dbc96..1dc6d8853 100644 --- a/proxy/client.go +++ b/proxy/client.go @@ -1,18 +1,18 @@ package proxy import ( - "sync" - - "github.com/pkg/errors" + "fmt" abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/counter" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/types" + tmsync "github.com/tendermint/tendermint/libs/sync" ) -// NewABCIClient returns newly connected client +// ClientCreator creates new ABCI clients. type ClientCreator interface { + // NewABCIClient returns a new ABCI client. NewABCIClient() (abcicli.Client, error) } @@ -20,13 +20,15 @@ type ClientCreator interface { // local proxy uses a mutex on an in-proc app type localClientCreator struct { - mtx *sync.Mutex + mtx *tmsync.Mutex app types.Application } +// NewLocalClientCreator returns a ClientCreator for the given app, +// which will be running locally. func NewLocalClientCreator(app types.Application) ClientCreator { return &localClientCreator{ - mtx: new(sync.Mutex), + mtx: new(tmsync.Mutex), app: app, } } @@ -44,6 +46,9 @@ type remoteClientCreator struct { mustConnect bool } +// NewRemoteClientCreator returns a ClientCreator for the given address (e.g. +// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you +// want the client to connect before reporting success. func NewRemoteClientCreator(addr, transport string, mustConnect bool) ClientCreator { return &remoteClientCreator{ addr: addr, @@ -55,14 +60,15 @@ func NewRemoteClientCreator(addr, transport string, mustConnect bool) ClientCrea func (r *remoteClientCreator) NewABCIClient() (abcicli.Client, error) { remoteApp, err := abcicli.NewClient(r.addr, r.transport, r.mustConnect) if err != nil { - return nil, errors.Wrap(err, "Failed to connect to proxy") + return nil, fmt.Errorf("failed to connect to proxy: %w", err) } + return remoteApp, nil } -//----------------------------------------------------------------- -// default - +// DefaultClientCreator returns a default ClientCreator, which will create a +// local client if addr is one of: 'counter', 'counter_serial', 'kvstore', +// 'persistent_kvstore' or 'noop', otherwise - a remote client. 
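A wiring sketch for the two creators above (assumed usage, not part of this change; the address, port, and transport values are placeholders):

package main

import (
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/proxy"
)

func main() {
	// In-process app: calls are serialized through the shared tmsync.Mutex.
	local := proxy.NewLocalClientCreator(kvstore.NewApplication())

	// Out-of-process app: with mustConnect=false the client is created now
	// and only dials the address when it is started.
	remote := proxy.NewRemoteClientCreator("192.168.0.1:26658", "tcp", false)

	// Both satisfy the same ClientCreator interface.
	for _, cc := range []proxy.ClientCreator{local, remote} {
		if _, err := cc.NewABCIClient(); err != nil {
			panic(err)
		}
	}
}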
func DefaultClientCreator(addr, transport, dbDir string) ClientCreator { switch addr { case "counter": diff --git a/proxy/mocks/app_conn_consensus.go b/proxy/mocks/app_conn_consensus.go new file mode 100644 index 000000000..7e5f81489 --- /dev/null +++ b/proxy/mocks/app_conn_consensus.go @@ -0,0 +1,142 @@ +// Code generated by mockery v2.3.0. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + abcicli "github.com/tendermint/tendermint/abci/client" + + types "github.com/tendermint/tendermint/abci/types" +) + +// AppConnConsensus is an autogenerated mock type for the AppConnConsensus type +type AppConnConsensus struct { + mock.Mock +} + +// BeginBlockSync provides a mock function with given fields: _a0 +func (_m *AppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseBeginBlock + if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *types.ResponseBeginBlock); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseBeginBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestBeginBlock) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitSync provides a mock function with given fields: +func (_m *AppConnConsensus) CommitSync() (*types.ResponseCommit, error) { + ret := _m.Called() + + var r0 *types.ResponseCommit + if rf, ok := ret.Get(0).(func() *types.ResponseCommit); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseCommit) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeliverTxAsync provides a mock function with given fields: _a0 +func (_m *AppConnConsensus) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli.ReqRes { + ret := _m.Called(_a0) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *abcicli.ReqRes); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// EndBlockSync provides a mock function with given fields: _a0 +func (_m *AppConnConsensus) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlock, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseEndBlock + if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *types.ResponseEndBlock); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseEndBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestEndBlock) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Error provides a mock function with given fields: +func (_m *AppConnConsensus) Error() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InitChainSync provides a mock function with given fields: _a0 +func (_m *AppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInitChain, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseInitChain + if rf, ok := ret.Get(0).(func(types.RequestInitChain) *types.ResponseInitChain); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseInitChain) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestInitChain) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, 
r1 +} + +// SetResponseCallback provides a mock function with given fields: _a0 +func (_m *AppConnConsensus) SetResponseCallback(_a0 abcicli.Callback) { + _m.Called(_a0) +} diff --git a/proxy/mocks/app_conn_mempool.go b/proxy/mocks/app_conn_mempool.go new file mode 100644 index 000000000..b068c01eb --- /dev/null +++ b/proxy/mocks/app_conn_mempool.go @@ -0,0 +1,103 @@ +// Code generated by mockery v2.3.0. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + abcicli "github.com/tendermint/tendermint/abci/client" + + types "github.com/tendermint/tendermint/abci/types" +) + +// AppConnMempool is an autogenerated mock type for the AppConnMempool type +type AppConnMempool struct { + mock.Mock +} + +// CheckTxAsync provides a mock function with given fields: _a0 +func (_m *AppConnMempool) CheckTxAsync(_a0 types.RequestCheckTx) *abcicli.ReqRes { + ret := _m.Called(_a0) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *abcicli.ReqRes); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// CheckTxSync provides a mock function with given fields: _a0 +func (_m *AppConnMempool) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseCheckTx + if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *types.ResponseCheckTx); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseCheckTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestCheckTx) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Error provides a mock function with given fields: +func (_m *AppConnMempool) Error() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FlushAsync provides a mock function with given fields: +func (_m *AppConnMempool) FlushAsync() *abcicli.ReqRes { + ret := _m.Called() + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// FlushSync provides a mock function with given fields: +func (_m *AppConnMempool) FlushSync() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetResponseCallback provides a mock function with given fields: _a0 +func (_m *AppConnMempool) SetResponseCallback(_a0 abcicli.Callback) { + _m.Called(_a0) +} diff --git a/proxy/mocks/app_conn_query.go b/proxy/mocks/app_conn_query.go new file mode 100644 index 000000000..970dfab3a --- /dev/null +++ b/proxy/mocks/app_conn_query.go @@ -0,0 +1,97 @@ +// Code generated by mockery v2.3.0. DO NOT EDIT. 
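Because these are plain testify mocks, stubbing the new CheckTxSync method in a test looks like this (an illustrative sketch, not part of this change):

package proxy_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/proxy/mocks"
)

func TestCheckTxSyncMock(t *testing.T) {
	m := &mocks.AppConnMempool{}
	m.On("CheckTxSync", types.RequestCheckTx{Tx: []byte("tx")}).
		Return(&types.ResponseCheckTx{Code: 0}, nil)

	res, err := m.CheckTxSync(types.RequestCheckTx{Tx: []byte("tx")})
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Code)
	m.AssertExpectations(t)
}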
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + + types "github.com/tendermint/tendermint/abci/types" +) + +// AppConnQuery is an autogenerated mock type for the AppConnQuery type +type AppConnQuery struct { + mock.Mock +} + +// EchoSync provides a mock function with given fields: _a0 +func (_m *AppConnQuery) EchoSync(_a0 string) (*types.ResponseEcho, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseEcho + if rf, ok := ret.Get(0).(func(string) *types.ResponseEcho); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseEcho) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Error provides a mock function with given fields: +func (_m *AppConnQuery) Error() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InfoSync provides a mock function with given fields: _a0 +func (_m *AppConnQuery) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseInfo + if rf, ok := ret.Get(0).(func(types.RequestInfo) *types.ResponseInfo); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestInfo) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QuerySync provides a mock function with given fields: _a0 +func (_m *AppConnQuery) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseQuery + if rf, ok := ret.Get(0).(func(types.RequestQuery) *types.ResponseQuery); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseQuery) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestQuery) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/proxy/mocks/app_conn_snapshot.go b/proxy/mocks/app_conn_snapshot.go new file mode 100644 index 000000000..8cd39c923 --- /dev/null +++ b/proxy/mocks/app_conn_snapshot.go @@ -0,0 +1,120 @@ +// Code generated by mockery v2.3.0. DO NOT EDIT. 
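For context, proxy.RequestInfo (defined in proxy/version.go later in this patch) is sent during the handshake with the app; that the query connection carries it is an assumption in this sketch, not something shown by the patch:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/proxy"
)

// handshakeInfo asks the app for its last committed height and app hash,
// which is what block replay needs to catch the app up to the store.
func handshakeInfo(q proxy.AppConnQuery) error {
	res, err := q.InfoSync(proxy.RequestInfo)
	if err != nil {
		return err
	}
	fmt.Println(res.LastBlockHeight, res.LastBlockAppHash)
	return nil
}

func main() {}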
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + + types "github.com/tendermint/tendermint/abci/types" +) + +// AppConnSnapshot is an autogenerated mock type for the AppConnSnapshot type +type AppConnSnapshot struct { + mock.Mock +} + +// ApplySnapshotChunkSync provides a mock function with given fields: _a0 +func (_m *AppConnSnapshot) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseApplySnapshotChunk + if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestApplySnapshotChunk) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Error provides a mock function with given fields: +func (_m *AppConnSnapshot) Error() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ListSnapshotsSync provides a mock function with given fields: _a0 +func (_m *AppConnSnapshot) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseListSnapshots + if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *types.ResponseListSnapshots); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseListSnapshots) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestListSnapshots) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LoadSnapshotChunkSync provides a mock function with given fields: _a0 +func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseLoadSnapshotChunk + if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestLoadSnapshotChunk) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OfferSnapshotSync provides a mock function with given fields: _a0 +func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseOfferSnapshot + if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseOfferSnapshot) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestOfferSnapshot) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/proxy/mocks/client_creator.go b/proxy/mocks/client_creator.go new file mode 100644 index 000000000..499313d17 --- /dev/null +++ b/proxy/mocks/client_creator.go @@ -0,0 +1,36 @@ +// Code generated by mockery v1.1.1. DO NOT EDIT. 
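The four snapshot methods are meant to be driven in sequence during state sync; a simplified sketch of that flow (assumed usage — in real state sync the chunk bytes come from peers, not from the same app):

package main

import (
	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/proxy"
)

func restoreFromSnapshot(conn proxy.AppConnSnapshot) error {
	ls, err := conn.ListSnapshotsSync(types.RequestListSnapshots{})
	if err != nil || len(ls.Snapshots) == 0 {
		return err
	}
	snap := ls.Snapshots[0] // picking the first snapshot is purely illustrative

	if _, err := conn.OfferSnapshotSync(types.RequestOfferSnapshot{Snapshot: snap}); err != nil {
		return err
	}
	for i := uint32(0); i < snap.Chunks; i++ {
		chunk, err := conn.LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk{
			Height: snap.Height, Format: snap.Format, Chunk: i,
		})
		if err != nil {
			return err
		}
		if _, err := conn.ApplySnapshotChunkSync(types.RequestApplySnapshotChunk{
			Index: i, Chunk: chunk.Chunk,
		}); err != nil {
			return err
		}
	}
	return nil
}

func main() {}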
+
+package mocks
+
+import (
+	mock "github.com/stretchr/testify/mock"
+	abcicli "github.com/tendermint/tendermint/abci/client"
+)
+
+// ClientCreator is an autogenerated mock type for the ClientCreator type
+type ClientCreator struct {
+	mock.Mock
+}
+
+// NewABCIClient provides a mock function with given fields:
+func (_m *ClientCreator) NewABCIClient() (abcicli.Client, error) {
+	ret := _m.Called()
+
+	var r0 abcicli.Client
+	if rf, ok := ret.Get(0).(func() abcicli.Client); ok {
+		r0 = rf()
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(abcicli.Client)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func() error); ok {
+		r1 = rf()
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
diff --git a/proxy/multi_app_conn.go b/proxy/multi_app_conn.go
index 364114115..369b685ba 100644
--- a/proxy/multi_app_conn.go
+++ b/proxy/multi_app_conn.go
@@ -1,43 +1,64 @@
 package proxy
 
 import (
-	"github.com/pkg/errors"
+	"fmt"
+	"os"
+	"syscall"
 
+	abcicli "github.com/tendermint/tendermint/abci/client"
+	tmlog "github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/libs/service"
 )
 
-//-----------------------------
+const (
+	connConsensus = "consensus"
+	connMempool   = "mempool"
+	connQuery     = "query"
+	connSnapshot  = "snapshot"
+)
 
-// Tendermint's interface to the application consists of multiple connections
+// AppConns is Tendermint's interface to the application, consisting of
+// multiple connections.
 type AppConns interface {
 	service.Service
 
+	// Mempool connection
 	Mempool() AppConnMempool
+	// Consensus connection
 	Consensus() AppConnConsensus
+	// Query connection
 	Query() AppConnQuery
+	// Snapshot connection
+	Snapshot() AppConnSnapshot
 }
 
+// NewAppConns calls NewMultiAppConn.
func NewAppConns(clientCreator ClientCreator) AppConns {
 	return NewMultiAppConn(clientCreator)
 }
 
-//-----------------------------
-// multiAppConn implements AppConns
-
-// a multiAppConn is made of a few appConns (mempool, consensus, query)
-// and manages their underlying abci clients
+// multiAppConn implements AppConns.
+//
+// A multiAppConn is made of a few appConns and manages their underlying abci
+// clients.
 // TODO: on app restart, clients must reboot together
 type multiAppConn struct {
 	service.BaseService
 
-	mempoolConn   AppConnMempool
 	consensusConn AppConnConsensus
+	mempoolConn   AppConnMempool
 	queryConn     AppConnQuery
+	snapshotConn  AppConnSnapshot
+
+	consensusConnClient abcicli.Client
+	mempoolConnClient   abcicli.Client
+	queryConnClient     abcicli.Client
+	snapshotConnClient  abcicli.Client
 
 	clientCreator ClientCreator
 }
 
-// Make all necessary abci connections to the application
+// NewMultiAppConn makes all necessary abci connections to the application.
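A wiring sketch for the interface above (assumed usage, not part of this change): one AppConns service, built by NewMultiAppConn just below, owns all four connections.

package main

import (
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/proxy"
)

func main() {
	appConns := proxy.NewAppConns(proxy.NewLocalClientCreator(kvstore.NewApplication()))
	if err := appConns.Start(); err != nil {
		panic(err)
	}
	defer func() {
		if err := appConns.Stop(); err != nil {
			panic(err)
		}
	}()

	_ = appConns.Mempool()   // CheckTx traffic
	_ = appConns.Consensus() // InitChain/BeginBlock/DeliverTx/EndBlock/Commit
	_ = appConns.Query()     // Info and ABCI queries
	_ = appConns.Snapshot()  // state-sync snapshots
}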
func NewMultiAppConn(clientCreator ClientCreator) AppConns { multiAppConn := &multiAppConn{ clientCreator: clientCreator, @@ -46,54 +67,134 @@ func NewMultiAppConn(clientCreator ClientCreator) AppConns { return multiAppConn } -// Returns the mempool connection func (app *multiAppConn) Mempool() AppConnMempool { return app.mempoolConn } -// Returns the consensus Connection func (app *multiAppConn) Consensus() AppConnConsensus { return app.consensusConn } -// Returns the query Connection func (app *multiAppConn) Query() AppConnQuery { return app.queryConn } +func (app *multiAppConn) Snapshot() AppConnSnapshot { + return app.snapshotConn +} + func (app *multiAppConn) OnStart() error { - // query connection - querycli, err := app.clientCreator.NewABCIClient() + c, err := app.abciClientFor(connQuery) if err != nil { - return errors.Wrap(err, "Error creating ABCI client (query connection)") + return err } - querycli.SetLogger(app.Logger.With("module", "abci-client", "connection", "query")) - if err := querycli.Start(); err != nil { - return errors.Wrap(err, "Error starting ABCI client (query connection)") + app.queryConnClient = c + app.queryConn = NewAppConnQuery(c) + + c, err = app.abciClientFor(connSnapshot) + if err != nil { + app.stopAllClients() + return err } - app.queryConn = NewAppConnQuery(querycli) + app.snapshotConnClient = c + app.snapshotConn = NewAppConnSnapshot(c) - // mempool connection - memcli, err := app.clientCreator.NewABCIClient() + c, err = app.abciClientFor(connMempool) if err != nil { - return errors.Wrap(err, "Error creating ABCI client (mempool connection)") + app.stopAllClients() + return err } - memcli.SetLogger(app.Logger.With("module", "abci-client", "connection", "mempool")) - if err := memcli.Start(); err != nil { - return errors.Wrap(err, "Error starting ABCI client (mempool connection)") + app.mempoolConnClient = c + app.mempoolConn = NewAppConnMempool(c) + + c, err = app.abciClientFor(connConsensus) + if err != nil { + app.stopAllClients() + return err } - app.mempoolConn = NewAppConnMempool(memcli) + app.consensusConnClient = c + app.consensusConn = NewAppConnConsensus(c) + + // Kill Tendermint if the ABCI application crashes. + go app.killTMOnClientError() + + return nil +} + +func (app *multiAppConn) OnStop() { + app.stopAllClients() +} - // consensus connection - concli, err := app.clientCreator.NewABCIClient() +func (app *multiAppConn) killTMOnClientError() { + killFn := func(conn string, err error, logger tmlog.Logger) { + logger.Error( + fmt.Sprintf("%s connection terminated. Did the application crash? 
Please restart tendermint", conn), + "err", err) + if killErr := kill(); killErr != nil { + logger.Error("Failed to kill this process - please do so manually", "err", killErr) + } + } + + select { + case <-app.consensusConnClient.Quit(): + if err := app.consensusConnClient.Error(); err != nil { + killFn(connConsensus, err, app.Logger) + } + case <-app.mempoolConnClient.Quit(): + if err := app.mempoolConnClient.Error(); err != nil { + killFn(connMempool, err, app.Logger) + } + case <-app.queryConnClient.Quit(): + if err := app.queryConnClient.Error(); err != nil { + killFn(connQuery, err, app.Logger) + } + case <-app.snapshotConnClient.Quit(): + if err := app.snapshotConnClient.Error(); err != nil { + killFn(connSnapshot, err, app.Logger) + } + } +} + +func (app *multiAppConn) stopAllClients() { + if app.consensusConnClient != nil { + if err := app.consensusConnClient.Stop(); err != nil { + app.Logger.Error("error while stopping consensus client", "error", err) + } + } + if app.mempoolConnClient != nil { + if err := app.mempoolConnClient.Stop(); err != nil { + app.Logger.Error("error while stopping mempool client", "error", err) + } + } + if app.queryConnClient != nil { + if err := app.queryConnClient.Stop(); err != nil { + app.Logger.Error("error while stopping query client", "error", err) + } + } + if app.snapshotConnClient != nil { + if err := app.snapshotConnClient.Stop(); err != nil { + app.Logger.Error("error while stopping snapshot client", "error", err) + } + } +} + +func (app *multiAppConn) abciClientFor(conn string) (abcicli.Client, error) { + c, err := app.clientCreator.NewABCIClient() if err != nil { - return errors.Wrap(err, "Error creating ABCI client (consensus connection)") + return nil, fmt.Errorf("error creating ABCI client (%s connection): %w", conn, err) + } + c.SetLogger(app.Logger.With("module", "abci-client", "connection", conn)) + if err := c.Start(); err != nil { + return nil, fmt.Errorf("error starting ABCI client (%s connection): %w", conn, err) } - concli.SetLogger(app.Logger.With("module", "abci-client", "connection", "consensus")) - if err := concli.Start(); err != nil { - return errors.Wrap(err, "Error starting ABCI client (consensus connection)") + return c, nil +} + +func kill() error { + p, err := os.FindProcess(os.Getpid()) + if err != nil { + return err } - app.consensusConn = NewAppConnConsensus(concli) - return nil + return p.Signal(syscall.SIGTERM) } diff --git a/proxy/multi_app_conn_test.go b/proxy/multi_app_conn_test.go new file mode 100644 index 000000000..34b0d0830 --- /dev/null +++ b/proxy/multi_app_conn_test.go @@ -0,0 +1,90 @@ +package proxy + +import ( + "errors" + "os" + "os/signal" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + abcimocks "github.com/tendermint/tendermint/abci/client/mocks" + "github.com/tendermint/tendermint/proxy/mocks" +) + +func TestAppConns_Start_Stop(t *testing.T) { + quitCh := make(<-chan struct{}) + + clientCreatorMock := &mocks.ClientCreator{} + + clientMock := &abcimocks.Client{} + clientMock.On("SetLogger", mock.Anything).Return().Times(4) + clientMock.On("Start").Return(nil).Times(4) + clientMock.On("Stop").Return(nil).Times(4) + clientMock.On("Quit").Return(quitCh).Times(4) + + clientCreatorMock.On("NewABCIClient").Return(clientMock, nil).Times(4) + + appConns := NewAppConns(clientCreatorMock) + + err := appConns.Start() + require.NoError(t, err) + + time.Sleep(100 * time.Millisecond) + + err = appConns.Stop() + require.NoError(t, err) + + 
clientMock.AssertExpectations(t)
+}
+
+// Upon failure, we call tmos.Kill
+func TestAppConns_Failure(t *testing.T) {
+	ok := make(chan struct{})
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, syscall.SIGTERM)
+	go func() {
+		for range c {
+			close(ok)
+		}
+	}()
+
+	quitCh := make(chan struct{})
+	var recvQuitCh <-chan struct{} // nolint:gosimple
+	recvQuitCh = quitCh
+
+	clientCreatorMock := &mocks.ClientCreator{}
+
+	clientMock := &abcimocks.Client{}
+	clientMock.On("SetLogger", mock.Anything).Return()
+	clientMock.On("Start").Return(nil)
+	clientMock.On("Stop").Return(nil)
+
+	clientMock.On("Quit").Return(recvQuitCh)
+	clientMock.On("Error").Return(errors.New("EOF")).Once()
+
+	clientCreatorMock.On("NewABCIClient").Return(clientMock, nil)
+
+	appConns := NewAppConns(clientCreatorMock)
+
+	err := appConns.Start()
+	require.NoError(t, err)
+	t.Cleanup(func() {
+		if err := appConns.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
+
+	// simulate failure
+	close(quitCh)
+
+	select {
+	case <-ok:
+		t.Log("SIGTERM successfully received")
+	case <-time.After(5 * time.Second):
+		t.Fatal("expected process to receive SIGTERM signal")
+	}
+}
diff --git a/proxy/version.go b/proxy/version.go
index fb506e659..be890e12e 100644
--- a/proxy/version.go
+++ b/proxy/version.go
@@ -9,7 +9,7 @@ import (
 // the abci.RequestInfo message during handshake with the app.
 // It contains only compile-time version information.
 var RequestInfo = abci.RequestInfo{
-	Version:      version.Version,
-	BlockVersion: version.BlockProtocol.Uint64(),
-	P2PVersion:   version.P2PProtocol.Uint64(),
+	Version:      version.TMCoreSemVer,
+	BlockVersion: version.BlockProtocol,
+	P2PVersion:   version.P2PProtocol,
 }
diff --git a/release_notes.md b/release_notes.md
new file mode 100644
index 000000000..a537871c5
--- /dev/null
+++ b/release_notes.md
@@ -0,0 +1 @@
+
diff --git a/rpc/client/codec.go b/rpc/client/codec.go
deleted file mode 100644
index 2dc0f6319..000000000
--- a/rpc/client/codec.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package client
-
-import (
-	amino "github.com/tendermint/go-amino"
-
-	"github.com/tendermint/tendermint/types"
-)
-
-var cdc = amino.NewCodec()
-
-func init() {
-	types.RegisterEvidences(cdc)
-}
diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go
index a25b6ebb2..5734d6c1b 100644
--- a/rpc/client/event_test.go
+++ b/rpc/client/event_test.go
@@ -17,7 +17,7 @@ import (
 	"github.com/tendermint/tendermint/types"
 )
 
-var waitForEventTimeout = 5 * time.Second
+var waitForEventTimeout = 8 * time.Second
 
 // MakeTxKV returns a test transaction, along with the expected key-value pair
 func MakeTxKV() ([]byte, []byte, []byte) {
@@ -28,14 +28,18 @@ func MakeTxKV() ([]byte, []byte, []byte) {
 
 func TestHeaderEvents(t *testing.T) {
 	for i, c := range GetClients() {
-		i, c := i, c // capture params
+		i, c := i, c
 		t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
 			// start the client for this test if it wasn't already running
 			if !c.IsRunning() {
 				// if so, then we start it, listen, and stop it.
				err := c.Start()
 				require.Nil(t, err, "%d: %+v", i, err)
-				defer c.Stop()
+				t.Cleanup(func() {
+					if err := c.Stop(); err != nil {
+						t.Error(err)
+					}
+				})
 			}
 
 			evtTyp := types.EventNewBlockHeader
@@ -48,35 +52,47 @@ func TestHeaderEvents(t *testing.T) {
 	}
 }
 
+// subscribe to new blocks and make sure height increments by 1
 func TestBlockEvents(t *testing.T) {
-	for i, c := range GetClients() {
-		i, c := i, c // capture params
+	for _, c := range GetClients() {
+		c := c
 		t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
 			// start the client for this test if it wasn't already running
 			if !c.IsRunning() {
 				// if so, then we start it, listen, and stop it.
 				err := c.Start()
-				require.Nil(t, err, "%d: %+v", i, err)
-				defer c.Stop()
+				require.Nil(t, err)
+				t.Cleanup(func() {
+					if err := c.Stop(); err != nil {
+						t.Error(err)
+					}
+				})
 			}
 
-			// listen for a new block; ensure height increases by 1
+			const subscriber = "TestBlockEvents"
+
+			eventCh, err := c.Subscribe(context.Background(), subscriber, types.QueryForEvent(types.EventNewBlock).String())
+			require.NoError(t, err)
+			t.Cleanup(func() {
+				if err := c.UnsubscribeAll(context.Background(), subscriber); err != nil {
+					t.Error(err)
+				}
+			})
+
 			var firstBlockHeight int64
-			for j := 0; j < 3; j++ {
-				evtTyp := types.EventNewBlock
-				evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
-				require.Nil(t, err, "%d: %+v", j, err)
-				blockEvent, ok := evt.(types.EventDataNewBlock)
-				require.True(t, ok, "%d: %#v", j, evt)
+			for i := int64(0); i < 3; i++ {
+				event := <-eventCh
+				blockEvent, ok := event.Data.(types.EventDataNewBlock)
+				require.True(t, ok)
 				block := blockEvent.Block
-				if j == 0 {
+
+				if firstBlockHeight == 0 {
 					firstBlockHeight = block.Header.Height
-					continue
 				}
-				require.Equal(t, block.Header.Height, firstBlockHeight+int64(j))
+
+				require.Equal(t, firstBlockHeight+i, block.Header.Height)
 			}
 		})
 	}
@@ -86,45 +102,53 @@ func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) { testTxEventsSent(t, "a
 func TestTxEventsSentWithBroadcastTxSync(t *testing.T)  { testTxEventsSent(t, "sync") }
 
 func testTxEventsSent(t *testing.T, broadcastMethod string) {
-	for i, c := range GetClients() {
-		i, c := i, c // capture params
+	for _, c := range GetClients() {
+		c := c
 		t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
 			// start the client for this test if it wasn't already running
 			if !c.IsRunning() {
 				// if so, then we start it, listen, and stop it.
				err := c.Start()
-				require.Nil(t, err, "%d: %+v", i, err)
-				defer c.Stop()
+				require.Nil(t, err)
+				t.Cleanup(func() {
+					if err := c.Stop(); err != nil {
+						t.Error(err)
+					}
+				})
 			}
 
 			// make the tx
 			_, _, tx := MakeTxKV()
-			evtTyp := types.EventTx
 
 			// send
-			var (
-				txres *ctypes.ResultBroadcastTx
-				err   error
-			)
-			switch broadcastMethod {
-			case "async":
-				txres, err = c.BroadcastTxAsync(tx)
-			case "sync":
-				txres, err = c.BroadcastTxSync(tx)
-			default:
-				panic(fmt.Sprintf("Unknown broadcastMethod %s", broadcastMethod))
-			}
-
-			require.NoError(t, err)
-			require.Equal(t, txres.Code, abci.CodeTypeOK)
+			go func() {
+				var (
+					txres *ctypes.ResultBroadcastTx
+					err   error
+					ctx   = context.Background()
+				)
+				switch broadcastMethod {
+				case "async":
+					txres, err = c.BroadcastTxAsync(ctx, tx)
+				case "sync":
+					txres, err = c.BroadcastTxSync(ctx, tx)
+				default:
+					panic(fmt.Sprintf("Unknown broadcastMethod %s", broadcastMethod))
+				}
+				if assert.NoError(t, err) {
+					assert.Equal(t, txres.Code, abci.CodeTypeOK)
+				}
+			}()
 
 			// and wait for confirmation
-			evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
-			require.Nil(t, err, "%d: %+v", i, err)
+			evt, err := client.WaitForOneEvent(c, types.EventTx, waitForEventTimeout)
+			require.Nil(t, err)
+
 			// and make sure it has the proper info
 			txe, ok := evt.(types.EventDataTx)
-			require.True(t, ok, "%d: %#v", i, evt)
+			require.True(t, ok)
+
 			// make sure this is the proper tx
 			require.EqualValues(t, tx, txe.Tx)
 			require.True(t, txe.Result.IsOK())
diff --git a/rpc/client/evidence_test.go b/rpc/client/evidence_test.go
new file mode 100644
index 000000000..4a831553c
--- /dev/null
+++ b/rpc/client/evidence_test.go
@@ -0,0 +1,165 @@
+package client_test
+
+import (
+	"bytes"
+	"context"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/crypto/ed25519"
+	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/crypto/tmhash"
+	tmrand "github.com/tendermint/tendermint/libs/rand"
+	"github.com/tendermint/tendermint/privval"
+	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+	"github.com/tendermint/tendermint/rpc/client"
+	rpctest "github.com/tendermint/tendermint/rpc/test"
+	"github.com/tendermint/tendermint/types"
+)
+
+// The empty node used in tests has a genesis time of
+// 2018-10-10 08:20:13.695936996 +0000 UTC, so for evidence to validate it
+// must carry that same timestamp.
+var defaultTestTime = time.Date(2018, 10, 10, 8, 20, 13, 695936996, time.UTC)
+
+func newEvidence(t *testing.T, val *privval.FilePV,
+	vote *types.Vote, vote2 *types.Vote,
+	chainID string) *types.DuplicateVoteEvidence {
+
+	var err error
+
+	v := vote.ToProto()
+	v2 := vote2.ToProto()
+
+	vote.Signature, err = val.Key.PrivKey.Sign(types.VoteSignBytes(chainID, v))
+	require.NoError(t, err)
+
+	vote2.Signature, err = val.Key.PrivKey.Sign(types.VoteSignBytes(chainID, v2))
+	require.NoError(t, err)
+
+	validator := types.NewValidator(val.Key.PubKey, 10)
+	valSet := types.NewValidatorSet([]*types.Validator{validator})
+
+	return types.NewDuplicateVoteEvidence(vote, vote2, defaultTestTime, valSet)
+}
+
+func makeEvidences(
+	t *testing.T,
+	val *privval.FilePV,
+	chainID string,
+) (correct *types.DuplicateVoteEvidence, fakes []*types.DuplicateVoteEvidence) {
+	vote := types.Vote{
+		ValidatorAddress: val.Key.Address,
+
ValidatorIndex: 0, + Height: 1, + Round: 0, + Type: tmproto.PrevoteType, + Timestamp: defaultTestTime, + BlockID: types.BlockID{ + Hash: tmhash.Sum(tmrand.Bytes(tmhash.Size)), + PartSetHeader: types.PartSetHeader{ + Total: 1000, + Hash: tmhash.Sum([]byte("partset")), + }, + }, + } + + vote2 := vote + vote2.BlockID.Hash = tmhash.Sum([]byte("blockhash2")) + correct = newEvidence(t, val, &vote, &vote2, chainID) + + fakes = make([]*types.DuplicateVoteEvidence, 0) + + // different address + { + v := vote2 + v.ValidatorAddress = []byte("some_address") + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID)) + } + + // different height + { + v := vote2 + v.Height = vote.Height + 1 + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID)) + } + + // different round + { + v := vote2 + v.Round = vote.Round + 1 + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID)) + } + + // different type + { + v := vote2 + v.Type = tmproto.PrecommitType + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID)) + } + + // exactly same vote + { + v := vote + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID)) + } + + return correct, fakes +} + +func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) { + var ( + config = rpctest.GetConfig() + chainID = config.ChainID() + ) + pv, err := privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) + require.NoError(t, err) + + for i, c := range GetClients() { + correct, fakes := makeEvidences(t, pv, chainID) + t.Logf("client %d", i) + + result, err := c.BroadcastEvidence(context.Background(), correct) + require.NoError(t, err, "BroadcastEvidence(%s) failed", correct) + assert.Equal(t, correct.Hash(), result.Hash, "expected result hash to match evidence hash") + + status, err := c.Status(context.Background()) + require.NoError(t, err) + err = client.WaitForHeight(c, status.SyncInfo.LatestBlockHeight+2, nil) + require.NoError(t, err) + + ed25519pub := pv.Key.PubKey.(ed25519.PubKey) + rawpub := ed25519pub.Bytes() + result2, err := c.ABCIQuery(context.Background(), "/val", rawpub) + require.NoError(t, err) + qres := result2.Response + require.True(t, qres.IsOK()) + + var v abci.ValidatorUpdate + err = abci.ReadMessage(bytes.NewReader(qres.Value), &v) + require.NoError(t, err, "Error reading query result, value %v", qres.Value) + + pk, err := cryptoenc.PubKeyFromProto(v.PubKey) + require.NoError(t, err) + + require.EqualValues(t, rawpub, pk, "Stored PubKey not equal with expected, value %v", string(qres.Value)) + require.Equal(t, int64(9), v.Power, "Stored Power not equal with expected, value %v", string(qres.Value)) + + for _, fake := range fakes { + _, err := c.BroadcastEvidence(context.Background(), fake) + require.Error(t, err, "BroadcastEvidence(%s) succeeded, but the evidence was fake", fake) + } + } +} + +func TestBroadcastEmptyEvidence(t *testing.T) { + for _, c := range GetClients() { + _, err := c.BroadcastEvidence(context.Background(), nil) + assert.Error(t, err) + } +} diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index 5d87a3a98..474aba1b6 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -2,6 +2,7 @@ package client_test import ( "bytes" + "context" "fmt" "log" @@ -21,7 +22,7 @@ func ExampleHTTP_simple() { rpcAddr := rpctest.GetConfig().RPC.ListenAddress c, err := rpchttp.New(rpcAddr, "/websocket") if err != nil { - log.Fatal(err) + log.Fatal(err) //nolint:gocritic } // Create a transaction @@ -31,7 +32,7 @@ func ExampleHTTP_simple() 
{ // Broadcast the transaction and wait for it to commit (rather use // c.BroadcastTxSync though in production). - bres, err := c.BroadcastTxCommit(tx) + bres, err := c.BroadcastTxCommit(context.Background(), tx) if err != nil { log.Fatal(err) } @@ -40,7 +41,7 @@ func ExampleHTTP_simple() { } // Now try to fetch the value for the key - qres, err := c.ABCIQuery("/key", k) + qres, err := c.ABCIQuery(context.Background(), "/key", k) if err != nil { log.Fatal(err) } @@ -68,7 +69,6 @@ func ExampleHTTP_batching() { // Start a tendermint node (and kvstore) in the background to test against app := kvstore.NewApplication() node := rpctest.StartTendermint(app, rpctest.SuppressStdout, rpctest.RecreateConfig) - defer rpctest.StopTendermint(node) // Create our RPC client rpcAddr := rpctest.GetConfig().RPC.ListenAddress @@ -77,6 +77,8 @@ func ExampleHTTP_batching() { log.Fatal(err) } + defer rpctest.StopTendermint(node) + // Create our two transactions k1 := []byte("firstName") v1 := []byte("satoshi") @@ -95,26 +97,26 @@ func ExampleHTTP_batching() { for _, tx := range txs { // Broadcast the transaction and wait for it to commit (rather use // c.BroadcastTxSync though in production). - if _, err := batch.BroadcastTxCommit(tx); err != nil { - log.Fatal(err) + if _, err := batch.BroadcastTxCommit(context.Background(), tx); err != nil { + log.Fatal(err) //nolint:gocritic } } // Send the batch of 2 transactions - if _, err := batch.Send(); err != nil { + if _, err := batch.Send(context.Background()); err != nil { log.Fatal(err) } // Now let's query for the original results as a batch keys := [][]byte{k1, k2} for _, key := range keys { - if _, err := batch.ABCIQuery("/key", key); err != nil { + if _, err := batch.ABCIQuery(context.Background(), "/key", key); err != nil { log.Fatal(err) } } // Send the 2 queries and keep the results - results, err := batch.Send() + results, err := batch.Send(context.Background()) if err != nil { log.Fatal(err) } diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go index 0e54ec03b..78579b8a3 100644 --- a/rpc/client/helpers.go +++ b/rpc/client/helpers.go @@ -2,10 +2,10 @@ package client import ( "context" + "errors" + "fmt" "time" - "github.com/pkg/errors" - "github.com/tendermint/tendermint/types" ) @@ -16,7 +16,7 @@ type Waiter func(delta int64) (abort error) // but you can plug in another one func DefaultWaitStrategy(delta int64) (abort error) { if delta > 10 { - return errors.Errorf("waiting for %d blocks... aborting", delta) + return fmt.Errorf("waiting for %d blocks... aborting", delta) } else if delta > 0 { // estimate of wait time.... 
		// wait half a second for the next block (in progress)
@@ -38,7 +38,7 @@ func WaitForHeight(c StatusClient, h int64, waiter Waiter) error {
 	}
 	delta := int64(1)
 	for delta > 0 {
-		s, err := c.Status()
+		s, err := c.Status(context.Background())
 		if err != nil {
 			return err
 		}
@@ -48,6 +48,7 @@ func WaitForHeight(c StatusClient, h int64, waiter Waiter) error {
 			return err
 		}
 	}
+
 	return nil
 }
 
@@ -64,10 +65,14 @@ func WaitForOneEvent(c EventsClient, evtTyp string, timeout time.Duration) (type
 	// register for the next event of this type
 	eventCh, err := c.Subscribe(ctx, subscriber, types.QueryForEvent(evtTyp).String())
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to subscribe")
+		return nil, fmt.Errorf("failed to subscribe: %w", err)
 	}
 	// make sure to unregister after the test is over
-	defer c.UnsubscribeAll(ctx, subscriber)
+	defer func() {
+		if deferErr := c.UnsubscribeAll(ctx, subscriber); deferErr != nil {
+			panic(deferErr)
+		}
+	}()
 
 	select {
 	case event := <-eventCh:
diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go
index 9af0b6cf4..8fc0c7a91 100644
--- a/rpc/client/http/http.go
+++ b/rpc/client/http/http.go
@@ -2,22 +2,20 @@ package http
 
 import (
 	"context"
+	"errors"
 	"net/http"
 	"strings"
-	"sync"
 	"time"
 
-	"github.com/pkg/errors"
-
-	amino "github.com/tendermint/go-amino"
-
 	"github.com/tendermint/tendermint/libs/bytes"
+	tmjson "github.com/tendermint/tendermint/libs/json"
 	"github.com/tendermint/tendermint/libs/log"
 	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
 	"github.com/tendermint/tendermint/libs/service"
+	tmsync "github.com/tendermint/tendermint/libs/sync"
 	rpcclient "github.com/tendermint/tendermint/rpc/client"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
-	rpcclientlib "github.com/tendermint/tendermint/rpc/lib/client"
+	jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client"
 	"github.com/tendermint/tendermint/types"
 )
 
@@ -62,7 +60,7 @@ Example:
 */
 type HTTP struct {
 	remote string
-	rpc    *rpcclientlib.JSONRPCClient
+	rpc    *jsonrpcclient.Client
 
 	*baseRPCClient
 	*WSEvents
@@ -79,7 +77,7 @@ type HTTP struct {
 // batch, but ordering of transactions in the batch cannot be guaranteed in such
 // an example.
 type BatchHTTP struct {
-	rpcBatch *rpcclientlib.JSONRPCRequestBatch
+	rpcBatch *jsonrpcclient.RequestBatch
 	*baseRPCClient
 }
 
@@ -97,7 +95,7 @@ type rpcClient interface {
 // baseRPCClient implements the basic RPC method logic without the actual
 // underlying RPC call functionality, which is provided by `caller`.
 type baseRPCClient struct {
-	caller rpcclientlib.JSONRPCCaller
+	caller jsonrpcclient.Caller
 }
 
 var _ rpcClient = (*HTTP)(nil)
@@ -111,16 +109,17 @@ var _ rpcClient = (*baseRPCClient)(nil)
 // the websocket path (which always seems to be "/websocket")
 // An error is returned on invalid remote. The function panics when remote is nil.
 func New(remote, wsEndpoint string) (*HTTP, error) {
-	httpClient, err := rpcclientlib.DefaultHTTPClient(remote)
+	httpClient, err := jsonrpcclient.DefaultHTTPClient(remote)
 	if err != nil {
 		return nil, err
 	}
 	return NewWithClient(remote, wsEndpoint, httpClient)
 }
 
-// Create timeout enabled http client
+// NewWithTimeout does the same thing as New, except you can set a Timeout for
+// http.Client. A Timeout of zero means no timeout.
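Since every client method now threads a context.Context, callers can bound any RPC with a deadline. A sketch of that (the address is a placeholder; not part of this change):

package main

import (
	"context"
	"fmt"
	"time"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	c, err := rpchttp.New("tcp://127.0.0.1:26657", "/websocket")
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// The call is abandoned if the node takes longer than three seconds.
	status, err := c.Status(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(status.SyncInfo.LatestBlockHeight)
}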
func NewWithTimeout(remote, wsEndpoint string, timeout uint) (*HTTP, error) { - httpClient, err := rpcclientlib.DefaultHTTPClient(remote) + httpClient, err := jsonrpcclient.DefaultHTTPClient(remote) if err != nil { return nil, err } @@ -135,15 +134,12 @@ func NewWithClient(remote, wsEndpoint string, client *http.Client) (*HTTP, error panic("nil http.Client provided") } - rc, err := rpcclientlib.NewJSONRPCClientWithHTTPClient(remote, client) + rc, err := jsonrpcclient.NewWithHTTPClient(remote, client) if err != nil { return nil, err } - cdc := rc.Codec() - ctypes.RegisterAmino(cdc) - rc.SetCodec(cdc) - wsEvents, err := newWSEvents(cdc, remote, wsEndpoint) + wsEvents, err := newWSEvents(remote, wsEndpoint) if err != nil { return nil, err } @@ -188,8 +184,8 @@ func (c *HTTP) NewBatch() *BatchHTTP { // compilation of the batched requests and send them off using the client as a // single request. On success, this returns a list of the deserialized results // from each request in the sent batch. -func (b *BatchHTTP) Send() ([]interface{}, error) { - return b.rpcBatch.Send() +func (b *BatchHTTP) Send(ctx context.Context) ([]interface{}, error) { + return b.rpcBatch.Send(ctx) } // Clear will empty out this batch of requests and return the number of requests @@ -206,226 +202,326 @@ func (b *BatchHTTP) Count() int { //----------------------------------------------------------------------------- // baseRPCClient -func (c *baseRPCClient) Status() (*ctypes.ResultStatus, error) { +func (c *baseRPCClient) Status(ctx context.Context) (*ctypes.ResultStatus, error) { result := new(ctypes.ResultStatus) - _, err := c.caller.Call("status", map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "status", map[string]interface{}{}, result) if err != nil { - return nil, errors.Wrap(err, "Status") + return nil, err } + return result, nil } -func (c *baseRPCClient) ABCIInfo() (*ctypes.ResultABCIInfo, error) { +func (c *baseRPCClient) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { result := new(ctypes.ResultABCIInfo) - _, err := c.caller.Call("abci_info", map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "abci_info", map[string]interface{}{}, result) if err != nil { - return nil, errors.Wrap(err, "ABCIInfo") + return nil, err } + return result, nil } -func (c *baseRPCClient) ABCIQuery(path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQueryWithOptions(path, data, rpcclient.DefaultABCIQueryOptions) +func (c *baseRPCClient) ABCIQuery( + ctx context.Context, + path string, + data bytes.HexBytes, +) (*ctypes.ResultABCIQuery, error) { + return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } func (c *baseRPCClient) ABCIQueryWithOptions( + ctx context.Context, path string, data bytes.HexBytes, opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { result := new(ctypes.ResultABCIQuery) - _, err := c.caller.Call("abci_query", + _, err := c.caller.Call(ctx, "abci_query", map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, result) if err != nil { - return nil, errors.Wrap(err, "ABCIQuery") + return nil, err } + return result, nil } -func (c *baseRPCClient) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c *baseRPCClient) BroadcastTxCommit( + ctx context.Context, + tx types.Tx, +) (*ctypes.ResultBroadcastTxCommit, error) { result := new(ctypes.ResultBroadcastTxCommit) - _, err := c.caller.Call("broadcast_tx_commit", 
map[string]interface{}{"tx": tx}, result) + _, err := c.caller.Call(ctx, "broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) if err != nil { - return nil, errors.Wrap(err, "broadcast_tx_commit") + return nil, err } return result, nil } -func (c *baseRPCClient) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return c.broadcastTX("broadcast_tx_async", tx) +func (c *baseRPCClient) BroadcastTxAsync( + ctx context.Context, + tx types.Tx, +) (*ctypes.ResultBroadcastTx, error) { + return c.broadcastTX(ctx, "broadcast_tx_async", tx) } -func (c *baseRPCClient) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return c.broadcastTX("broadcast_tx_sync", tx) +func (c *baseRPCClient) BroadcastTxSync( + ctx context.Context, + tx types.Tx, +) (*ctypes.ResultBroadcastTx, error) { + return c.broadcastTX(ctx, "broadcast_tx_sync", tx) } -func (c *baseRPCClient) broadcastTX(route string, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *baseRPCClient) broadcastTX( + ctx context.Context, + route string, + tx types.Tx, +) (*ctypes.ResultBroadcastTx, error) { result := new(ctypes.ResultBroadcastTx) - _, err := c.caller.Call(route, map[string]interface{}{"tx": tx}, result) + _, err := c.caller.Call(ctx, route, map[string]interface{}{"tx": tx}, result) if err != nil { - return nil, errors.Wrap(err, route) + return nil, err } return result, nil } -func (c *baseRPCClient) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *baseRPCClient) UnconfirmedTxs( + ctx context.Context, + limit *int, +) (*ctypes.ResultUnconfirmedTxs, error) { result := new(ctypes.ResultUnconfirmedTxs) - _, err := c.caller.Call("unconfirmed_txs", map[string]interface{}{"limit": limit}, result) + params := make(map[string]interface{}) + if limit != nil { + params["limit"] = limit + } + _, err := c.caller.Call(ctx, "unconfirmed_txs", params, result) if err != nil { - return nil, errors.Wrap(err, "unconfirmed_txs") + return nil, err } return result, nil } -func (c *baseRPCClient) NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) { +func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { result := new(ctypes.ResultUnconfirmedTxs) - _, err := c.caller.Call("num_unconfirmed_txs", map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "num_unconfirmed_txs", map[string]interface{}{}, result) if err != nil { - return nil, errors.Wrap(err, "num_unconfirmed_txs") + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { + result := new(ctypes.ResultCheckTx) + _, err := c.caller.Call(ctx, "check_tx", map[string]interface{}{"tx": tx}, result) + if err != nil { + return nil, err } return result, nil } -func (c *baseRPCClient) NetInfo() (*ctypes.ResultNetInfo, error) { +func (c *baseRPCClient) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { result := new(ctypes.ResultNetInfo) - _, err := c.caller.Call("net_info", map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "net_info", map[string]interface{}{}, result) if err != nil { - return nil, errors.Wrap(err, "NetInfo") + return nil, err } return result, nil } -func (c *baseRPCClient) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { +func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { result := new(ctypes.ResultDumpConsensusState) - _, err := c.caller.Call("dump_consensus_state", 
map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "dump_consensus_state", map[string]interface{}{}, result) if err != nil { - return nil, errors.Wrap(err, "DumpConsensusState") + return nil, err } return result, nil } -func (c *baseRPCClient) ConsensusState() (*ctypes.ResultConsensusState, error) { +func (c *baseRPCClient) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { result := new(ctypes.ResultConsensusState) - _, err := c.caller.Call("consensus_state", map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "consensus_state", map[string]interface{}{}, result) if err != nil { - return nil, errors.Wrap(err, "ConsensusState") + return nil, err } return result, nil } -func (c *baseRPCClient) ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error) { +func (c *baseRPCClient) ConsensusParams( + ctx context.Context, + height *int64, +) (*ctypes.ResultConsensusParams, error) { result := new(ctypes.ResultConsensusParams) - _, err := c.caller.Call("consensus_params", map[string]interface{}{"height": height}, result) + params := make(map[string]interface{}) + if height != nil { + params["height"] = height + } + _, err := c.caller.Call(ctx, "consensus_params", params, result) if err != nil { - return nil, errors.Wrap(err, "ConsensusParams") + return nil, err } return result, nil } -func (c *baseRPCClient) Health() (*ctypes.ResultHealth, error) { +func (c *baseRPCClient) Health(ctx context.Context) (*ctypes.ResultHealth, error) { result := new(ctypes.ResultHealth) - _, err := c.caller.Call("health", map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "health", map[string]interface{}{}, result) if err != nil { - return nil, errors.Wrap(err, "Health") + return nil, err } return result, nil } -func (c *baseRPCClient) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func (c *baseRPCClient) BlockchainInfo( + ctx context.Context, + minHeight, + maxHeight int64, +) (*ctypes.ResultBlockchainInfo, error) { result := new(ctypes.ResultBlockchainInfo) - _, err := c.caller.Call("blockchain", + _, err := c.caller.Call(ctx, "blockchain", map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight}, result) if err != nil { - return nil, errors.Wrap(err, "BlockchainInfo") + return nil, err } return result, nil } -func (c *baseRPCClient) Genesis() (*ctypes.ResultGenesis, error) { +func (c *baseRPCClient) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { result := new(ctypes.ResultGenesis) - _, err := c.caller.Call("genesis", map[string]interface{}{}, result) + _, err := c.caller.Call(ctx, "genesis", map[string]interface{}{}, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { + result := new(ctypes.ResultBlock) + params := make(map[string]interface{}) + if height != nil { + params["height"] = height + } + _, err := c.caller.Call(ctx, "block", params, result) if err != nil { - return nil, errors.Wrap(err, "Genesis") + return nil, err } return result, nil } -func (c *baseRPCClient) Block(height *int64) (*ctypes.ResultBlock, error) { +func (c *baseRPCClient) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { result := new(ctypes.ResultBlock) - _, err := c.caller.Call("block", map[string]interface{}{"height": height}, result) + params := map[string]interface{}{ + "hash": hash, + } + _, err := c.caller.Call(ctx, "block_by_hash", params, result) 
if err != nil { - return nil, errors.Wrap(err, "Block") + return nil, err } return result, nil } -func (c *baseRPCClient) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { +func (c *baseRPCClient) BlockResults( + ctx context.Context, + height *int64, +) (*ctypes.ResultBlockResults, error) { result := new(ctypes.ResultBlockResults) - _, err := c.caller.Call("block_results", map[string]interface{}{"height": height}, result) + params := make(map[string]interface{}) + if height != nil { + params["height"] = height + } + _, err := c.caller.Call(ctx, "block_results", params, result) if err != nil { - return nil, errors.Wrap(err, "Block Result") + return nil, err } return result, nil } -func (c *baseRPCClient) Commit(height *int64) (*ctypes.ResultCommit, error) { +func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { result := new(ctypes.ResultCommit) - _, err := c.caller.Call("commit", map[string]interface{}{"height": height}, result) + params := make(map[string]interface{}) + if height != nil { + params["height"] = height + } + _, err := c.caller.Call(ctx, "commit", params, result) if err != nil { - return nil, errors.Wrap(err, "Commit") + return nil, err } return result, nil } -func (c *baseRPCClient) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { +func (c *baseRPCClient) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { result := new(ctypes.ResultTx) params := map[string]interface{}{ "hash": hash, "prove": prove, } - _, err := c.caller.Call("tx", params, result) + _, err := c.caller.Call(ctx, "tx", params, result) if err != nil { - return nil, errors.Wrap(err, "Tx") + return nil, err } return result, nil } -func (c *baseRPCClient) TxSearch(query string, prove bool, page, perPage int, orderBy string) ( +func (c *baseRPCClient) TxSearch( + ctx context.Context, + query string, + prove bool, + page, + perPage *int, + orderBy string, +) ( *ctypes.ResultTxSearch, error) { result := new(ctypes.ResultTxSearch) params := map[string]interface{}{ "query": query, "prove": prove, - "page": page, - "per_page": perPage, "order_by": orderBy, } - _, err := c.caller.Call("tx_search", params, result) + if page != nil { + params["page"] = page + } + if perPage != nil { + params["per_page"] = perPage + } + _, err := c.caller.Call(ctx, "tx_search", params, result) if err != nil { - return nil, errors.Wrap(err, "TxSearch") + return nil, err } return result, nil } -func (c *baseRPCClient) Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) { +func (c *baseRPCClient) Validators( + ctx context.Context, + height *int64, + page, + perPage *int, +) (*ctypes.ResultValidators, error) { result := new(ctypes.ResultValidators) - _, err := c.caller.Call("validators", map[string]interface{}{ - "height": height, - "page": page, - "per_page": perPage, - }, result) + params := make(map[string]interface{}) + if page != nil { + params["page"] = page + } + if perPage != nil { + params["per_page"] = perPage + } + if height != nil { + params["height"] = height + } + _, err := c.caller.Call(ctx, "validators", params, result) if err != nil { - return nil, errors.Wrap(err, "Validators") + return nil, err } return result, nil } -func (c *baseRPCClient) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { +func (c *baseRPCClient) BroadcastEvidence( + ctx context.Context, + ev types.Evidence, +) (*ctypes.ResultBroadcastEvidence, error) { result := new(ctypes.ResultBroadcastEvidence) - _, err 
:= c.caller.Call("broadcast_evidence", map[string]interface{}{"evidence": ev}, result) + _, err := c.caller.Call(ctx, "broadcast_evidence", map[string]interface{}{"evidence": ev}, result) if err != nil { - return nil, errors.Wrap(err, "BroadcastEvidence") + return nil, err } return result, nil } @@ -438,18 +534,16 @@ var errNotRunning = errors.New("client is not running. Use .Start() method to st // WSEvents is a wrapper around WSClient, which implements EventsClient. type WSEvents struct { service.BaseService - cdc *amino.Codec remote string endpoint string - ws *rpcclientlib.WSClient + ws *jsonrpcclient.WSClient - mtx sync.RWMutex + mtx tmsync.RWMutex subscriptions map[string]chan ctypes.ResultEvent // query -> chan } -func newWSEvents(cdc *amino.Codec, remote, endpoint string) (*WSEvents, error) { +func newWSEvents(remote, endpoint string) (*WSEvents, error) { w := &WSEvents{ - cdc: cdc, endpoint: endpoint, remote: remote, subscriptions: make(map[string]chan ctypes.ResultEvent), @@ -457,14 +551,13 @@ func newWSEvents(cdc *amino.Codec, remote, endpoint string) (*WSEvents, error) { w.BaseService = *service.NewBaseService(nil, "WSEvents", w) var err error - w.ws, err = rpcclientlib.NewWSClient(w.remote, w.endpoint, rpcclientlib.OnReconnect(func() { + w.ws, err = jsonrpcclient.NewWS(w.remote, w.endpoint, jsonrpcclient.OnReconnect(func() { // resubscribe immediately w.redoSubscriptionsAfter(0 * time.Second) })) if err != nil { return nil, err } - w.ws.SetCodec(w.cdc) w.ws.SetLogger(w.Logger) return w, nil @@ -483,7 +576,9 @@ func (w *WSEvents) OnStart() error { // OnStop implements service.Service by stopping WSClient. func (w *WSEvents) OnStop() { - _ = w.ws.Stop() + if err := w.ws.Stop(); err != nil { + w.Logger.Error("Can't stop ws client", "err", err) + } } // Subscribe implements EventsClient by using WSClient to subscribe given @@ -604,7 +699,7 @@ func (w *WSEvents) eventListener() { } result := new(ctypes.ResultEvent) - err := w.cdc.UnmarshalJSON(resp.Result, result) + err := tmjson.Unmarshal(resp.Result, result) if err != nil { w.Logger.Error("failed to unmarshal response", "err", err) continue diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 408d803c8..c6ff0fee2 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -50,47 +50,49 @@ type Client interface { // is easier to mock. type ABCIClient interface { // Reading from abci app - ABCIInfo() (*ctypes.ResultABCIInfo, error) - ABCIQuery(path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) - ABCIQueryWithOptions(path string, data bytes.HexBytes, + ABCIInfo(context.Context) (*ctypes.ResultABCIInfo, error) + ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) + ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) // Writing to abci app - BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) - BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) - BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) + BroadcastTxCommit(context.Context, types.Tx) (*ctypes.ResultBroadcastTxCommit, error) + BroadcastTxAsync(context.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) + BroadcastTxSync(context.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) } // SignClient groups together the functionality needed to get valid signatures // and prove anything about the chain. 
type SignClient interface { - Block(height *int64) (*ctypes.ResultBlock, error) - BlockResults(height *int64) (*ctypes.ResultBlockResults, error) - Commit(height *int64) (*ctypes.ResultCommit, error) - Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) - Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) - TxSearch(query string, prove bool, page, perPage int, orderBy string) (*ctypes.ResultTxSearch, error) + Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) + BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) + BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) + Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) + Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) + Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) + TxSearch(ctx context.Context, query string, prove bool, page, perPage *int, + orderBy string) (*ctypes.ResultTxSearch, error) } // HistoryClient provides access to data from genesis to now in large chunks. type HistoryClient interface { - Genesis() (*ctypes.ResultGenesis, error) - BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) + Genesis(context.Context) (*ctypes.ResultGenesis, error) + BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) } // StatusClient provides access to general chain info. type StatusClient interface { - Status() (*ctypes.ResultStatus, error) + Status(context.Context) (*ctypes.ResultStatus, error) } // NetworkClient provides general info about the network state; it is not // usually needed. type NetworkClient interface { - NetInfo() (*ctypes.ResultNetInfo, error) - DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) - ConsensusState() (*ctypes.ResultConsensusState, error) - ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error) - Health() (*ctypes.ResultHealth, error) + NetInfo(context.Context) (*ctypes.ResultNetInfo, error) + DumpConsensusState(context.Context) (*ctypes.ResultDumpConsensusState, error) + ConsensusState(context.Context) (*ctypes.ResultConsensusState, error) + ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) + Health(context.Context) (*ctypes.ResultHealth, error) } // EventsClient is reactive; you can subscribe to any message, given the proper @@ -112,12 +114,21 @@ type EventsClient interface { // MempoolClient shows data about the current mempool state. type MempoolClient interface { - UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) - NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) + UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) + NumUnconfirmedTxs(context.Context) (*ctypes.ResultUnconfirmedTxs, error) + CheckTx(context.Context, types.Tx) (*ctypes.ResultCheckTx, error) } // EvidenceClient is used for submitting evidence of malicious // behaviour. type EvidenceClient interface { - BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) + BroadcastEvidence(context.Context, types.Evidence) (*ctypes.ResultBroadcastEvidence, error) +} + +// RemoteClient is a Client that can also return the remote network address. +type RemoteClient interface { + Client + + // Remote returns the remote network address in string form.
+ Remote() string } diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index a28e9f939..eb9cc485b 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -2,10 +2,9 @@ package local import ( "context" + "fmt" "time" - "github.com/pkg/errors" - "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" @@ -14,7 +13,7 @@ import ( rpcclient "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -51,7 +50,9 @@ type Local struct { // don't run in parallel, or try to simulate an entire network in // one process... func New(node *nm.Node) *Local { - node.ConfigureRPC() + if err := node.ConfigureRPC(); err != nil { + node.Logger.Error("Error configuring RPC", "err", err) + } return &Local{ EventBus: node.EventBus(), Logger: log.NewNopLogger(), @@ -66,107 +67,128 @@ func (c *Local) SetLogger(l log.Logger) { c.Logger = l } -func (c *Local) Status() (*ctypes.ResultStatus, error) { +func (c *Local) Status(ctx context.Context) (*ctypes.ResultStatus, error) { return core.Status(c.ctx) } -func (c *Local) ABCIInfo() (*ctypes.ResultABCIInfo, error) { +func (c *Local) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { return core.ABCIInfo(c.ctx) } -func (c *Local) ABCIQuery(path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQueryWithOptions(path, data, rpcclient.DefaultABCIQueryOptions) +func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { + return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } func (c *Local) ABCIQueryWithOptions( + ctx context.Context, path string, data bytes.HexBytes, opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { return core.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove) } -func (c *Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { return core.BroadcastTxCommit(c.ctx, tx) } -func (c *Local) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return core.BroadcastTxAsync(c.ctx, tx) } -func (c *Local) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return core.BroadcastTxSync(c.ctx, tx) } -func (c *Local) UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { return core.UnconfirmedTxs(c.ctx, limit) } -func (c *Local) NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) { +func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { return core.NumUnconfirmedTxs(c.ctx) } -func (c *Local) NetInfo() (*ctypes.ResultNetInfo, error) { +func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { + return core.CheckTx(c.ctx, tx) +} + +func (c *Local) NetInfo(ctx context.Context) 
(*ctypes.ResultNetInfo, error) { return core.NetInfo(c.ctx) } -func (c *Local) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { +func (c *Local) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { return core.DumpConsensusState(c.ctx) } -func (c *Local) ConsensusState() (*ctypes.ResultConsensusState, error) { +func (c *Local) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { return core.ConsensusState(c.ctx) } -func (c *Local) ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error) { +func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { return core.ConsensusParams(c.ctx, height) } -func (c *Local) Health() (*ctypes.ResultHealth, error) { +func (c *Local) Health(ctx context.Context) (*ctypes.ResultHealth, error) { return core.Health(c.ctx) } -func (c *Local) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { +func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { return core.UnsafeDialSeeds(c.ctx, seeds) } -func (c *Local) DialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) { - return core.UnsafeDialPeers(c.ctx, peers, persistent) +func (c *Local) DialPeers( + ctx context.Context, + peers []string, + persistent, + unconditional, + private bool, +) (*ctypes.ResultDialPeers, error) { + return core.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private) } -func (c *Local) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { return core.BlockchainInfo(c.ctx, minHeight, maxHeight) } -func (c *Local) Genesis() (*ctypes.ResultGenesis, error) { +func (c *Local) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { return core.Genesis(c.ctx) } -func (c *Local) Block(height *int64) (*ctypes.ResultBlock, error) { +func (c *Local) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { return core.Block(c.ctx, height) } -func (c *Local) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { +func (c *Local) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { + return core.BlockByHash(c.ctx, hash) +} + +func (c *Local) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { return core.BlockResults(c.ctx, height) } -func (c *Local) Commit(height *int64) (*ctypes.ResultCommit, error) { +func (c *Local) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { return core.Commit(c.ctx, height) } -func (c *Local) Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) { +func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { return core.Validators(c.ctx, height, page, perPage) } -func (c *Local) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { +func (c *Local) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { return core.Tx(c.ctx, hash, prove) } -func (c *Local) TxSearch(query string, prove bool, page, perPage int, orderBy string) ( - *ctypes.ResultTxSearch, error) { +func (c *Local) TxSearch( + ctx context.Context, + query string, + prove bool, + page, + perPage *int, + orderBy string, +) (*ctypes.ResultTxSearch, error) { return core.TxSearch(c.ctx, query, prove, page, perPage, orderBy) } -func (c *Local) 
BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { +func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return core.BroadcastEvidence(c.ctx, ev) } @@ -177,7 +199,7 @@ func (c *Local) Subscribe( outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { q, err := tmquery.New(query) if err != nil { - return nil, errors.Wrap(err, "failed to parse query") + return nil, fmt.Errorf("failed to parse query: %w", err) } outCap := 1 @@ -192,7 +214,7 @@ func (c *Local) Subscribe( sub, err = c.EventBus.SubscribeUnbuffered(ctx, subscriber, q) } if err != nil { - return nil, errors.Wrap(err, "failed to subscribe") + return nil, fmt.Errorf("failed to subscribe: %w", err) } outc := make(chan ctypes.ResultEvent, outCap) @@ -256,7 +278,7 @@ func (c *Local) resubscribe(subscriber string, q tmpubsub.Query) types.Subscript func (c *Local) Unsubscribe(ctx context.Context, subscriber, query string) error { q, err := tmquery.New(query) if err != nil { - return errors.Wrap(err, "failed to parse query") + return fmt.Errorf("failed to parse query: %w", err) } return c.EventBus.Unsubscribe(ctx, subscriber, q) } diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go index d600b32f8..c97311c81 100644 --- a/rpc/client/main_test.go +++ b/rpc/client/main_test.go @@ -18,6 +18,7 @@ func TestMain(m *testing.M) { if err != nil { panic(err) } + app := kvstore.NewPersistentKVStoreApplication(dir) node = rpctest.StartTendermint(app) @@ -25,5 +26,6 @@ // and shut down properly at the end rpctest.StopTendermint(node) + _ = os.RemoveAll(dir) os.Exit(code) } diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index ebee8b4e8..0737deec0 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -1,6 +1,8 @@ package mock import ( + "context" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/proxy" @@ -22,15 +24,16 @@ var ( _ client.ABCIClient = (*ABCIRecorder)(nil) ) -func (a ABCIApp) ABCIInfo() (*ctypes.ResultABCIInfo, error) { +func (a ABCIApp) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { return &ctypes.ResultABCIInfo{Response: a.App.Info(proxy.RequestInfo)}, nil } -func (a ABCIApp) ABCIQuery(path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { - return a.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) +func (a ABCIApp) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { + return a.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } func (a ABCIApp) ABCIQueryWithOptions( + ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { @@ -46,7 +49,7 @@ func (a ABCIApp) ABCIQueryWithOptions( // NOTE: Caller should call a.App.Commit() separately; // this function does not actually wait for a commit. // TODO: Make it wait for a commit and set res.Height appropriately.
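// A hypothetical sketch of driving this mock after the context change
// (assumes the abci/example/kvstore app; error handling elided):
//
//	m := mock.ABCIApp{App: kvstore.NewApplication()}
//	res, _ := m.BroadcastTxCommit(context.Background(), types.Tx("key=value"))
//	m.App.Commit()           // per the NOTE above, the caller commits separately
//	_ = res.DeliverTx.IsOK() // res.Height remains unset, see the TODO above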
-func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { res := ctypes.ResultBroadcastTxCommit{} res.CheckTx = a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) if res.CheckTx.IsErr() { @@ -57,11 +60,11 @@ func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit return &res, nil } -func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) // and this gets written in a background thread... if !c.IsErr() { - go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() // nolint: errcheck + go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() } return &ctypes.ResultBroadcastTx{ Code: c.Code, @@ -72,11 +75,11 @@ func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error }, nil } -func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) // and this gets written in a background thread... if !c.IsErr() { - go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() // nolint: errcheck + go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() } return &ctypes.ResultBroadcastTx{ Code: c.Code, @@ -97,7 +100,7 @@ type ABCIMock struct { Broadcast Call } -func (m ABCIMock) ABCIInfo() (*ctypes.ResultABCIInfo, error) { +func (m ABCIMock) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { res, err := m.Info.GetResponse(nil) if err != nil { return nil, err @@ -105,11 +108,12 @@ func (m ABCIMock) ABCIInfo() (*ctypes.ResultABCIInfo, error) { return &ctypes.ResultABCIInfo{Response: res.(abci.ResponseInfo)}, nil } -func (m ABCIMock) ABCIQuery(path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { - return m.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) +func (m ABCIMock) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { + return m.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } func (m ABCIMock) ABCIQueryWithOptions( + ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { @@ -121,7 +125,7 @@ func (m ABCIMock) ABCIQueryWithOptions( return &ctypes.ResultABCIQuery{Response: resQuery}, nil } -func (m ABCIMock) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (m ABCIMock) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { res, err := m.BroadcastCommit.GetResponse(tx) if err != nil { return nil, err @@ -129,7 +133,7 @@ func (m ABCIMock) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommi return res.(*ctypes.ResultBroadcastTxCommit), nil } -func (m ABCIMock) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { res, err := m.Broadcast.GetResponse(tx) if err != nil { return nil, err @@ -137,7 +141,7 @@ func (m ABCIMock) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, erro return res.(*ctypes.ResultBroadcastTx), nil } -func (m ABCIMock) BroadcastTxSync(tx types.Tx) 
(*ctypes.ResultBroadcastTx, error) { +func (m ABCIMock) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { res, err := m.Broadcast.GetResponse(tx) if err != nil { return nil, err @@ -170,8 +174,8 @@ func (r *ABCIRecorder) addCall(call Call) { r.Calls = append(r.Calls, call) } -func (r *ABCIRecorder) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - res, err := r.Client.ABCIInfo() +func (r *ABCIRecorder) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { + res, err := r.Client.ABCIInfo(ctx) r.addCall(Call{ Name: "abci_info", Response: res, @@ -180,15 +184,20 @@ func (r *ABCIRecorder) ABCIInfo() (*ctypes.ResultABCIInfo, error) { return res, err } -func (r *ABCIRecorder) ABCIQuery(path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { - return r.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) +func (r *ABCIRecorder) ABCIQuery( + ctx context.Context, + path string, + data bytes.HexBytes, +) (*ctypes.ResultABCIQuery, error) { + return r.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } func (r *ABCIRecorder) ABCIQueryWithOptions( + ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - res, err := r.Client.ABCIQueryWithOptions(path, data, opts) + res, err := r.Client.ABCIQueryWithOptions(ctx, path, data, opts) r.addCall(Call{ Name: "abci_query", Args: QueryArgs{path, data, opts.Height, opts.Prove}, @@ -198,8 +207,8 @@ func (r *ABCIRecorder) ABCIQueryWithOptions( return res, err } -func (r *ABCIRecorder) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - res, err := r.Client.BroadcastTxCommit(tx) +func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + res, err := r.Client.BroadcastTxCommit(ctx, tx) r.addCall(Call{ Name: "broadcast_tx_commit", Args: tx, @@ -209,8 +218,8 @@ func (r *ABCIRecorder) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTx return res, err } -func (r *ABCIRecorder) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - res, err := r.Client.BroadcastTxAsync(tx) +func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + res, err := r.Client.BroadcastTxAsync(ctx, tx) r.addCall(Call{ Name: "broadcast_tx_async", Args: tx, @@ -220,8 +229,8 @@ func (r *ABCIRecorder) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, return res, err } -func (r *ABCIRecorder) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - res, err := r.Client.BroadcastTxSync(tx) +func (r *ABCIRecorder) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + res, err := r.Client.BroadcastTxSync(ctx, tx) r.addCall(Call{ Name: "broadcast_tx_sync", Args: tx, diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go index 1d923f996..d164b275a 100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -1,11 +1,11 @@ package mock_test import ( + "context" + "errors" "fmt" "testing" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -46,12 +46,12 @@ func TestABCIMock(t *testing.T) { } // now, let's try to make some calls - _, err := m.ABCIInfo() + _, err := m.ABCIInfo(context.Background()) require.NotNil(err) assert.Equal("foobar", err.Error()) // query always returns the response - _query, err := m.ABCIQueryWithOptions("/", nil, 
client.ABCIQueryOptions{Prove: false}) + _query, err := m.ABCIQueryWithOptions(context.Background(), "/", nil, client.ABCIQueryOptions{Prove: false}) query := _query.Response require.Nil(err) require.NotNil(query) @@ -60,18 +60,18 @@ func TestABCIMock(t *testing.T) { assert.Equal(height, query.Height) // non-commit calls always return errors - _, err = m.BroadcastTxSync(goodTx) + _, err = m.BroadcastTxSync(context.Background(), goodTx) require.NotNil(err) assert.Equal("must commit", err.Error()) - _, err = m.BroadcastTxAsync(goodTx) + _, err = m.BroadcastTxAsync(context.Background(), goodTx) require.NotNil(err) assert.Equal("must commit", err.Error()) // commit depends on the input - _, err = m.BroadcastTxCommit(badTx) + _, err = m.BroadcastTxCommit(context.Background(), badTx) require.NotNil(err) assert.Equal("bad tx", err.Error()) - bres, err := m.BroadcastTxCommit(goodTx) + bres, err := m.BroadcastTxCommit(context.Background(), goodTx) require.Nil(err, "%+v", err) assert.EqualValues(0, bres.CheckTx.Code) assert.EqualValues("stand", bres.CheckTx.Data) @@ -95,10 +95,15 @@ func TestABCIRecorder(t *testing.T) { require.Equal(0, len(r.Calls)) - _, err := r.ABCIInfo() + _, err := r.ABCIInfo(context.Background()) assert.Nil(err, "expected no err on info") - _, err = r.ABCIQueryWithOptions("path", bytes.HexBytes("data"), client.ABCIQueryOptions{Prove: false}) + _, err = r.ABCIQueryWithOptions( + context.Background(), + "path", + bytes.HexBytes("data"), + client.ABCIQueryOptions{Prove: false}, + ) assert.NotNil(err, "expected error on query") require.Equal(2, len(r.Calls)) @@ -126,11 +131,11 @@ func TestABCIRecorder(t *testing.T) { // now add some broadcasts (should all err) txs := []types.Tx{{1}, {2}, {3}} - _, err = r.BroadcastTxCommit(txs[0]) + _, err = r.BroadcastTxCommit(context.Background(), txs[0]) assert.NotNil(err, "expected err on broadcast") - _, err = r.BroadcastTxSync(txs[1]) + _, err = r.BroadcastTxSync(context.Background(), txs[1]) assert.NotNil(err, "expected err on broadcast") - _, err = r.BroadcastTxAsync(txs[2]) + _, err = r.BroadcastTxAsync(context.Background(), txs[2]) assert.NotNil(err, "expected err on broadcast") require.Equal(5, len(r.Calls)) @@ -160,14 +165,14 @@ func TestABCIApp(t *testing.T) { m := mock.ABCIApp{app} // get some info - info, err := m.ABCIInfo() + info, err := m.ABCIInfo(context.Background()) require.Nil(err) assert.Equal(`{"size":0}`, info.Response.GetData()) // add a key key, value := "foo", "bar" tx := fmt.Sprintf("%s=%s", key, value) - res, err := m.BroadcastTxCommit(types.Tx(tx)) + res, err := m.BroadcastTxCommit(context.Background(), types.Tx(tx)) require.Nil(err) assert.True(res.CheckTx.IsOK()) require.NotNil(res.DeliverTx) @@ -180,7 +185,12 @@ func TestABCIApp(t *testing.T) { } // check the key - _qres, err := m.ABCIQueryWithOptions("/key", bytes.HexBytes(key), client.ABCIQueryOptions{Prove: true}) + _qres, err := m.ABCIQueryWithOptions( + context.Background(), + "/key", + bytes.HexBytes(key), + client.ABCIQueryOptions{Prove: true}, + ) qres := _qres.Response require.Nil(err) assert.EqualValues(value, qres.Value) diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 869d7b3e9..ed911ec20 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -15,6 +15,7 @@ want to directly call a tendermint node in process, you can use the */ import ( + "context" "reflect" "github.com/tendermint/tendermint/libs/bytes" @@ -22,7 +23,7 @@ import ( "github.com/tendermint/tendermint/rpc/client" 
"github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -79,85 +80,100 @@ func (c Call) GetResponse(args interface{}) (interface{}, error) { return nil, c.Error } -func (c Client) Status() (*ctypes.ResultStatus, error) { +func (c Client) Status(ctx context.Context) (*ctypes.ResultStatus, error) { return core.Status(&rpctypes.Context{}) } -func (c Client) ABCIInfo() (*ctypes.ResultABCIInfo, error) { +func (c Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { return core.ABCIInfo(&rpctypes.Context{}) } -func (c Client) ABCIQuery(path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) +func (c Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { + return c.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } func (c Client) ABCIQueryWithOptions( + ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { return core.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove) } -func (c Client) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { return core.BroadcastTxCommit(&rpctypes.Context{}, tx) } -func (c Client) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return core.BroadcastTxAsync(&rpctypes.Context{}, tx) } -func (c Client) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return core.BroadcastTxSync(&rpctypes.Context{}, tx) } -func (c Client) NetInfo() (*ctypes.ResultNetInfo, error) { +func (c Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { + return core.CheckTx(&rpctypes.Context{}, tx) +} + +func (c Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { return core.NetInfo(&rpctypes.Context{}) } -func (c Client) ConsensusState() (*ctypes.ResultConsensusState, error) { +func (c Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { return core.ConsensusState(&rpctypes.Context{}) } -func (c Client) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { +func (c Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { return core.DumpConsensusState(&rpctypes.Context{}) } -func (c Client) ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error) { +func (c Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { return core.ConsensusParams(&rpctypes.Context{}, height) } -func (c Client) Health() (*ctypes.ResultHealth, error) { +func (c Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) { return core.Health(&rpctypes.Context{}) } -func (c Client) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { +func (c Client) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { return core.UnsafeDialSeeds(&rpctypes.Context{}, seeds) } 
-func (c Client) DialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) { - return core.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent) +func (c Client) DialPeers( + ctx context.Context, + peers []string, + persistent, + unconditional, + private bool, +) (*ctypes.ResultDialPeers, error) { + return core.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private) } -func (c Client) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { return core.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight) } -func (c Client) Genesis() (*ctypes.ResultGenesis, error) { +func (c Client) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { return core.Genesis(&rpctypes.Context{}) } -func (c Client) Block(height *int64) (*ctypes.ResultBlock, error) { +func (c Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { return core.Block(&rpctypes.Context{}, height) } -func (c Client) Commit(height *int64) (*ctypes.ResultCommit, error) { +func (c Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { + return core.BlockByHash(&rpctypes.Context{}, hash) +} + +func (c Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { return core.Commit(&rpctypes.Context{}, height) } -func (c Client) Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) { +func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { return core.Validators(&rpctypes.Context{}, height, page, perPage) } -func (c Client) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { +func (c Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return core.BroadcastEvidence(&rpctypes.Context{}, ev) } diff --git a/rpc/client/mock/status.go b/rpc/client/mock/status.go index 58b29d573..6dd6a8d44 100644 --- a/rpc/client/mock/status.go +++ b/rpc/client/mock/status.go @@ -1,6 +1,8 @@ package mock import ( + "context" + "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" ) @@ -15,7 +17,7 @@ var ( _ client.StatusClient = (*StatusRecorder)(nil) ) -func (m *StatusMock) Status() (*ctypes.ResultStatus, error) { +func (m *StatusMock) Status(ctx context.Context) (*ctypes.ResultStatus, error) { res, err := m.GetResponse(nil) if err != nil { return nil, err @@ -41,8 +43,8 @@ func (r *StatusRecorder) addCall(call Call) { r.Calls = append(r.Calls, call) } -func (r *StatusRecorder) Status() (*ctypes.ResultStatus, error) { - res, err := r.Client.Status() +func (r *StatusRecorder) Status(ctx context.Context) (*ctypes.ResultStatus, error) { + res, err := r.Client.Status(ctx) r.addCall(Call{ Name: "status", Response: res, diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go index d252a54eb..4c2112b9c 100644 --- a/rpc/client/mock/status_test.go +++ b/rpc/client/mock/status_test.go @@ -1,6 +1,7 @@ package mock_test import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -29,7 +30,7 @@ func TestStatus(t *testing.T) { require.Equal(0, len(r.Calls)) // make sure the response works properly - status, err := r.Status() + status, err := r.Status(context.Background()) require.Nil(err, "%+v", err) assert.EqualValues("block",
status.SyncInfo.LatestBlockHash) assert.EqualValues(10, status.SyncInfo.LatestBlockHeight) diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go new file mode 100644 index 000000000..6a9008717 --- /dev/null +++ b/rpc/client/mocks/client.go @@ -0,0 +1,780 @@ +// Code generated by mockery v2.3.0. DO NOT EDIT. + +package mocks + +import ( + bytes "github.com/tendermint/tendermint/libs/bytes" + client "github.com/tendermint/tendermint/rpc/client" + + context "context" + + coretypes "github.com/tendermint/tendermint/rpc/core/types" + + log "github.com/tendermint/tendermint/libs/log" + + mock "github.com/stretchr/testify/mock" + + types "github.com/tendermint/tendermint/types" +) + +// Client is an autogenerated mock type for the Client type +type Client struct { + mock.Mock +} + +// ABCIInfo provides a mock function with given fields: _a0 +func (_m *Client) ABCIInfo(_a0 context.Context) (*coretypes.ResultABCIInfo, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultABCIInfo + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultABCIInfo); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultABCIInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ABCIQuery provides a mock function with given fields: ctx, path, data +func (_m *Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { + ret := _m.Called(ctx, path, data) + + var r0 *coretypes.ResultABCIQuery + if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes) *coretypes.ResultABCIQuery); ok { + r0 = rf(ctx, path, data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultABCIQuery) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, bytes.HexBytes) error); ok { + r1 = rf(ctx, path, data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ABCIQueryWithOptions provides a mock function with given fields: ctx, path, data, opts +func (_m *Client) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { + ret := _m.Called(ctx, path, data, opts) + + var r0 *coretypes.ResultABCIQuery + if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes, client.ABCIQueryOptions) *coretypes.ResultABCIQuery); ok { + r0 = rf(ctx, path, data, opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultABCIQuery) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, bytes.HexBytes, client.ABCIQueryOptions) error); ok { + r1 = rf(ctx, path, data, opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Block provides a mock function with given fields: ctx, height +func (_m *Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { + ret := _m.Called(ctx, height) + + var r0 *coretypes.ResultBlock + if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultBlock); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockByHash provides a mock function with given fields: ctx, hash +func (_m *Client) BlockByHash(ctx context.Context, 
hash []byte) (*coretypes.ResultBlock, error) { + ret := _m.Called(ctx, hash) + + var r0 *coretypes.ResultBlock + if rf, ok := ret.Get(0).(func(context.Context, []byte) *coretypes.ResultBlock); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []byte) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockResults provides a mock function with given fields: ctx, height +func (_m *Client) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) { + ret := _m.Called(ctx, height) + + var r0 *coretypes.ResultBlockResults + if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultBlockResults); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBlockResults) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockchainInfo provides a mock function with given fields: ctx, minHeight, maxHeight +func (_m *Client) BlockchainInfo(ctx context.Context, minHeight int64, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { + ret := _m.Called(ctx, minHeight, maxHeight) + + var r0 *coretypes.ResultBlockchainInfo + if rf, ok := ret.Get(0).(func(context.Context, int64, int64) *coretypes.ResultBlockchainInfo); ok { + r0 = rf(ctx, minHeight, maxHeight) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBlockchainInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int64, int64) error); ok { + r1 = rf(ctx, minHeight, maxHeight) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BroadcastEvidence provides a mock function with given fields: _a0, _a1 +func (_m *Client) BroadcastEvidence(_a0 context.Context, _a1 types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastEvidence + if rf, ok := ret.Get(0).(func(context.Context, types.Evidence) *coretypes.ResultBroadcastEvidence); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastEvidence) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Evidence) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BroadcastTxAsync provides a mock function with given fields: _a0, _a1 +func (_m *Client) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastTx + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BroadcastTxCommit provides a mock function with given fields: _a0, _a1 +func (_m *Client) BroadcastTxCommit(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastTxCommit + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTxCommit); ok { + r0 = rf(_a0, _a1) + } else { + if 
ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastTxCommit) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BroadcastTxSync provides a mock function with given fields: _a0, _a1 +func (_m *Client) BroadcastTxSync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastTx + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CheckTx provides a mock function with given fields: _a0, _a1 +func (_m *Client) CheckTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultCheckTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultCheckTx + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultCheckTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultCheckTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Commit provides a mock function with given fields: ctx, height +func (_m *Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { + ret := _m.Called(ctx, height) + + var r0 *coretypes.ResultCommit + if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultCommit); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultCommit) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ConsensusParams provides a mock function with given fields: ctx, height +func (_m *Client) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { + ret := _m.Called(ctx, height) + + var r0 *coretypes.ResultConsensusParams + if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultConsensusParams); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultConsensusParams) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ConsensusState provides a mock function with given fields: _a0 +func (_m *Client) ConsensusState(_a0 context.Context) (*coretypes.ResultConsensusState, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultConsensusState + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultConsensusState); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultConsensusState) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DumpConsensusState provides a mock function with given fields: _a0 +func (_m *Client) DumpConsensusState(_a0 context.Context) (*coretypes.ResultDumpConsensusState, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultDumpConsensusState + 
if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultDumpConsensusState); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultDumpConsensusState) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Genesis provides a mock function with given fields: _a0 +func (_m *Client) Genesis(_a0 context.Context) (*coretypes.ResultGenesis, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultGenesis + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultGenesis); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultGenesis) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Health provides a mock function with given fields: _a0 +func (_m *Client) Health(_a0 context.Context) (*coretypes.ResultHealth, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultHealth + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultHealth); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultHealth) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsRunning provides a mock function with given fields: +func (_m *Client) IsRunning() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// NetInfo provides a mock function with given fields: _a0 +func (_m *Client) NetInfo(_a0 context.Context) (*coretypes.ResultNetInfo, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultNetInfo + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultNetInfo); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultNetInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NumUnconfirmedTxs provides a mock function with given fields: _a0 +func (_m *Client) NumUnconfirmedTxs(_a0 context.Context) (*coretypes.ResultUnconfirmedTxs, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultUnconfirmedTxs + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultUnconfirmedTxs); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultUnconfirmedTxs) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OnReset provides a mock function with given fields: +func (_m *Client) OnReset() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnStart provides a mock function with given fields: +func (_m *Client) OnStart() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnStop provides a mock function with given fields: +func (_m *Client) OnStop() { + _m.Called() +} + +// Quit provides a mock function with given fields: +func (_m *Client) Quit() <-chan struct{} { + ret := _m.Called() + + var r0 <-chan struct{} + if 
rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Reset provides a mock function with given fields: +func (_m *Client) Reset() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetLogger provides a mock function with given fields: _a0 +func (_m *Client) SetLogger(_a0 log.Logger) { + _m.Called(_a0) +} + +// Start provides a mock function with given fields: +func (_m *Client) Start() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Status provides a mock function with given fields: _a0 +func (_m *Client) Status(_a0 context.Context) (*coretypes.ResultStatus, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultStatus + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultStatus); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultStatus) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Stop provides a mock function with given fields: +func (_m *Client) Stop() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// String provides a mock function with given fields: +func (_m *Client) String() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// Subscribe provides a mock function with given fields: ctx, subscriber, query, outCapacity +func (_m *Client) Subscribe(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan coretypes.ResultEvent, error) { + _va := make([]interface{}, len(outCapacity)) + for _i := range outCapacity { + _va[_i] = outCapacity[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, subscriber, query) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 <-chan coretypes.ResultEvent + if rf, ok := ret.Get(0).(func(context.Context, string, string, ...int) <-chan coretypes.ResultEvent); ok { + r0 = rf(ctx, subscriber, query, outCapacity...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan coretypes.ResultEvent) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, ...int) error); ok { + r1 = rf(ctx, subscriber, query, outCapacity...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Tx provides a mock function with given fields: ctx, hash, prove +func (_m *Client) Tx(ctx context.Context, hash []byte, prove bool) (*coretypes.ResultTx, error) { + ret := _m.Called(ctx, hash, prove) + + var r0 *coretypes.ResultTx + if rf, ok := ret.Get(0).(func(context.Context, []byte, bool) *coretypes.ResultTx); ok { + r0 = rf(ctx, hash, prove) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []byte, bool) error); ok { + r1 = rf(ctx, hash, prove) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TxSearch provides a mock function with given fields: ctx, query, prove, page, perPage, orderBy +func (_m *Client) TxSearch(ctx context.Context, query string, prove bool, page *int, perPage *int, orderBy string) (*coretypes.ResultTxSearch, error) { + ret := _m.Called(ctx, query, prove, page, perPage, orderBy) + + var r0 *coretypes.ResultTxSearch + if rf, ok := ret.Get(0).(func(context.Context, string, bool, *int, *int, string) *coretypes.ResultTxSearch); ok { + r0 = rf(ctx, query, prove, page, perPage, orderBy) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultTxSearch) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, bool, *int, *int, string) error); ok { + r1 = rf(ctx, query, prove, page, perPage, orderBy) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UnconfirmedTxs provides a mock function with given fields: ctx, limit +func (_m *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { + ret := _m.Called(ctx, limit) + + var r0 *coretypes.ResultUnconfirmedTxs + if rf, ok := ret.Get(0).(func(context.Context, *int) *coretypes.ResultUnconfirmedTxs); ok { + r0 = rf(ctx, limit) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultUnconfirmedTxs) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int) error); ok { + r1 = rf(ctx, limit) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Unsubscribe provides a mock function with given fields: ctx, subscriber, query +func (_m *Client) Unsubscribe(ctx context.Context, subscriber string, query string) error { + ret := _m.Called(ctx, subscriber, query) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, subscriber, query) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UnsubscribeAll provides a mock function with given fields: ctx, subscriber +func (_m *Client) UnsubscribeAll(ctx context.Context, subscriber string) error { + ret := _m.Called(ctx, subscriber) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, subscriber) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Validators provides a mock function with given fields: ctx, height, page, perPage +func (_m *Client) Validators(ctx context.Context, height *int64, page *int, perPage *int) (*coretypes.ResultValidators, error) { + ret := _m.Called(ctx, height, page, perPage) + + var r0 *coretypes.ResultValidators + if rf, ok := ret.Get(0).(func(context.Context, *int64, *int, *int) *coretypes.ResultValidators); ok { + r0 = rf(ctx, height, page, perPage) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultValidators) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64, *int, *int) error); ok { + r1 = 
rf(ctx, height, page, perPage) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 3f9962774..ed719bed0 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -1,10 +1,9 @@ package client_test import ( - "bytes" + "context" "fmt" "math" - "math/rand" "net/http" "strings" "sync" @@ -15,22 +14,22 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" - - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" mempl "github.com/tendermint/tendermint/mempool" - "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/rpc/client" rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpclocal "github.com/tendermint/tendermint/rpc/client/local" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpcclient "github.com/tendermint/tendermint/rpc/lib/client" + rpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" ) +var ( + ctx = context.Background() +) + func getHTTPClient() *rpchttp.HTTP { rpcAddr := rpctest.GetConfig().RPC.ListenAddress c, err := rpchttp.New(rpcAddr, "/websocket") @@ -68,7 +67,7 @@ func TestNilCustomHTTPClient(t *testing.T) { _, _ = rpchttp.NewWithClient("http://example.com", "/websocket", nil) }) require.Panics(t, func() { - _, _ = rpcclient.NewJSONRPCClientWithHTTPClient("http://example.com", nil) + _, _ = rpcclient.NewWithHTTPClient("http://example.com", nil) }) } @@ -76,14 +75,14 @@ func TestCustomHTTPClient(t *testing.T) { remote := rpctest.GetConfig().RPC.ListenAddress c, err := rpchttp.NewWithClient(remote, "/websocket", http.DefaultClient) require.Nil(t, err) - status, err := c.Status() + status, err := c.Status(context.Background()) require.NoError(t, err) require.NotNil(t, status) } func TestCorsEnabled(t *testing.T) { origin := rpctest.GetConfig().RPC.CORSAllowedOrigins[0] - remote := strings.Replace(rpctest.GetConfig().RPC.ListenAddress, "tcp", "http", -1) + remote := strings.ReplaceAll(rpctest.GetConfig().RPC.ListenAddress, "tcp", "http") req, err := http.NewRequest("GET", remote, nil) require.Nil(t, err, "%+v", err) @@ -100,7 +99,7 @@ func TestCorsEnabled(t *testing.T) { func TestStatus(t *testing.T) { for i, c := range GetClients() { moniker := rpctest.GetConfig().Moniker - status, err := c.Status() + status, err := c.Status(context.Background()) require.Nil(t, err, "%d: %+v", i, err) assert.Equal(t, moniker, status.NodeInfo.Moniker) } @@ -111,7 +110,7 @@ func TestInfo(t *testing.T) { for i, c := range GetClients() { // status, err := c.Status() // require.Nil(t, err, "%+v", err) - info, err := c.ABCIInfo() + info, err := c.ABCIInfo(context.Background()) require.Nil(t, err, "%d: %+v", i, err) // TODO: this is not correct - fix merkleeyes! 
// assert.EqualValues(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight) @@ -123,7 +122,7 @@ func TestNetInfo(t *testing.T) { for i, c := range GetClients() { nc, ok := c.(client.NetworkClient) require.True(t, ok, "%d", i) - netinfo, err := nc.NetInfo() + netinfo, err := nc.NetInfo(context.Background()) require.Nil(t, err, "%d: %+v", i, err) assert.True(t, netinfo.Listening) assert.Equal(t, 0, len(netinfo.Peers)) @@ -135,7 +134,7 @@ func TestDumpConsensusState(t *testing.T) { // FIXME: fix server so it doesn't panic on invalid input nc, ok := c.(client.NetworkClient) require.True(t, ok, "%d", i) - cons, err := nc.DumpConsensusState() + cons, err := nc.DumpConsensusState(context.Background()) require.Nil(t, err, "%d: %+v", i, err) assert.NotEmpty(t, cons.RoundState) assert.Empty(t, cons.Peers) @@ -147,7 +146,7 @@ func TestConsensusState(t *testing.T) { // FIXME: fix server so it doesn't panic on invalid input nc, ok := c.(client.NetworkClient) require.True(t, ok, "%d", i) - cons, err := nc.ConsensusState() + cons, err := nc.ConsensusState(context.Background()) require.Nil(t, err, "%d: %+v", i, err) assert.NotEmpty(t, cons.RoundState) } @@ -157,7 +156,7 @@ func TestHealth(t *testing.T) { for i, c := range GetClients() { nc, ok := c.(client.NetworkClient) require.True(t, ok, "%d", i) - _, err := nc.Health() + _, err := nc.Health(context.Background()) require.Nil(t, err, "%d: %+v", i, err) } } @@ -166,14 +165,15 @@ func TestGenesisAndValidators(t *testing.T) { for i, c := range GetClients() { // make sure this is the right genesis file - gen, err := c.Genesis() + gen, err := c.Genesis(context.Background()) require.Nil(t, err, "%d: %+v", i, err) // get the genesis validator require.Equal(t, 1, len(gen.Genesis.Validators)) gval := gen.Genesis.Validators[0] // get the current validators - vals, err := c.Validators(nil, 0, 0) + h := int64(1) + vals, err := c.Validators(context.Background(), &h, nil, nil) require.Nil(t, err, "%d: %+v", i, err) require.Equal(t, 1, len(vals.Validators)) require.Equal(t, 1, vals.Count) @@ -190,13 +190,14 @@ func TestABCIQuery(t *testing.T) { for i, c := range GetClients() { // write something k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(tx) + bres, err := c.BroadcastTxCommit(context.Background(), tx) require.Nil(t, err, "%d: %+v", i, err) apph := bres.Height + 1 // this is where the tx will be applied to the state // wait before querying - client.WaitForHeight(c, apph, nil) - res, err := c.ABCIQuery("/key", k) + err = client.WaitForHeight(c, apph, nil) + require.NoError(t, err) + res, err := c.ABCIQuery(context.Background(), "/key", k) qres := res.Response if assert.Nil(t, err) && assert.True(t, qres.IsOK()) { assert.EqualValues(t, v, qres.Value) @@ -210,50 +211,55 @@ func TestAppCalls(t *testing.T) { for i, c := range GetClients() { // get an offset of height to avoid racing and guessing - s, err := c.Status() - require.Nil(err, "%d: %+v", i, err) + s, err := c.Status(context.Background()) + require.NoError(err) // sh is start height or status height sh := s.SyncInfo.LatestBlockHeight // look for the future - h := sh + 2 - _, err = c.Block(&h) - assert.NotNil(err) // no block yet + h := sh + 20 + _, err = c.Block(context.Background(), &h) + require.Error(err) // no block yet // write something k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(tx) - require.Nil(err, "%d: %+v", i, err) + bres, err := c.BroadcastTxCommit(context.Background(), tx) + require.NoError(err) require.True(bres.DeliverTx.IsOK()) txh := bres.Height apph := 
txh + 1 // this is where the tx will be applied to the state // wait before querying - if err := client.WaitForHeight(c, apph, nil); err != nil { - t.Error(err) - } - _qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Prove: false}) + err = client.WaitForHeight(c, apph, nil) + require.NoError(err) + + _qres, err := c.ABCIQueryWithOptions(context.Background(), "/key", k, client.ABCIQueryOptions{Prove: false}) + require.NoError(err) qres := _qres.Response - if assert.Nil(err) && assert.True(qres.IsOK()) { + if assert.True(qres.IsOK()) { assert.Equal(k, qres.Key) assert.EqualValues(v, qres.Value) } // make sure we can lookup the tx with proof - ptx, err := c.Tx(bres.Hash, true) - require.Nil(err, "%d: %+v", i, err) + ptx, err := c.Tx(context.Background(), bres.Hash, true) + require.NoError(err) assert.EqualValues(txh, ptx.Height) assert.EqualValues(tx, ptx.Tx) // and we can even check the block is added - block, err := c.Block(&apph) - require.Nil(err, "%d: %+v", i, err) + block, err := c.Block(context.Background(), &apph) + require.NoError(err) appHash := block.Block.Header.AppHash assert.True(len(appHash) > 0) assert.EqualValues(apph, block.Block.Header.Height) + blockByHash, err := c.BlockByHash(context.Background(), block.BlockID.Hash) + require.NoError(err) + require.Equal(block, blockByHash) + // now check the results - blockResults, err := c.BlockResults(&txh) + blockResults, err := c.BlockResults(context.Background(), &txh) require.Nil(err, "%d: %+v", i, err) assert.Equal(txh, blockResults.Height) if assert.Equal(1, len(blockResults.TxsResults)) { @@ -262,8 +268,8 @@ func TestAppCalls(t *testing.T) { } // check blockchain info, now that we know there is info - info, err := c.BlockchainInfo(apph, apph) - require.Nil(err, "%d: %+v", i, err) + info, err := c.BlockchainInfo(context.Background(), apph, apph) + require.NoError(err) assert.True(info.LastHeight >= apph) if assert.Equal(1, len(info.BlockMetas)) { lastMeta := info.BlockMetas[0] @@ -274,28 +280,58 @@ func TestAppCalls(t *testing.T) { } // and get the corresponding commit with the same apphash - commit, err := c.Commit(&apph) - require.Nil(err, "%d: %+v", i, err) + commit, err := c.Commit(context.Background(), &apph) + require.NoError(err) cappHash := commit.Header.AppHash assert.Equal(appHash, cappHash) assert.NotNil(commit.Commit) // compare the commits (note Commit(2) has commit from Block(3)) h = apph - 1 - commit2, err := c.Commit(&h) - require.Nil(err, "%d: %+v", i, err) - assert.Equal(block.Block.LastCommit, commit2.Commit) + commit2, err := c.Commit(context.Background(), &h) + require.NoError(err) + assert.Equal(block.Block.LastCommitHash, commit2.Commit.Hash()) // and we got a proof that works! 
- _pres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Prove: true}) + _pres, err := c.ABCIQueryWithOptions(context.Background(), "/key", k, client.ABCIQueryOptions{Prove: true}) + require.NoError(err) pres := _pres.Response - assert.Nil(err) assert.True(pres.IsOK()) // XXX Test proof } } +func TestBlockchainInfo(t *testing.T) { + for i, c := range GetClients() { + err := client.WaitForHeight(c, 10, nil) + require.NoError(t, err) + + res, err := c.BlockchainInfo(context.Background(), 0, 0) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, res.LastHeight > 0) + assert.True(t, len(res.BlockMetas) > 0) + + res, err = c.BlockchainInfo(context.Background(), 1, 1) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, res.LastHeight > 0) + assert.True(t, len(res.BlockMetas) == 1) + + res, err = c.BlockchainInfo(context.Background(), 1, 10000) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, res.LastHeight > 0) + assert.True(t, len(res.BlockMetas) < 100) + for _, m := range res.BlockMetas { + assert.NotNil(t, m) + } + + res, err = c.BlockchainInfo(context.Background(), 10000, 1) + require.NotNil(t, err) + assert.Nil(t, res) + assert.Contains(t, err.Error(), "can't be greater than max") + } +} + func TestBroadcastTxSync(t *testing.T) { require := require.New(t) @@ -305,7 +341,7 @@ func TestBroadcastTxSync(t *testing.T) { for i, c := range GetClients() { _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxSync(tx) + bres, err := c.BroadcastTxSync(context.Background(), tx) require.Nil(err, "%d: %+v", i, err) require.Equal(bres.Code, abci.CodeTypeOK) // FIXME @@ -323,7 +359,7 @@ func TestBroadcastTxCommit(t *testing.T) { mempool := node.Mempool() for i, c := range GetClients() { _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(tx) + bres, err := c.BroadcastTxCommit(context.Background(), tx) require.Nil(err, "%d: %+v", i, err) require.True(bres.CheckTx.IsOK()) require.True(bres.DeliverTx.IsOK()) @@ -335,14 +371,23 @@ func TestBroadcastTxCommit(t *testing.T) { func TestUnconfirmedTxs(t *testing.T) { _, _, tx := MakeTxKV() + ch := make(chan *abci.Response, 1) mempool := node.Mempool() - _ = mempool.CheckTx(tx, nil, mempl.TxInfo{}) + err := mempool.CheckTx(tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{}) + require.NoError(t, err) - for i, c := range GetClients() { - mc, ok := c.(client.MempoolClient) - require.True(t, ok, "%d", i) - res, err := mc.UnconfirmedTxs(1) - require.Nil(t, err, "%d: %+v", i, err) + // wait for tx to arrive in mempool. + select { + case <-ch: + case <-time.After(5 * time.Second): + t.Error("Timed out waiting for CheckTx callback") + } + + for _, c := range GetClients() { + mc := c.(client.MempoolClient) + limit := 1 + res, err := mc.UnconfirmedTxs(context.Background(), &limit) + require.NoError(t, err) assert.Equal(t, 1, res.Count) assert.Equal(t, 1, res.Total) @@ -356,14 +401,23 @@ func TestUnconfirmedTxs(t *testing.T) { func TestNumUnconfirmedTxs(t *testing.T) { _, _, tx := MakeTxKV() + ch := make(chan *abci.Response, 1) mempool := node.Mempool() - _ = mempool.CheckTx(tx, nil, mempl.TxInfo{}) - mempoolSize := mempool.Size() + err := mempool.CheckTx(tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{}) + require.NoError(t, err) + + // wait for tx to arrive in mempool.
+ select { + case <-ch: + case <-time.After(5 * time.Second): + t.Error("Timed out waiting for CheckTx callback") + } + mempoolSize := mempool.Size() for i, c := range GetClients() { mc, ok := c.(client.MempoolClient) require.True(t, ok, "%d", i) - res, err := mc.NumUnconfirmedTxs() + res, err := mc.NumUnconfirmedTxs(context.Background()) require.Nil(t, err, "%d: %+v", i, err) assert.Equal(t, mempoolSize, res.Count) @@ -374,11 +428,25 @@ func TestNumUnconfirmedTxs(t *testing.T) { mempool.Flush() } +func TestCheckTx(t *testing.T) { + mempool := node.Mempool() + + for _, c := range GetClients() { + _, _, tx := MakeTxKV() + + res, err := c.CheckTx(context.Background(), tx) + require.NoError(t, err) + assert.Equal(t, abci.CodeTypeOK, res.Code) + + assert.Equal(t, 0, mempool.Size(), "mempool must be empty") + } +} + func TestTx(t *testing.T) { // first we broadcast a tx c := getHTTPClient() _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(tx) + bres, err := c.BroadcastTxCommit(context.Background(), tx) require.Nil(t, err, "%+v", err) txHeight := bres.Height @@ -406,7 +474,7 @@ func TestTx(t *testing.T) { // now we query for the tx. // since there's only one tx, we know index=0. - ptx, err := c.Tx(tc.hash, tc.prove) + ptx, err := c.Tx(context.Background(), tc.hash, tc.prove) if !tc.valid { require.NotNil(t, err) @@ -432,12 +500,14 @@ func TestTxSearchWithTimeout(t *testing.T) { // Get a client with a time-out of 10 secs. timeoutClient := getHTTPClientWithTimeout(10) + _, _, tx := MakeTxKV() + _, err := timeoutClient.BroadcastTxCommit(context.Background(), tx) + require.NoError(t, err) + // query using a compositeKey (see kvstore application) - result, err := timeoutClient.TxSearch("app.creator='Cosmoshi Netowoko'", false, 1, 30, "asc") + result, err := timeoutClient.TxSearch(context.Background(), "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") require.Nil(t, err) - if len(result.Txs) == 0 { - t.Fatal("expected a lot of transactions") - } + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") } func TestTxSearch(t *testing.T) { @@ -446,13 +516,13 @@ func TestTxSearch(t *testing.T) { // first we broadcast a few txs for i := 0; i < 10; i++ { _, _, tx := MakeTxKV() - _, err := c.BroadcastTxCommit(tx) + _, err := c.BroadcastTxCommit(context.Background(), tx) require.NoError(t, err) } // since we're not using an isolated test server, we'll have lingering transactions // from other tests as well - result, err := c.TxSearch("tx.height >= 0", true, 1, 100, "asc") + result, err := c.TxSearch(context.Background(), "tx.height >= 0", true, nil, nil, "asc") require.NoError(t, err) txCount := len(result.Txs) @@ -464,7 +534,7 @@ func TestTxSearch(t *testing.T) { t.Logf("client %d", i) // now we query for the tx. 
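// [editor's note, not part of the patch] page and perPage are now optional
// pointers in TxSearch; nil falls back to the server-side defaults
// (validatePage/validatePerPage in rpc/core/env.go later in this diff), e.g.:
//
//	page, perPage := 1, 30
//	res, err := c.TxSearch(ctx, "tx.height >= 1", false, &page, &perPage, "asc")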
- result, err := c.TxSearch(fmt.Sprintf("tx.hash='%v'", find.Hash), true, 1, 30, "asc") + result, err := c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%v'", find.Hash), true, nil, nil, "asc") require.Nil(t, err) require.Len(t, result.Txs, 1) require.Equal(t, find.Hash, result.Txs[0].Hash) @@ -482,58 +552,67 @@ func TestTxSearch(t *testing.T) { } // query by height - result, err = c.TxSearch(fmt.Sprintf("tx.height=%d", find.Height), true, 1, 30, "asc") + result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc") require.Nil(t, err) require.Len(t, result.Txs, 1) // query for non existing tx - result, err = c.TxSearch(fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, 1, 30, "asc") + result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, nil, nil, "asc") require.Nil(t, err) require.Len(t, result.Txs, 0) // query using a compositeKey (see kvstore application) - result, err = c.TxSearch("app.creator='Cosmoshi Netowoko'", false, 1, 30, "asc") + result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") require.Nil(t, err) - if len(result.Txs) == 0 { - t.Fatal("expected a lot of transactions") - } + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + + // query using an index key + result, err = c.TxSearch(context.Background(), "app.index_key='index is working'", false, nil, nil, "asc") + require.Nil(t, err) + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + + // query using a noindex key + result, err = c.TxSearch(context.Background(), "app.noindex_key='index is working'", false, nil, nil, "asc") + require.Nil(t, err) + require.Equal(t, len(result.Txs), 0, "expected no transactions") // query using a compositeKey (see kvstore application) and height - result, err = c.TxSearch("app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, 1, 30, "asc") + result, err = c.TxSearch(context.Background(), + "app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, nil, nil, "asc") require.Nil(t, err) - if len(result.Txs) == 0 { - t.Fatal("expected a lot of transactions") - } + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") // query a non existing tx with page 1 and txsPerPage 1 - result, err = c.TxSearch("app.creator='Cosmoshi Neetowoko'", true, 1, 1, "asc") + perPage := 1 + result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Neetowoko'", true, nil, &perPage, "asc") require.Nil(t, err) require.Len(t, result.Txs, 0) // check sorting - result, err = c.TxSearch(fmt.Sprintf("tx.height >= 1"), false, 1, 30, "asc") + result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "asc") require.Nil(t, err) for k := 0; k < len(result.Txs)-1; k++ { require.LessOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) require.LessOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) } - result, err = c.TxSearch(fmt.Sprintf("tx.height >= 1"), false, 1, 30, "desc") + result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "desc") require.Nil(t, err) for k := 0; k < len(result.Txs)-1; k++ { require.GreaterOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) require.GreaterOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) } - // check pagination + perPage = 3 var ( seen = map[int64]bool{} maxHeight int64 - perPage = 3 pages = int(math.Ceil(float64(txCount) / float64(perPage))) ) + for page := 1; page <= pages;
page++ { - result, err = c.TxSearch("tx.height >= 1", false, page, perPage, "asc") + page := page + result, err := c.TxSearch(context.Background(), "tx.height >= 1", false, &page, &perPage, "asc") require.NoError(t, err) if page < pages { require.Len(t, result.Txs, perPage) @@ -554,152 +633,6 @@ func TestTxSearch(t *testing.T) { } } -func deepcpVote(vote *types.Vote) (res *types.Vote) { - res = &types.Vote{ - ValidatorAddress: make([]byte, len(vote.ValidatorAddress)), - ValidatorIndex: vote.ValidatorIndex, - Height: vote.Height, - Round: vote.Round, - Type: vote.Type, - Timestamp: vote.Timestamp, - BlockID: types.BlockID{ - Hash: make([]byte, len(vote.BlockID.Hash)), - PartsHeader: vote.BlockID.PartsHeader, - }, - Signature: make([]byte, len(vote.Signature)), - } - copy(res.ValidatorAddress, vote.ValidatorAddress) - copy(res.BlockID.Hash, vote.BlockID.Hash) - copy(res.Signature, vote.Signature) - return -} - -func newEvidence( - t *testing.T, - val *privval.FilePV, - vote *types.Vote, - vote2 *types.Vote, - chainID string, -) types.DuplicateVoteEvidence { - var err error - deepcpVote2 := deepcpVote(vote2) - deepcpVote2.Signature, err = val.Key.PrivKey.Sign(deepcpVote2.SignBytes(chainID)) - require.NoError(t, err) - - return *types.NewDuplicateVoteEvidence(val.Key.PubKey, vote, deepcpVote2) -} - -func makeEvidences( - t *testing.T, - val *privval.FilePV, - chainID string, -) (ev types.DuplicateVoteEvidence, fakes []types.DuplicateVoteEvidence) { - vote := &types.Vote{ - ValidatorAddress: val.Key.Address, - ValidatorIndex: 0, - Height: 1, - Round: 0, - Type: types.PrevoteType, - Timestamp: time.Now().UTC(), - BlockID: types.BlockID{ - Hash: tmhash.Sum([]byte("blockhash")), - PartsHeader: types.PartSetHeader{ - Total: 1000, - Hash: tmhash.Sum([]byte("partset")), - }, - }, - } - - var err error - vote.Signature, err = val.Key.PrivKey.Sign(vote.SignBytes(chainID)) - require.NoError(t, err) - - vote2 := deepcpVote(vote) - vote2.BlockID.Hash = tmhash.Sum([]byte("blockhash2")) - - ev = newEvidence(t, val, vote, vote2, chainID) - - fakes = make([]types.DuplicateVoteEvidence, 42) - - // different address - vote2 = deepcpVote(vote) - for i := 0; i < 10; i++ { - rand.Read(vote2.ValidatorAddress) // nolint: gosec - fakes[i] = newEvidence(t, val, vote, vote2, chainID) - } - // different index - vote2 = deepcpVote(vote) - for i := 10; i < 20; i++ { - vote2.ValidatorIndex = rand.Int()%100 + 1 // nolint: gosec - fakes[i] = newEvidence(t, val, vote, vote2, chainID) - } - // different height - vote2 = deepcpVote(vote) - for i := 20; i < 30; i++ { - vote2.Height = rand.Int63()%1000 + 100 // nolint: gosec - fakes[i] = newEvidence(t, val, vote, vote2, chainID) - } - // different round - vote2 = deepcpVote(vote) - for i := 30; i < 40; i++ { - vote2.Round = rand.Int()%10 + 1 // nolint: gosec - fakes[i] = newEvidence(t, val, vote, vote2, chainID) - } - // different type - vote2 = deepcpVote(vote) - vote2.Type = types.PrecommitType - fakes[40] = newEvidence(t, val, vote, vote2, chainID) - // exactly same vote - vote2 = deepcpVote(vote) - fakes[41] = newEvidence(t, val, vote, vote2, chainID) - return ev, fakes -} - -func TestBroadcastEvidenceDuplicateVote(t *testing.T) { - config := rpctest.GetConfig() - chainID := config.ChainID() - pvKeyFile := config.PrivValidatorKeyFile() - pvKeyStateFile := config.PrivValidatorStateFile() - pv := privval.LoadOrGenFilePV(pvKeyFile, pvKeyStateFile) - - ev, fakes := makeEvidences(t, pv, chainID) - t.Logf("evidence %v", ev) - - for i, c := range GetClients() { - t.Logf("client %d", 
i) - - result, err := c.BroadcastEvidence(&ev) - require.Nil(t, err) - require.Equal(t, ev.Hash(), result.Hash, "Invalid response, result %+v", result) - - status, err := c.Status() - require.NoError(t, err) - client.WaitForHeight(c, status.SyncInfo.LatestBlockHeight+2, nil) - - ed25519pub := ev.PubKey.(ed25519.PubKeyEd25519) - rawpub := ed25519pub[:] - result2, err := c.ABCIQuery("/val", rawpub) - require.Nil(t, err, "Error querying evidence, err %v", err) - qres := result2.Response - require.True(t, qres.IsOK(), "Response not OK") - - var v abci.ValidatorUpdate - err = abci.ReadMessage(bytes.NewReader(qres.Value), &v) - require.NoError(t, err, "Error reading query result, value %v", qres.Value) - - require.EqualValues(t, rawpub, v.PubKey.Data, "Stored PubKey not equal with expected, value %v", string(qres.Value)) - require.Equal(t, int64(9), v.Power, "Stored Power not equal with expected, value %v", string(qres.Value)) - - for _, fake := range fakes { - _, err := c.BroadcastEvidence(&types.DuplicateVoteEvidence{ - PubKey: fake.PubKey, - VoteA: fake.VoteA, - VoteB: fake.VoteB}) - require.Error(t, err, "Broadcasting fake evidence succeed: %s", fake.String()) - } - } -} - func TestBatchedJSONRPCCalls(t *testing.T) { c := getHTTPClient() testBatchedJSONRPCCalls(t, c) @@ -710,12 +643,12 @@ func testBatchedJSONRPCCalls(t *testing.T, c *rpchttp.HTTP) { k2, v2, tx2 := MakeTxKV() batch := c.NewBatch() - r1, err := batch.BroadcastTxCommit(tx1) + r1, err := batch.BroadcastTxCommit(context.Background(), tx1) require.NoError(t, err) - r2, err := batch.BroadcastTxCommit(tx2) + r2, err := batch.BroadcastTxCommit(context.Background(), tx2) require.NoError(t, err) require.Equal(t, 2, batch.Count()) - bresults, err := batch.Send() + bresults, err := batch.Send(ctx) require.NoError(t, err) require.Len(t, bresults, 2) require.Equal(t, 0, batch.Count()) @@ -728,14 +661,15 @@ func testBatchedJSONRPCCalls(t *testing.T, c *rpchttp.HTTP) { require.Equal(t, *bresult2, *r2) apph := tmmath.MaxInt64(bresult1.Height, bresult2.Height) + 1 - client.WaitForHeight(c, apph, nil) + err = client.WaitForHeight(c, apph, nil) + require.NoError(t, err) - q1, err := batch.ABCIQuery("/key", k1) + q1, err := batch.ABCIQuery(context.Background(), "/key", k1) require.NoError(t, err) - q2, err := batch.ABCIQuery("/key", k2) + q2, err := batch.ABCIQuery(context.Background(), "/key", k2) require.NoError(t, err) require.Equal(t, 2, batch.Count()) - qresults, err := batch.Send() + qresults, err := batch.Send(ctx) require.NoError(t, err) require.Len(t, qresults, 2) require.Equal(t, 0, batch.Count()) @@ -759,9 +693,9 @@ func TestBatchedJSONRPCCallsCancellation(t *testing.T) { _, _, tx2 := MakeTxKV() batch := c.NewBatch() - _, err := batch.BroadcastTxCommit(tx1) + _, err := batch.BroadcastTxCommit(context.Background(), tx1) require.NoError(t, err) - _, err = batch.BroadcastTxCommit(tx2) + _, err = batch.BroadcastTxCommit(context.Background(), tx2) require.NoError(t, err) // we should have 2 requests waiting require.Equal(t, 2, batch.Count()) @@ -771,14 +705,14 @@ func TestBatchedJSONRPCCallsCancellation(t *testing.T) { require.Equal(t, 0, batch.Count()) } -func TestSendingEmptyJSONRPCRequestBatch(t *testing.T) { +func TestSendingEmptyRequestBatch(t *testing.T) { c := getHTTPClient() batch := c.NewBatch() - _, err := batch.Send() + _, err := batch.Send(ctx) require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error") } -func TestClearingEmptyJSONRPCRequestBatch(t *testing.T) { +func 
TestClearingEmptyRequestBatch(t *testing.T) { c := getHTTPClient() batch := c.NewBatch() require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") diff --git a/rpc/core/CONTRIBUTING.md b/rpc/core/CONTRIBUTING.md index 7f7c7e80c..3427a8e96 100644 --- a/rpc/core/CONTRIBUTING.md +++ b/rpc/core/CONTRIBUTING.md @@ -1,4 +1,4 @@ -## Swagger docs +# OpenAPI docs -Do not forget to update ../swagger/swagger.yaml if making changes to any +Do not forget to update ../openapi/openapi.yaml if making changes to any endpoint. diff --git a/rpc/core/abci.go b/rpc/core/abci.go index 8f135ba26..d1edfdfd7 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -5,7 +5,7 @@ import ( "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/proxy" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // ABCIQuery queries the application for some information. @@ -17,7 +17,7 @@ func ABCIQuery( height int64, prove bool, ) (*ctypes.ResultABCIQuery, error) { - resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{ + resQuery, err := env.ProxyAppQuery.QuerySync(abci.RequestQuery{ Path: path, Data: data, Height: height, @@ -26,14 +26,14 @@ func ABCIQuery( if err != nil { return nil, err } - logger.Info("ABCIQuery", "path", path, "data", data, "result", resQuery) + env.Logger.Info("ABCIQuery", "path", path, "data", data, "result", resQuery) return &ctypes.ResultABCIQuery{Response: *resQuery}, nil } // ABCIInfo gets some info about the application. // More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_info func ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { - resInfo, err := proxyAppQuery.InfoSync(proxy.RequestInfo) + resInfo, err := env.ProxyAppQuery.InfoSync(proxy.RequestInfo) if err != nil { return nil, err } diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index 1d608534a..c5b2d4928 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -5,32 +5,45 @@ import ( tmmath "github.com/tendermint/tendermint/libs/math" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" - sm "github.com/tendermint/tendermint/state" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) // BlockchainInfo gets block headers for minHeight <= height <= maxHeight. -// Block headers are returned in descending order (highest first). +// +// If maxHeight does not yet exist, blocks up to the current height will be +// returned. If minHeight does not exist (due to pruning), the earliest existing +// height will be used. +// +// At most 20 items will be returned. Block headers are returned in descending +// order (highest first).
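// [editor's illustration, not part of the patch] Worked example of the
// clamping described above, assuming base=1 and height=100: a request for
// (minHeight=0, maxHeight=0) is read as "latest", and the 20-item cap
// resolves it to heights 81..100, returned highest first.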
+// // More: https://docs.tendermint.com/master/rpc/#/Info/blockchain func BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - // maximum 20 block metas const limit int64 = 20 + var err error - minHeight, maxHeight, err = filterMinMax(blockStore.Base(), blockStore.Height(), minHeight, maxHeight, limit) + minHeight, maxHeight, err = filterMinMax( + env.BlockStore.Base(), + env.BlockStore.Height(), + minHeight, + maxHeight, + limit) if err != nil { return nil, err } - logger.Debug("BlockchainInfoHandler", "maxHeight", maxHeight, "minHeight", minHeight) + env.Logger.Debug("BlockchainInfo", "maxHeight", maxHeight, "minHeight", minHeight) - blockMetas := []*types.BlockMeta{} + blockMetas := make([]*types.BlockMeta, 0, maxHeight-minHeight+1) for height := maxHeight; height >= minHeight; height-- { - blockMeta := blockStore.LoadBlockMeta(height) - blockMetas = append(blockMetas, blockMeta) + blockMeta := env.BlockStore.LoadBlockMeta(height) + if blockMeta != nil { + blockMetas = append(blockMetas, blockMeta) + } } return &ctypes.ResultBlockchainInfo{ - LastHeight: blockStore.Height(), + LastHeight: env.BlockStore.Height(), BlockMetas: blockMetas}, nil } @@ -71,13 +84,13 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // If no height is provided, it will fetch the latest block. // More: https://docs.tendermint.com/master/rpc/#/Info/block func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { - height, err := getHeight(blockStore.Base(), blockStore.Height(), heightPtr) + height, err := getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err } - block := blockStore.LoadBlock(height) - blockMeta := blockStore.LoadBlockMeta(height) + block := env.BlockStore.LoadBlock(height) + blockMeta := env.BlockStore.LoadBlockMeta(height) if blockMeta == nil { return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: block}, nil } @@ -87,12 +100,12 @@ func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) // BlockByHash gets block by hash. // More: https://docs.tendermint.com/master/rpc/#/Info/block_by_hash func BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { - block := blockStore.LoadBlockByHash(hash) + block := env.BlockStore.LoadBlockByHash(hash) if block == nil { return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil } // If block is not nil, then blockMeta can't be nil. - blockMeta := blockStore.LoadBlockMeta(block.Height) + blockMeta := env.BlockStore.LoadBlockMeta(block.Height) return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil } @@ -100,12 +113,12 @@ func BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error // If no height is provided, it will fetch the commit for the latest block. 
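// [editor's sketch, not part of the patch] From a client, a nil height
// selects the latest block; per the branch below, the latest height
// returns the non-canonical "seen" commit until block height+1 is stored:
//
//	commit, err := c.Commit(context.Background(), nil)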
// More: https://docs.tendermint.com/master/rpc/#/Info/commit func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { - height, err := getHeight(blockStore.Base(), blockStore.Height(), heightPtr) + height, err := getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err } - blockMeta := blockStore.LoadBlockMeta(height) + blockMeta := env.BlockStore.LoadBlockMeta(height) if blockMeta == nil { return nil, nil } @@ -113,13 +126,13 @@ func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, erro // If the next block has not been committed yet, // use a non-canonical commit - if height == blockStore.Height() { - commit := blockStore.LoadSeenCommit(height) + if height == env.BlockStore.Height() { + commit := env.BlockStore.LoadSeenCommit(height) return ctypes.NewResultCommit(&header, commit, false), nil } // Return the canonical commit (comes from the block at height+1) - commit := blockStore.LoadBlockCommit(height) + commit := env.BlockStore.LoadBlockCommit(height) return ctypes.NewResultCommit(&header, commit, true), nil } @@ -131,12 +144,12 @@ func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, erro // getBlock(h).Txs[5] // More: https://docs.tendermint.com/master/rpc/#/Info/block_results func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { - height, err := getHeight(blockStore.Base(), blockStore.Height(), heightPtr) + height, err := getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err } - results, err := sm.LoadABCIResponses(stateDB, height) + results, err := env.StateStore.LoadABCIResponses(height) if err != nil { return nil, err } @@ -150,21 +163,3 @@ func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockR ConsensusParamUpdates: results.EndBlock.ConsensusParamUpdates, }, nil } - -func getHeight(currentBase int64, currentHeight int64, heightPtr *int64) (int64, error) { - if heightPtr != nil { - height := *heightPtr - if height <= 0 { - return 0, fmt.Errorf("height must be greater than 0") - } - if height > currentHeight { - return 0, fmt.Errorf("height must be less than or equal to the current blockchain height") - } - if height < currentBase { - return 0, fmt.Errorf("height %v is not available, blocks pruned at height %v", - height, currentBase) - } - return height, nil - } - return currentHeight, nil -} diff --git a/rpc/core/blocks_test.go b/rpc/core/blocks_test.go index c0561647f..161978d7b 100644 --- a/rpc/core/blocks_test.go +++ b/rpc/core/blocks_test.go @@ -10,8 +10,9 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + tmstate "github.com/tendermint/tendermint/proto/tendermint/state" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -69,7 +70,7 @@ func TestBlockchainInfo(t *testing.T) { } func TestBlockResults(t *testing.T) { - results := &sm.ABCIResponses{ + results := &tmstate.ABCIResponses{ DeliverTxs: []*abci.ResponseDeliverTx{ {Code: 0, Data: []byte{0x01}, Log: "ok"}, {Code: 0, Data: []byte{0x02}, Log: "ok"}, @@ -79,9 +80,11 @@ func TestBlockResults(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, } - stateDB = dbm.NewMemDB() - sm.SaveABCIResponses(stateDB, 100, results) - blockStore = mockBlockStore{height: 100} + env = 
&Environment{} + env.StateStore = sm.NewStore(dbm.NewMemDB()) + err := env.StateStore.SaveABCIResponses(100, results) + require.NoError(t, err) + env.BlockStore = mockBlockStore{height: 100} testCases := []struct { height int64 @@ -119,6 +122,7 @@ type mockBlockStore struct { func (mockBlockStore) Base() int64 { return 1 } func (store mockBlockStore) Height() int64 { return store.height } func (store mockBlockStore) Size() int64 { return store.height } +func (mockBlockStore) LoadBaseMeta() *types.BlockMeta { return nil } func (mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { return nil } func (mockBlockStore) LoadBlock(height int64) *types.Block { return nil } func (mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { return nil } diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index 10717c8c6..89b5d6a45 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -4,35 +4,32 @@ import ( cm "github.com/tendermint/tendermint/consensus" tmmath "github.com/tendermint/tendermint/libs/math" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" - sm "github.com/tendermint/tendermint/state" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) // Validators gets the validator set at the given block height. // -// If no height is provided, it will fetch the current validator set. Note the -// validators are sorted by their address - this is the canonical order for the -// validators in the set as used in computing their Merkle root. +// If no height is provided, it will fetch the latest validator set. Note the +// validators are sorted by their voting power - this is the canonical order +// for the validators in the set as used in computing their Merkle root. // // More: https://docs.tendermint.com/master/rpc/#/Info/validators -func Validators(ctx *rpctypes.Context, heightPtr *int64, page, perPage int) (*ctypes.ResultValidators, error) { - // The latest validator that we know is the - // NextValidator of the last block. - height := consensusState.GetState().LastBlockHeight + 1 - height, err := getHeight(blockStore.Base(), height, heightPtr) +func Validators(ctx *rpctypes.Context, heightPtr *int64, pagePtr, perPagePtr *int) (*ctypes.ResultValidators, error) { + // The latest validator that we know is the NextValidator of the last block. + height, err := getHeight(latestUncommittedHeight(), heightPtr) if err != nil { return nil, err } - validators, err := sm.LoadValidators(stateDB, height) + validators, err := env.StateStore.LoadValidators(height) if err != nil { return nil, err } totalCount := len(validators.Validators) - perPage = validatePerPage(perPage) - page, err = validatePage(page, perPage, totalCount) + perPage := validatePerPage(perPagePtr) + page, err := validatePage(pagePtr, perPage, totalCount) if err != nil { return nil, err } @@ -53,7 +50,7 @@ func Validators(ctx *rpctypes.Context, heightPtr *int64, page, perPage int) (*ct // More: https://docs.tendermint.com/master/rpc/#/Info/dump_consensus_state func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { // Get Peer consensus states. 
- peers := p2pPeers.Peers().List() + peers := env.P2PPeers.Peers().List() peerStates := make([]ctypes.PeerStateInfo, len(peers)) for i, peer := range peers { peerState, ok := peer.Get(types.PeerStateKey).(*cm.PeerState) @@ -72,7 +69,7 @@ func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState } } // Get self round state. - roundState, err := consensusState.GetRoundStateJSON() + roundState, err := env.ConsensusState.GetRoundStateJSON() if err != nil { return nil, err } @@ -86,25 +83,26 @@ func DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_state func ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { // Get self round state. - bz, err := consensusState.GetRoundStateSimpleJSON() + bz, err := env.ConsensusState.GetRoundStateSimpleJSON() return &ctypes.ResultConsensusState{RoundState: bz}, err } -// ConsensusParams gets the consensus parameters at the given block height. -// If no height is provided, it will fetch the current consensus params. +// ConsensusParams gets the consensus parameters at the given block height. +// If no height is provided, it will fetch the latest consensus params. // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_params func ConsensusParams(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultConsensusParams, error) { - height := consensusState.GetState().LastBlockHeight + 1 - height, err := getHeight(blockStore.Base(), height, heightPtr) + // The latest consensus params that we know is the consensus params after the + // last block. + height, err := getHeight(latestUncommittedHeight(), heightPtr) if err != nil { return nil, err } - consensusparams, err := sm.LoadConsensusParams(stateDB, height) + consensusParams, err := env.StateStore.LoadConsensusParams(height) if err != nil { return nil, err } return &ctypes.ResultConsensusParams{ BlockHeight: height, - ConsensusParams: consensusparams}, nil + ConsensusParams: consensusParams}, nil } diff --git a/rpc/core/dev.go b/rpc/core/dev.go index 71f284f89..b70f5f1e1 100644 --- a/rpc/core/dev.go +++ b/rpc/core/dev.go @@ -1,56 +1,12 @@ package core import ( - "os" - "runtime/pprof" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // UnsafeFlushMempool removes all transactions from the mempool. func UnsafeFlushMempool(ctx *rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) { - mempool.Flush() + env.Mempool.Flush() return &ctypes.ResultUnsafeFlushMempool{}, nil } - -var profFile *os.File - -// UnsafeStartCPUProfiler starts a pprof profiler using the given filename. -func UnsafeStartCPUProfiler(ctx *rpctypes.Context, filename string) (*ctypes.ResultUnsafeProfile, error) { - var err error - profFile, err = os.Create(filename) - if err != nil { - return nil, err - } - err = pprof.StartCPUProfile(profFile) - if err != nil { - return nil, err - } - return &ctypes.ResultUnsafeProfile{}, nil -} - -// UnsafeStopCPUProfiler stops the running pprof profiler. -func UnsafeStopCPUProfiler(ctx *rpctypes.Context) (*ctypes.ResultUnsafeProfile, error) { - pprof.StopCPUProfile() - if err := profFile.Close(); err != nil { - return nil, err - } - return &ctypes.ResultUnsafeProfile{}, nil -} - -// UnsafeWriteHeapProfile dumps a heap profile to the given filename. 
-func UnsafeWriteHeapProfile(ctx *rpctypes.Context, filename string) (*ctypes.ResultUnsafeProfile, error) { - memProfFile, err := os.Create(filename) - if err != nil { - return nil, err - } - if err := pprof.WriteHeapProfile(memProfFile); err != nil { - return nil, err - } - if err := memProfFile.Close(); err != nil { - return nil, err - } - - return &ctypes.ResultUnsafeProfile{}, nil -} diff --git a/rpc/core/doc.go b/rpc/core/doc.go index a51a02982..77ace4e2c 100644 --- a/rpc/core/doc.go +++ b/rpc/core/doc.go @@ -1,75 +1,8 @@ /* -# Introduction +Package core defines the Tendermint RPC endpoints. -Tendermint supports the following RPC protocols: - -* URI over HTTP -* JSONRPC over HTTP -* JSONRPC over websockets - -Tendermint RPC is built using our own RPC library which contains its own set of documentation and tests. -See it here: https://github.com/tendermint/tendermint/tree/master/rpc/lib - -## Configuration - -RPC can be configured by tuning parameters under `[rpc]` table in the `$TMHOME/config/config.toml` file -or by using the `--rpc.X` command-line flags. - -Default rpc listen address is `tcp://0.0.0.0:26657`. -To set another address, set the `laddr` config parameter to desired value. -CORS (Cross-Origin Resource Sharing) can be enabled by setting -`cors_allowed_origins`, `cors_allowed_methods`, `cors_allowed_headers` config parameters. - -## Arguments - -Arguments which expect strings or byte arrays may be passed as quoted strings, -like `"abc"` or as `0x`-prefixed strings, like `0x616263`. - -## URI/HTTP - -```bash -curl 'localhost:26657/broadcast_tx_sync?tx="abc"' -``` - -> Response: - -```json -{ - "error": "", - "result": { - "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF", - "log": "", - "data": "", - "code": "0" - }, - "id": "", - "jsonrpc": "2.0" -} -``` - -## JSONRPC/HTTP - -JSONRPC requests can be POST'd to the root RPC endpoint via HTTP (e.g. `http://localhost:26657/`). - -```json -{ - "method": "broadcast_tx_sync", - "jsonrpc": "2.0", - "params": [ "abc" ], - "id": "dontcare" -} -``` - -## JSONRPC/websockets - -JSONRPC requests can be made via websocket. -The websocket endpoint is at `/websocket`, e.g. `localhost:26657/websocket`. -Asynchronous RPC functions like event `subscribe` and `unsubscribe` are only available via websockets. - - -## More Examples - -See the various bash tests using curl in `test/`, and examples using the `Go` API in `rpc/client/`. +Tendermint ships with its own JSONRPC library - +https://github.com/tendermint/tendermint/tree/master/rpc/jsonrpc. 
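[editor's sketch, not part of the patch] With the curl walkthrough gone, a minimal Go example of calling these endpoints through the rpc/client/http package exercised in the tests above; the address is the default RPC listen address:

```go
c, err := rpchttp.New("tcp://127.0.0.1:26657", "/websocket")
if err != nil {
	panic(err)
}
status, err := c.Status(context.Background())
if err != nil {
	panic(err)
}
fmt.Println(status.SyncInfo.LatestBlockHeight)
```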
## Get the list @@ -92,7 +25,6 @@ Available endpoints: /health /unconfirmed_txs /unsafe_flush_mempool -/unsafe_stop_cpu_profiler /validators Endpoints that require arguments: @@ -107,11 +39,7 @@ Endpoints that require arguments: /dial_persistent_peers?persistent_peers=_ /subscribe?event=_ /tx?hash=_&prove=_ -/unsafe_start_cpu_profiler?filename=_ -/unsafe_write_heap_profile?filename=_ /unsubscribe?event=_ ``` - -# Endpoints */ package core diff --git a/rpc/core/env.go b/rpc/core/env.go new file mode 100644 index 000000000..ccc46d5a3 --- /dev/null +++ b/rpc/core/env.go @@ -0,0 +1,167 @@ +package core + +import ( + "fmt" + "time" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/consensus" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/libs/log" + mempl "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/state/txindex" + "github.com/tendermint/tendermint/types" +) + +const ( + // see README + defaultPerPage = 30 + maxPerPage = 100 + + // SubscribeTimeout is the maximum time we wait to subscribe for an event. + // Must be less than the server's write timeout (see rpcserver.DefaultConfig). + SubscribeTimeout = 5 * time.Second +) + +var ( + // set by Node + env *Environment +) + +// SetEnvironment sets up the given Environment. +// It will race if multiple Nodes call SetEnvironment. +func SetEnvironment(e *Environment) { + env = e +} + +//---------------------------------------------- +// These interfaces are used by RPC and must be thread safe + +type Consensus interface { + GetState() sm.State + GetValidators() (int64, []*types.Validator) + GetLastHeight() int64 + GetRoundStateJSON() ([]byte, error) + GetRoundStateSimpleJSON() ([]byte, error) +} + +type transport interface { + Listeners() []string + IsListening() bool + NodeInfo() p2p.NodeInfo +} + +type peers interface { + AddPersistentPeers([]string) error + AddUnconditionalPeerIDs([]string) error + AddPrivatePeerIDs([]string) error + DialPeersAsync([]string) error + Peers() p2p.IPeerSet +} + +//---------------------------------------------- +// Environment contains objects and interfaces used by the RPC. It is expected +// to be set up once during startup.
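// [editor's sketch, not part of the patch] Typical wiring, done once by the
// Node at startup; the variables on the right are placeholders:
//
//	core.SetEnvironment(&core.Environment{
//		Logger:     logger,
//		BlockStore: blockStore,
//		StateStore: stateStore,
//		Mempool:    mempool,
//	})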
+type Environment struct { + // external, thread safe interfaces + ProxyAppQuery proxy.AppConnQuery + ProxyAppMempool proxy.AppConnMempool + + // interfaces defined in types and above + StateStore sm.Store + BlockStore sm.BlockStore + EvidencePool sm.EvidencePool + ConsensusState Consensus + P2PPeers peers + P2PTransport transport + + // objects + PubKey crypto.PubKey + GenDoc *types.GenesisDoc // cache the genesis structure + TxIndexer txindex.TxIndexer + ConsensusReactor *consensus.Reactor + EventBus *types.EventBus // thread safe + Mempool mempl.Mempool + + Logger log.Logger + + Config cfg.RPCConfig +} + +//---------------------------------------------- + +func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { + if perPage < 1 { + panic(fmt.Sprintf("zero or negative perPage: %d", perPage)) + } + + if pagePtr == nil { // no page parameter + return 1, nil + } + + pages := ((totalCount - 1) / perPage) + 1 + if pages == 0 { + pages = 1 // one page (even if it's empty) + } + page := *pagePtr + if page <= 0 || page > pages { + return 1, fmt.Errorf("page should be within [1, %d] range, given %d", pages, page) + } + + return page, nil +} + +func validatePerPage(perPagePtr *int) int { + if perPagePtr == nil { // no per_page parameter + return defaultPerPage + } + + perPage := *perPagePtr + if perPage < 1 { + return defaultPerPage + } else if perPage > maxPerPage { + return maxPerPage + } + return perPage +} + +func validateSkipCount(page, perPage int) int { + skipCount := (page - 1) * perPage + if skipCount < 0 { + return 0 + } + + return skipCount +} + +// latestHeight can be either latest committed or uncommitted (+1) height. +func getHeight(latestHeight int64, heightPtr *int64) (int64, error) { + if heightPtr != nil { + height := *heightPtr + if height <= 0 { + return 0, fmt.Errorf("height must be greater than 0, but got %d", height) + } + if height > latestHeight { + return 0, fmt.Errorf("height %d must be less than or equal to the current blockchain height %d", + height, latestHeight) + } + base := env.BlockStore.Base() + if height < base { + return 0, fmt.Errorf("height %v is not available, lowest height is %v", + height, base) + } + return height, nil + } + return latestHeight, nil +} + +func latestUncommittedHeight() int64 { + nodeIsSyncing := env.ConsensusReactor.WaitSync() + if nodeIsSyncing { + return env.BlockStore.Height() + } + return env.BlockStore.Height() + 1 +} diff --git a/rpc/core/pipe_test.go b/rpc/core/env_test.go similarity index 81% rename from rpc/core/pipe_test.go rename to rpc/core/env_test.go index 93aff3e58..b44c21a4c 100644 --- a/rpc/core/pipe_test.go +++ b/rpc/core/env_test.go @@ -8,7 +8,6 @@ import ( ) func TestPaginationPage(t *testing.T) { - cases := []struct { totalCount int perPage int @@ -41,7 +40,7 @@ func TestPaginationPage(t *testing.T) { } for _, c := range cases { - p, err := validatePage(c.page, c.perPage, c.totalCount) + p, err := validatePage(&c.page, c.perPage, c.totalCount) if c.expErr { assert.Error(t, err) continue @@ -50,6 +49,11 @@ func TestPaginationPage(t *testing.T) { assert.Equal(t, c.newPage, p, fmt.Sprintf("%v", c)) } + // nil case + p, err := validatePage(nil, 1, 1) + if assert.NoError(t, err) { + assert.Equal(t, 1, p) + } } func TestPaginationPerPage(t *testing.T) { @@ -68,7 +72,11 @@ func TestPaginationPerPage(t *testing.T) { } for _, c := range cases { - p := validatePerPage(c.perPage) + p := validatePerPage(&c.perPage) assert.Equal(t, c.newPerPage, p, fmt.Sprintf("%v", c)) } + + // nil case + p := validatePerPage(nil) 
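// [editor's note, not part of the patch] Worked example for validatePage
// above: totalCount=5, perPage=2 gives pages = ((5-1)/2)+1 = 3, so page=4
// is rejected with an error while a nil page defaults to 1, matching the
// nil cases exercised here.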
+ assert.Equal(t, defaultPerPage, p) } diff --git a/rpc/core/events.go b/rpc/core/events.go index 7802f160e..5e6b3db57 100644 --- a/rpc/core/events.go +++ b/rpc/core/events.go @@ -3,13 +3,12 @@ package core import ( "context" "fmt" - - "github.com/pkg/errors" + "time" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) const ( @@ -22,23 +21,23 @@ const ( func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { addr := ctx.RemoteAddr() - if eventBus.NumClients() >= config.MaxSubscriptionClients { - return nil, fmt.Errorf("max_subscription_clients %d reached", config.MaxSubscriptionClients) - } else if eventBus.NumClientSubscriptions(addr) >= config.MaxSubscriptionsPerClient { - return nil, fmt.Errorf("max_subscriptions_per_client %d reached", config.MaxSubscriptionsPerClient) + if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { + return nil, fmt.Errorf("max_subscription_clients %d reached", env.Config.MaxSubscriptionClients) + } else if env.EventBus.NumClientSubscriptions(addr) >= env.Config.MaxSubscriptionsPerClient { + return nil, fmt.Errorf("max_subscriptions_per_client %d reached", env.Config.MaxSubscriptionsPerClient) } - logger.Info("Subscribe to query", "remote", addr, "query", query) + env.Logger.Info("Subscribe to query", "remote", addr, "query", query) q, err := tmquery.New(query) if err != nil { - return nil, errors.Wrap(err, "failed to parse query") + return nil, fmt.Errorf("failed to parse query: %w", err) } subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) defer cancel() - sub, err := eventBus.Subscribe(subCtx, addr, q, subBufferSize) + sub, err := env.EventBus.Subscribe(subCtx, addr, q, subBufferSize) if err != nil { return nil, err } @@ -49,13 +48,16 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er for { select { case msg := <-sub.Out(): - resultEvent := &ctypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()} - ctx.WSConn.TryWriteRPCResponse( - rpctypes.NewRPCSuccessResponse( - ctx.WSConn.Codec(), - subscriptionID, - resultEvent, - )) + var ( + resultEvent = &ctypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()} + resp = rpctypes.NewRPCSuccessResponse(subscriptionID, resultEvent) + ) + writeCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := ctx.WSConn.WriteRPCResponse(writeCtx, resp); err != nil { + env.Logger.Info("Can't write response (slow client)", + "to", addr, "subscriptionID", subscriptionID, "err", err) + } case <-sub.Cancelled(): if sub.Err() != tmpubsub.ErrUnsubscribed { var reason string @@ -64,11 +66,14 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er } else { reason = sub.Err().Error() } - ctx.WSConn.TryWriteRPCResponse( - rpctypes.RPCServerError( - subscriptionID, - fmt.Errorf("subscription was cancelled (reason: %s)", reason), - )) + var ( + err = fmt.Errorf("subscription was cancelled (reason: %s)", reason) + resp = rpctypes.RPCServerError(subscriptionID, err) + ) + if ok := ctx.WSConn.TryWriteRPCResponse(resp); !ok { + env.Logger.Info("Can't write response (slow client)", + "to", addr, "subscriptionID", subscriptionID, "err", err) + } } return } @@ -82,12 +87,12 @@ func Subscribe(ctx 
*rpctypes.Context, query string) (*ctypes.ResultSubscribe, er // More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe func Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { addr := ctx.RemoteAddr() - logger.Info("Unsubscribe from query", "remote", addr, "query", query) + env.Logger.Info("Unsubscribe from query", "remote", addr, "query", query) q, err := tmquery.New(query) if err != nil { - return nil, errors.Wrap(err, "failed to parse query") + return nil, fmt.Errorf("failed to parse query: %w", err) } - err = eventBus.Unsubscribe(context.Background(), addr, q) + err = env.EventBus.Unsubscribe(context.Background(), addr, q) if err != nil { return nil, err } @@ -98,8 +103,8 @@ func Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe // More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe_all func UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { addr := ctx.RemoteAddr() - logger.Info("Unsubscribe from all", "remote", addr) - err := eventBus.UnsubscribeAll(context.Background(), addr) + env.Logger.Info("Unsubscribe from all", "remote", addr) + err := env.EventBus.UnsubscribeAll(context.Background(), addr) if err != nil { return nil, err } diff --git a/rpc/core/evidence.go b/rpc/core/evidence.go index 7d7ac2ec7..52990526d 100644 --- a/rpc/core/evidence.go +++ b/rpc/core/evidence.go @@ -1,18 +1,27 @@ package core import ( - "github.com/tendermint/tendermint/evidence" + "errors" + "fmt" + ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) // BroadcastEvidence broadcasts evidence of the misbehavior. -// More: https://docs.tendermint.com/master/rpc/#/Info/broadcast_evidence +// More: https://docs.tendermint.com/master/rpc/#/Evidence/broadcast_evidence func BroadcastEvidence(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { - err := evidencePool.AddEvidence(ev) - if _, ok := err.(evidence.ErrEvidenceAlreadyStored); err == nil || ok { - return &ctypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil + if ev == nil { + return nil, errors.New("no evidence was provided") + } + + if err := ev.ValidateBasic(); err != nil { + return nil, fmt.Errorf("evidence.ValidateBasic failed: %w", err) + } + + if err := env.EvidencePool.AddEvidence(ev); err != nil { + return nil, fmt.Errorf("failed to add evidence: %w", err) } - return nil, err + return &ctypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil } diff --git a/rpc/core/health.go b/rpc/core/health.go index eb715bea0..97ea56865 100644 --- a/rpc/core/health.go +++ b/rpc/core/health.go @@ -2,7 +2,7 @@ package core import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // Health gets node health. 
Returns empty result (200 OK) on success, no response - in case of an error. diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 0e6ce7a2c..79aa03bc5 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -2,15 +2,14 @@ package core import ( "context" + "errors" "fmt" "time" - "github.com/pkg/errors" - abci "github.com/tendermint/tendermint/abci/types" mempl "github.com/tendermint/tendermint/mempool" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -21,7 +20,7 @@ import ( // CheckTx nor DeliverTx results. // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - err := mempool.CheckTx(tx, nil, mempl.TxInfo{}) + err := env.Mempool.CheckTx(tx, nil, mempl.TxInfo{}) if err != nil { return nil, err @@ -34,7 +33,7 @@ func BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadca // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { resCh := make(chan *abci.Response, 1) - err := mempool.CheckTx(tx, func(res *abci.Response) { + err := env.Mempool.CheckTx(tx, func(res *abci.Response) { resCh <- res }, mempl.TxInfo{}) if err != nil { @@ -56,31 +55,35 @@ func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcas func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { subscriber := ctx.RemoteAddr() - if eventBus.NumClients() >= config.MaxSubscriptionClients { - return nil, fmt.Errorf("max_subscription_clients %d reached", config.MaxSubscriptionClients) - } else if eventBus.NumClientSubscriptions(subscriber) >= config.MaxSubscriptionsPerClient { - return nil, fmt.Errorf("max_subscriptions_per_client %d reached", config.MaxSubscriptionsPerClient) + if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { + return nil, fmt.Errorf("max_subscription_clients %d reached", env.Config.MaxSubscriptionClients) + } else if env.EventBus.NumClientSubscriptions(subscriber) >= env.Config.MaxSubscriptionsPerClient { + return nil, fmt.Errorf("max_subscriptions_per_client %d reached", env.Config.MaxSubscriptionsPerClient) } // Subscribe to tx being committed in block.
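// [editor's sketch, not part of the patch] Seen from a client, the handler
// below blocks until DeliverTx fires or TimeoutBroadcastTxCommit elapses:
//
//	res, err := c.BroadcastTxCommit(context.Background(), tx)
//	if err == nil && res.DeliverTx.IsOK() { /* tx is in a block */ }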
subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) defer cancel() q := types.EventQueryTxFor(tx) - deliverTxSub, err := eventBus.Subscribe(subCtx, subscriber, q) + deliverTxSub, err := env.EventBus.Subscribe(subCtx, subscriber, q) if err != nil { - err = errors.Wrap(err, "failed to subscribe to tx") - logger.Error("Error on broadcast_tx_commit", "err", err) + err = fmt.Errorf("failed to subscribe to tx: %w", err) + env.Logger.Error("Error on broadcast_tx_commit", "err", err) return nil, err } - defer eventBus.Unsubscribe(context.Background(), subscriber, q) + defer func() { + if err := env.EventBus.Unsubscribe(context.Background(), subscriber, q); err != nil { + env.Logger.Error("Error unsubscribing from eventBus", "err", err) + } + }() // Broadcast tx and wait for CheckTx result checkTxResCh := make(chan *abci.Response, 1) - err = mempool.CheckTx(tx, func(res *abci.Response) { + err = env.Mempool.CheckTx(tx, func(res *abci.Response) { checkTxResCh <- res }, mempl.TxInfo{}) if err != nil { - logger.Error("Error on broadcastTxCommit", "err", err) + env.Logger.Error("Error on broadcastTxCommit", "err", err) return nil, fmt.Errorf("error on broadcastTxCommit: %v", err) } checkTxResMsg := <-checkTxResCh @@ -111,15 +114,15 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc reason = deliverTxSub.Err().Error() } err = fmt.Errorf("deliverTxSub was cancelled (reason: %s)", reason) - logger.Error("Error on broadcastTxCommit", "err", err) + env.Logger.Error("Error on broadcastTxCommit", "err", err) return &ctypes.ResultBroadcastTxCommit{ CheckTx: *checkTxRes, DeliverTx: abci.ResponseDeliverTx{}, Hash: tx.Hash(), }, err - case <-time.After(config.TimeoutBroadcastTxCommit): + case <-time.After(env.Config.TimeoutBroadcastTxCommit): err = errors.New("timed out waiting for tx to be included in a block") - logger.Error("Error on broadcastTxCommit", "err", err) + env.Logger.Error("Error on broadcastTxCommit", "err", err) return &ctypes.ResultBroadcastTxCommit{ CheckTx: *checkTxRes, DeliverTx: abci.ResponseDeliverTx{}, @@ -131,15 +134,15 @@ func BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadc // UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries) // including their number. // More: https://docs.tendermint.com/master/rpc/#/Info/unconfirmed_txs -func UnconfirmedTxs(ctx *rpctypes.Context, limit int) (*ctypes.ResultUnconfirmedTxs, error) { +func UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { // reuse per_page validator - limit = validatePerPage(limit) + limit := validatePerPage(limitPtr) - txs := mempool.ReapMaxTxs(limit) + txs := env.Mempool.ReapMaxTxs(limit) return &ctypes.ResultUnconfirmedTxs{ Count: len(txs), - Total: mempool.Size(), - TotalBytes: mempool.TxsBytes(), + Total: env.Mempool.Size(), + TotalBytes: env.Mempool.TxsBytes(), Txs: txs}, nil } @@ -147,7 +150,18 @@ func UnconfirmedTxs(ctx *rpctypes.Context, limit int) (*ctypes.ResultUnconfirmed // More: https://docs.tendermint.com/master/rpc/#/Info/num_unconfirmed_txs func NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { return &ctypes.ResultUnconfirmedTxs{ - Count: mempool.Size(), - Total: mempool.Size(), - TotalBytes: mempool.TxsBytes()}, nil + Count: env.Mempool.Size(), + Total: env.Mempool.Size(), + TotalBytes: env.Mempool.TxsBytes()}, nil +} + +// CheckTx checks the transaction without executing it. The transaction won't +// be added to the mempool either. 
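// [editor's sketch, not part of the patch] e.g. from the HTTP client, as in
// TestCheckTx earlier in this diff:
//
//	res, err := c.CheckTx(context.Background(), tx)
//	// res.Code == abci.CodeTypeOK means the app would accept the tx;
//	// the mempool stays empty.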
+// More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx +func CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { + res, err := env.ProxyAppMempool.CheckTxSync(abci.RequestCheckTx{Tx: tx}) + if err != nil { + return nil, err + } + return &ctypes.ResultCheckTx{ResponseCheckTx: *res}, nil } diff --git a/rpc/core/net.go b/rpc/core/net.go index 4a3d67d4f..a8aedf9e0 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -1,19 +1,19 @@ package core import ( + "errors" "fmt" - - "github.com/pkg/errors" + "strings" "github.com/tendermint/tendermint/p2p" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // NetInfo returns network info. // More: https://docs.tendermint.com/master/rpc/#/Info/net_info func NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { - peersList := p2pPeers.Peers().List() + peersList := env.P2PPeers.Peers().List() peers := make([]ctypes.Peer, 0, len(peersList)) for _, peer := range peersList { nodeInfo, ok := peer.NodeInfo().(p2p.DefaultNodeInfo) @@ -31,8 +31,8 @@ func NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { // PRO: useful info // CON: privacy return &ctypes.ResultNetInfo{ - Listening: p2pTransport.IsListening(), - Listeners: p2pTransport.Listeners(), + Listening: env.P2PTransport.IsListening(), + Listeners: env.P2PTransport.Listeners(), NPeers: len(peers), Peers: peers, }, nil @@ -43,8 +43,8 @@ func UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialS if len(seeds) == 0 { return &ctypes.ResultDialSeeds{}, errors.New("no seeds provided") } - logger.Info("DialSeeds", "seeds", seeds) - if err := p2pPeers.DialPeersAsync(seeds); err != nil { + env.Logger.Info("DialSeeds", "seeds", seeds) + if err := env.P2PPeers.DialPeersAsync(seeds); err != nil { return &ctypes.ResultDialSeeds{}, err } return &ctypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil @@ -52,24 +52,62 @@ func UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialS // UnsafeDialPeers dials the given peers (comma-separated id@IP:PORT), // optionally making them persistent. 
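
// The reworked UnsafeDialPeers below first reduces each id@host:port
// address to its node ID via the new getIDs helper (end of this file)
// before registering private or unconditional peers. The same split, in
// isolation:

package main

import (
	"fmt"
	"strings"
)

func main() {
	peer := "d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"
	spl := strings.Split(peer, "@")
	if len(spl) != 2 {
		// the handler surfaces this case as p2p.ErrNetAddressNoID
		panic("expected id@host:port")
	}
	fmt.Println(spl[0]) // the ID passed to AddPrivatePeerIDs / AddUnconditionalPeerIDs
}
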
-func UnsafeDialPeers(ctx *rpctypes.Context, peers []string, persistent bool) (*ctypes.ResultDialPeers, error) { +func UnsafeDialPeers(ctx *rpctypes.Context, peers []string, persistent, unconditional, private bool) ( + *ctypes.ResultDialPeers, error) { if len(peers) == 0 { return &ctypes.ResultDialPeers{}, errors.New("no peers provided") } - logger.Info("DialPeers", "peers", peers, "persistent", persistent) + + ids, err := getIDs(peers) + if err != nil { + return &ctypes.ResultDialPeers{}, err + } + + env.Logger.Info("DialPeers", "peers", peers, "persistent", + persistent, "unconditional", unconditional, "private", private) + if persistent { - if err := p2pPeers.AddPersistentPeers(peers); err != nil { + if err := env.P2PPeers.AddPersistentPeers(peers); err != nil { return &ctypes.ResultDialPeers{}, err } } - if err := p2pPeers.DialPeersAsync(peers); err != nil { + + if private { + if err := env.P2PPeers.AddPrivatePeerIDs(ids); err != nil { + return &ctypes.ResultDialPeers{}, err + } + } + + if unconditional { + if err := env.P2PPeers.AddUnconditionalPeerIDs(ids); err != nil { + return &ctypes.ResultDialPeers{}, err + } + } + + if err := env.P2PPeers.DialPeersAsync(peers); err != nil { return &ctypes.ResultDialPeers{}, err } + return &ctypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil } // Genesis returns genesis file. // More: https://docs.tendermint.com/master/rpc/#/Info/genesis func Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { - return &ctypes.ResultGenesis{Genesis: genDoc}, nil + return &ctypes.ResultGenesis{Genesis: env.GenDoc}, nil +} + +func getIDs(peers []string) ([]string, error) { + ids := make([]string, 0, len(peers)) + + for _, peer := range peers { + + spl := strings.Split(peer, "@") + if len(spl) != 2 { + return nil, p2p.ErrNetAddressNoID{Addr: peer} + } + ids = append(ids, spl[0]) + + } + return ids, nil } diff --git a/rpc/core/net_test.go b/rpc/core/net_test.go index 651e1f69d..c971776f3 100644 --- a/rpc/core/net_test.go +++ b/rpc/core/net_test.go @@ -9,7 +9,7 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func TestUnsafeDialSeeds(t *testing.T) { @@ -17,10 +17,14 @@ func TestUnsafeDialSeeds(t *testing.T) { func(n int, sw *p2p.Switch) *p2p.Switch { return sw }) err := sw.Start() require.NoError(t, err) - defer sw.Stop() + t.Cleanup(func() { + if err := sw.Stop(); err != nil { + t.Error(err) + } + }) - logger = log.TestingLogger() - p2pPeers = sw + env.Logger = log.TestingLogger() + env.P2PPeers = sw testCases := []struct { seeds []string @@ -45,24 +49,34 @@ func TestUnsafeDialSeeds(t *testing.T) { func TestUnsafeDialPeers(t *testing.T) { sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", func(n int, sw *p2p.Switch) *p2p.Switch { return sw }) + sw.SetAddrBook(&p2p.AddrBookMock{ + Addrs: make(map[string]struct{}), + OurAddrs: make(map[string]struct{}), + PrivateAddrs: make(map[string]struct{}), + }) err := sw.Start() require.NoError(t, err) - defer sw.Stop() + t.Cleanup(func() { + if err := sw.Stop(); err != nil { + t.Error(err) + } + }) - logger = log.TestingLogger() - p2pPeers = sw + env.Logger = log.TestingLogger() + env.P2PPeers = sw testCases := []struct { - peers []string - isErr bool + peers []string + persistence, unconditional, private bool + isErr bool }{ - {[]string{}, 
true}, - {[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, false}, - {[]string{"127.0.0.1:41198"}, true}, + {[]string{}, false, false, false, true}, + {[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, true, true, true, false}, + {[]string{"127.0.0.1:41198"}, true, true, false, true}, } for _, tc := range testCases { - res, err := UnsafeDialPeers(&rpctypes.Context{}, tc.peers, false) + res, err := UnsafeDialPeers(&rpctypes.Context{}, tc.peers, tc.persistence, tc.unconditional, tc.private) if tc.isErr { assert.Error(t, err) } else { diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go deleted file mode 100644 index 4fb3b9b13..000000000 --- a/rpc/core/pipe.go +++ /dev/null @@ -1,180 +0,0 @@ -package core - -import ( - "fmt" - "time" - - dbm "github.com/tendermint/tm-db" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/consensus" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/libs/log" - mempl "github.com/tendermint/tendermint/mempool" - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/txindex" - "github.com/tendermint/tendermint/types" -) - -const ( - // see README - defaultPerPage = 30 - maxPerPage = 100 - - // SubscribeTimeout is the maximum time we wait to subscribe for an event. - // must be less than the server's write timeout (see rpcserver.DefaultConfig) - SubscribeTimeout = 5 * time.Second -) - -//---------------------------------------------- -// These interfaces are used by RPC and must be thread safe - -type Consensus interface { - GetState() sm.State - GetValidators() (int64, []*types.Validator) - GetLastHeight() int64 - GetRoundStateJSON() ([]byte, error) - GetRoundStateSimpleJSON() ([]byte, error) -} - -type transport interface { - Listeners() []string - IsListening() bool - NodeInfo() p2p.NodeInfo -} - -type peers interface { - AddPersistentPeers([]string) error - DialPeersAsync([]string) error - Peers() p2p.IPeerSet -} - -//---------------------------------------------- -// These package level globals come with setters -// that are expected to be called only once, on startup - -var ( - // external, thread safe interfaces - proxyAppQuery proxy.AppConnQuery - - // interfaces defined in types and above - stateDB dbm.DB - blockStore sm.BlockStore - evidencePool sm.EvidencePool - consensusState Consensus - p2pPeers peers - p2pTransport transport - - // objects - pubKey crypto.PubKey - genDoc *types.GenesisDoc // cache the genesis structure - txIndexer txindex.TxIndexer - consensusReactor *consensus.Reactor - eventBus *types.EventBus // thread safe - mempool mempl.Mempool - - logger log.Logger - - config cfg.RPCConfig -) - -func SetStateDB(db dbm.DB) { - stateDB = db -} - -func SetBlockStore(bs sm.BlockStore) { - blockStore = bs -} - -func SetMempool(mem mempl.Mempool) { - mempool = mem -} - -func SetEvidencePool(evpool sm.EvidencePool) { - evidencePool = evpool -} - -func SetConsensusState(cs Consensus) { - consensusState = cs -} - -func SetP2PPeers(p peers) { - p2pPeers = p -} - -func SetP2PTransport(t transport) { - p2pTransport = t -} - -func SetPubKey(pk crypto.PubKey) { - pubKey = pk -} - -func SetGenesisDoc(doc *types.GenesisDoc) { - genDoc = doc -} - -func SetProxyAppQuery(appConn proxy.AppConnQuery) { - proxyAppQuery = appConn -} - -func SetTxIndexer(indexer txindex.TxIndexer) { - txIndexer = indexer -} - -func SetConsensusReactor(conR 
*consensus.Reactor) { - consensusReactor = conR -} - -func SetLogger(l log.Logger) { - logger = l -} - -func SetEventBus(b *types.EventBus) { - eventBus = b -} - -// SetConfig sets an RPCConfig. -func SetConfig(c cfg.RPCConfig) { - config = c -} - -func validatePage(page, perPage, totalCount int) (int, error) { - if perPage < 1 { - panic(fmt.Sprintf("zero or negative perPage: %d", perPage)) - } - - if page == 0 { - return 1, nil // default - } - - pages := ((totalCount - 1) / perPage) + 1 - if pages == 0 { - pages = 1 // one page (even if it's empty) - } - if page < 0 || page > pages { - return 1, fmt.Errorf("page should be within [0, %d] range, given %d", pages, page) - } - - return page, nil -} - -func validatePerPage(perPage int) int { - if perPage < 1 { - return defaultPerPage - } else if perPage > maxPerPage { - return maxPerPage - } - return perPage -} - -func validateSkipCount(page, perPage int) int { - skipCount := (page - 1) * perPage - if skipCount < 0 { - return 0 - } - - return skipCount -} diff --git a/rpc/core/routes.go b/rpc/core/routes.go index aa0403f87..639a4be93 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -1,12 +1,12 @@ package core import ( - rpc "github.com/tendermint/tendermint/rpc/lib/server" + rpc "github.com/tendermint/tendermint/rpc/jsonrpc/server" ) // TODO: better system than "unsafe" prefix -// NOTE: Amino is registered in rpc/core/types/codec.go. +// Routes is a map of available routes. var Routes = map[string]*rpc.RPCFunc{ // subscribe/unsubscribe are reserved for websocket events. "subscribe": rpc.NewWSRPCFunc(Subscribe, "query"), @@ -23,6 +23,7 @@ var Routes = map[string]*rpc.RPCFunc{ "block_by_hash": rpc.NewRPCFunc(BlockByHash, "hash"), "block_results": rpc.NewRPCFunc(BlockResults, "height"), "commit": rpc.NewRPCFunc(Commit, "height"), + "check_tx": rpc.NewRPCFunc(CheckTx, "tx"), "tx": rpc.NewRPCFunc(Tx, "hash,prove"), "tx_search": rpc.NewRPCFunc(TxSearch, "query,prove,page,per_page,order_by"), "validators": rpc.NewRPCFunc(Validators, "height,page,per_page"), @@ -45,14 +46,10 @@ var Routes = map[string]*rpc.RPCFunc{ "broadcast_evidence": rpc.NewRPCFunc(BroadcastEvidence, "evidence"), } +// AddUnsafeRoutes adds unsafe routes. 
func AddUnsafeRoutes() { // control API Routes["dial_seeds"] = rpc.NewRPCFunc(UnsafeDialSeeds, "seeds") - Routes["dial_peers"] = rpc.NewRPCFunc(UnsafeDialPeers, "peers,persistent") + Routes["dial_peers"] = rpc.NewRPCFunc(UnsafeDialPeers, "peers,persistent,unconditional,private") Routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(UnsafeFlushMempool, "") - - // profiler API - Routes["unsafe_start_cpu_profiler"] = rpc.NewRPCFunc(UnsafeStartCPUProfiler, "filename") - Routes["unsafe_stop_cpu_profiler"] = rpc.NewRPCFunc(UnsafeStopCPUProfiler, "") - Routes["unsafe_write_heap_profile"] = rpc.NewRPCFunc(UnsafeWriteHeapProfile, "filename") } diff --git a/rpc/core/status.go b/rpc/core/status.go index 4e950d4a3..72f50f546 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -1,14 +1,12 @@ package core import ( - "bytes" "time" tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/p2p" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" - sm "github.com/tendermint/tendermint/state" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -17,46 +15,44 @@ import ( // More: https://docs.tendermint.com/master/rpc/#/Info/status func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { var ( - earliestBlockMeta *types.BlockMeta + earliestBlockHeight int64 earliestBlockHash tmbytes.HexBytes earliestAppHash tmbytes.HexBytes earliestBlockTimeNano int64 ) - earliestBlockHeight := blockStore.Base() - earliestBlockMeta = blockStore.LoadBlockMeta(earliestBlockHeight) - if earliestBlockMeta != nil { + + if earliestBlockMeta := env.BlockStore.LoadBaseMeta(); earliestBlockMeta != nil { + earliestBlockHeight = earliestBlockMeta.Header.Height earliestAppHash = earliestBlockMeta.Header.AppHash earliestBlockHash = earliestBlockMeta.BlockID.Hash earliestBlockTimeNano = earliestBlockMeta.Header.Time.UnixNano() } - var latestHeight int64 - if consensusReactor.FastSync() { - latestHeight = blockStore.Height() - } else { - latestHeight = consensusState.GetLastHeight() - } - var ( - latestBlockMeta *types.BlockMeta latestBlockHash tmbytes.HexBytes latestAppHash tmbytes.HexBytes latestBlockTimeNano int64 + + latestHeight = env.BlockStore.Height() ) + if latestHeight != 0 { - latestBlockMeta = blockStore.LoadBlockMeta(latestHeight) - latestBlockHash = latestBlockMeta.BlockID.Hash - latestAppHash = latestBlockMeta.Header.AppHash - latestBlockTimeNano = latestBlockMeta.Header.Time.UnixNano() + if latestBlockMeta := env.BlockStore.LoadBlockMeta(latestHeight); latestBlockMeta != nil { + latestBlockHash = latestBlockMeta.BlockID.Hash + latestAppHash = latestBlockMeta.Header.AppHash + latestBlockTimeNano = latestBlockMeta.Header.Time.UnixNano() + } } + // Return the very last voting power, not the voting power of this validator + // during the last block. 
var votingPower int64 - if val := validatorAtHeight(latestHeight); val != nil { + if val := validatorAtHeight(latestUncommittedHeight()); val != nil { votingPower = val.VotingPower } result := &ctypes.ResultStatus{ - NodeInfo: p2pTransport.NodeInfo().(p2p.DefaultNodeInfo), + NodeInfo: env.P2PTransport.NodeInfo().(p2p.DefaultNodeInfo), SyncInfo: ctypes.SyncInfo{ LatestBlockHash: latestBlockHash, LatestAppHash: latestAppHash, @@ -66,11 +62,11 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { EarliestAppHash: earliestAppHash, EarliestBlockHeight: earliestBlockHeight, EarliestBlockTime: time.Unix(0, earliestBlockTimeNano), - CatchingUp: consensusReactor.FastSync(), + CatchingUp: env.ConsensusReactor.WaitSync(), }, ValidatorInfo: ctypes.ValidatorInfo{ - Address: pubKey.Address(), - PubKey: pubKey, + Address: env.PubKey.Address(), + PubKey: env.PubKey, VotingPower: votingPower, }, } @@ -79,27 +75,11 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { } func validatorAtHeight(h int64) *types.Validator { - privValAddress := pubKey.Address() - - // If we're still at height h, search in the current validator set. - lastBlockHeight, vals := consensusState.GetValidators() - if lastBlockHeight == h { - for _, val := range vals { - if bytes.Equal(val.Address, privValAddress) { - return val - } - } - } - - // If we've moved to the next height, retrieve the validator set from DB. - if lastBlockHeight > h { - vals, err := sm.LoadValidators(stateDB, h) - if err != nil { - return nil // should not happen - } - _, val := vals.GetByAddress(privValAddress) - return val + vals, err := env.StateStore.LoadValidators(h) + if err != nil { + return nil } - - return nil + privValAddress := env.PubKey.Address() + _, val := vals.GetByAddress(privValAddress) + return val } diff --git a/rpc/core/tx.go b/rpc/core/tx.go index e7e2582f6..53e32d9ef 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -1,15 +1,14 @@ package core import ( + "errors" "fmt" "sort" - "github.com/pkg/errors" - tmmath "github.com/tendermint/tendermint/libs/math" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/state/txindex/null" "github.com/tendermint/tendermint/types" ) @@ -20,11 +19,11 @@ import ( // More: https://docs.tendermint.com/master/rpc/#/Info/tx func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { // if index is disabled, return error - if _, ok := txIndexer.(*null.TxIndex); ok { + if _, ok := env.TxIndexer.(*null.TxIndex); ok { return nil, fmt.Errorf("transaction indexing is disabled") } - r, err := txIndexer.Get(hash) + r, err := env.TxIndexer.Get(hash) if err != nil { return nil, err } @@ -38,7 +37,7 @@ func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error var proof types.TxProof if prove { - block := blockStore.LoadBlock(height) + block := env.BlockStore.LoadBlock(height) proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines } @@ -55,10 +54,10 @@ func Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error // TxSearch allows you to query for multiple transactions results. It returns a // list of transactions (maximum ?per_page entries) and the total count. 
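
// In the TxSearch diff below (and in UnconfirmedTxs earlier), page and
// per_page parameters change from int to *int, so an omitted parameter is
// distinguishable from an explicit zero. A sketch of the defaulting this
// enables, reusing the constants from the deleted pipe.go; the nil
// handling is assumed to mirror the new validators:

package main

import "fmt"

const (
	defaultPerPage = 30 // per the deleted pipe.go
	maxPerPage     = 100
)

func validatePerPage(perPagePtr *int) int {
	if perPagePtr == nil { // caller omitted per_page entirely
		return defaultPerPage
	}
	perPage := *perPagePtr
	if perPage < 1 {
		return defaultPerPage
	} else if perPage > maxPerPage {
		return maxPerPage
	}
	return perPage
}

func main() {
	fmt.Println(validatePerPage(nil)) // 30: parameter absent
	zero, big := 0, 1000
	fmt.Println(validatePerPage(&zero)) // 30: explicit zero is clamped up
	fmt.Println(validatePerPage(&big))  // 100: capped at maxPerPage
}
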
// More: https://docs.tendermint.com/master/rpc/#/Info/tx_search -func TxSearch(ctx *rpctypes.Context, query string, prove bool, page, perPage int, orderBy string) ( +func TxSearch(ctx *rpctypes.Context, query string, prove bool, pagePtr, perPagePtr *int, orderBy string) ( *ctypes.ResultTxSearch, error) { // if index is disabled, return error - if _, ok := txIndexer.(*null.TxIndex); ok { + if _, ok := env.TxIndexer.(*null.TxIndex); ok { return nil, errors.New("transaction indexing is disabled") } @@ -67,7 +66,7 @@ func TxSearch(ctx *rpctypes.Context, query string, prove bool, page, perPage int return nil, err } - results, err := txIndexer.Search(ctx.Context(), q) + results, err := env.TxIndexer.Search(ctx.Context(), q) if err != nil { return nil, err } @@ -94,8 +93,8 @@ func TxSearch(ctx *rpctypes.Context, query string, prove bool, page, perPage int // paginate results totalCount := len(results) - perPage = validatePerPage(perPage) - page, err = validatePage(page, perPage, totalCount) + perPage := validatePerPage(perPagePtr) + page, err := validatePage(pagePtr, perPage, totalCount) if err != nil { return nil, err } @@ -108,12 +107,12 @@ func TxSearch(ctx *rpctypes.Context, query string, prove bool, page, perPage int var proof types.TxProof if prove { - block := blockStore.LoadBlock(r.Height) + block := env.BlockStore.LoadBlock(r.Height) proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines } apiResults = append(apiResults, &ctypes.ResultTx{ - Hash: r.Tx.Hash(), + Hash: types.Tx(r.Tx).Hash(), Height: r.Height, Index: r.Index, TxResult: r.Result, diff --git a/rpc/core/types/codec.go b/rpc/core/types/codec.go deleted file mode 100644 index 8e0b5303f..000000000 --- a/rpc/core/types/codec.go +++ /dev/null @@ -1,12 +0,0 @@ -package coretypes - -import ( - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/types" -) - -func RegisterAmino(cdc *amino.Codec) { - types.RegisterEventDatas(cdc) - types.RegisterBlockAmino(cdc) -} diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index e5b7b9819..0327e6ee7 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -7,8 +7,8 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/p2p" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -134,8 +134,8 @@ type ResultValidators struct { // ConsensusParams for given height type ResultConsensusParams struct { - BlockHeight int64 `json:"block_height"` - ConsensusParams types.ConsensusParams `json:"consensus_params"` + BlockHeight int64 `json:"block_height"` + ConsensusParams tmproto.ConsensusParams `json:"consensus_params"` } // Info about the consensus state. @@ -174,6 +174,11 @@ type ResultBroadcastTxCommit struct { Height int64 `json:"height"` } +// ResultCheckTx wraps abci.ResponseCheckTx. 
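
// ResultCheckTx, defined just below, is what the new check_tx route (see
// rpc/core/mempool.go and routes.go above) returns. A hypothetical smoke
// test against a local node; the address and the base64 encoding of
// byte-slice parameters are assumptions:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// "dHg=" is base64("tx")
	req := []byte(`{"jsonrpc":"2.0","id":1,"method":"check_tx","params":{"tx":"dHg="}}`)
	resp, err := http.Post("http://localhost:26657", "application/json", bytes.NewReader(req))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // JSON-RPC envelope around ResultCheckTx
}
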
+type ResultCheckTx struct { + abci.ResponseCheckTx +} + // Result of querying for a tx type ResultTx struct { Hash bytes.HexBytes `json:"hash"` diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go index 8216c192e..62c6b66c1 100644 --- a/rpc/grpc/api.go +++ b/rpc/grpc/api.go @@ -5,7 +5,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" core "github.com/tendermint/tendermint/rpc/core" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) type broadcastAPI struct { diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go index f7fdf6b53..13b2e71a0 100644 --- a/rpc/grpc/types.pb.go +++ b/rpc/grpc/types.pb.go @@ -1,15 +1,12 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: rpc/grpc/types.proto +// source: tendermint/rpc/grpc/types.proto package coregrpc import ( - bytes "bytes" context "context" fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" - golang_proto "github.com/golang/protobuf/proto" types "github.com/tendermint/tendermint/abci/types" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -21,7 +18,6 @@ import ( // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal -var _ = golang_proto.Marshal var _ = fmt.Errorf var _ = math.Inf @@ -32,16 +28,13 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type RequestPing struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } func (m *RequestPing) Reset() { *m = RequestPing{} } func (m *RequestPing) String() string { return proto.CompactTextString(m) } func (*RequestPing) ProtoMessage() {} func (*RequestPing) Descriptor() ([]byte, []int) { - return fileDescriptor_15f63baabf91876a, []int{0} + return fileDescriptor_0ffff5682c662b95, []int{0} } func (m *RequestPing) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -71,17 +64,14 @@ func (m *RequestPing) XXX_DiscardUnknown() { var xxx_messageInfo_RequestPing proto.InternalMessageInfo type RequestBroadcastTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` } func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } func (*RequestBroadcastTx) ProtoMessage() {} func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { - return fileDescriptor_15f63baabf91876a, []int{1} + return fileDescriptor_0ffff5682c662b95, []int{1} } func (m *RequestBroadcastTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -118,16 +108,13 @@ func (m *RequestBroadcastTx) GetTx() []byte { } type ResponsePing struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } func (m *ResponsePing) Reset() { *m = ResponsePing{} } func (m *ResponsePing) String() string { return proto.CompactTextString(m) } func (*ResponsePing) ProtoMessage() {} func (*ResponsePing) Descriptor() ([]byte, []int) { - return fileDescriptor_15f63baabf91876a, []int{2} + return fileDescriptor_0ffff5682c662b95, []int{2} } func (m *ResponsePing) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -157,18 +144,15 @@ func (m *ResponsePing) XXX_DiscardUnknown() { var 
xxx_messageInfo_ResponsePing proto.InternalMessageInfo type ResponseBroadcastTx struct { - CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx,proto3" json:"check_tx,omitempty"` - DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx,proto3" json:"deliver_tx,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx,proto3" json:"check_tx,omitempty"` + DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx,proto3" json:"deliver_tx,omitempty"` } func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } func (*ResponseBroadcastTx) ProtoMessage() {} func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { - return fileDescriptor_15f63baabf91876a, []int{3} + return fileDescriptor_0ffff5682c662b95, []int{3} } func (m *ResponseBroadcastTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -213,148 +197,35 @@ func (m *ResponseBroadcastTx) GetDeliverTx() *types.ResponseDeliverTx { func init() { proto.RegisterType((*RequestPing)(nil), "tendermint.rpc.grpc.RequestPing") - golang_proto.RegisterType((*RequestPing)(nil), "tendermint.rpc.grpc.RequestPing") proto.RegisterType((*RequestBroadcastTx)(nil), "tendermint.rpc.grpc.RequestBroadcastTx") - golang_proto.RegisterType((*RequestBroadcastTx)(nil), "tendermint.rpc.grpc.RequestBroadcastTx") proto.RegisterType((*ResponsePing)(nil), "tendermint.rpc.grpc.ResponsePing") - golang_proto.RegisterType((*ResponsePing)(nil), "tendermint.rpc.grpc.ResponsePing") proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") - golang_proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") } -func init() { proto.RegisterFile("rpc/grpc/types.proto", fileDescriptor_15f63baabf91876a) } -func init() { golang_proto.RegisterFile("rpc/grpc/types.proto", fileDescriptor_15f63baabf91876a) } - -var fileDescriptor_15f63baabf91876a = []byte{ - // 344 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xb1, 0x4e, 0xf3, 0x30, - 0x14, 0x85, 0xe5, 0xea, 0xd7, 0x0f, 0xdc, 0x96, 0x0e, 0x2e, 0x42, 0x28, 0x83, 0x55, 0x2a, 0x54, - 0x3a, 0x39, 0x52, 0x19, 0x99, 0x5a, 0x90, 0x10, 0x62, 0xa9, 0xa2, 0x4e, 0x2c, 0x25, 0x75, 0xac, - 0x34, 0x82, 0xc6, 0xc6, 0x71, 0x51, 0xfa, 0x38, 0x6c, 0x3c, 0x02, 0x0b, 0x12, 0x23, 0x23, 0x8f, - 0x00, 0xe1, 0x25, 0x18, 0x91, 0x93, 0x86, 0x78, 0x80, 0xb2, 0x44, 0x27, 0xd6, 0x39, 0x9f, 0xce, - 0xbd, 0xba, 0xb0, 0xa3, 0x24, 0x73, 0x43, 0xf3, 0xd1, 0x4b, 0xc9, 0x13, 0x2a, 0x95, 0xd0, 0x02, - 0xb7, 0x34, 0x8f, 0x03, 0xae, 0xe6, 0x51, 0xac, 0xa9, 0x92, 0x8c, 0x1a, 0x83, 0xd3, 0xd5, 0xb3, - 0x48, 0x05, 0x13, 0xe9, 0x2b, 0xbd, 0x74, 0x73, 0x9f, 0x1b, 0x8a, 0x50, 0x54, 0xaa, 0x08, 0x3b, - 0xbb, 0xfe, 0x94, 0x45, 0x05, 0xce, 0x86, 0x76, 0xb6, 0xa1, 0xee, 0xf1, 0xdb, 0x05, 0x4f, 0xf4, - 0x28, 0x8a, 0xc3, 0xce, 0x01, 0xe0, 0xd5, 0xef, 0x50, 0x09, 0x3f, 0x60, 0x7e, 0xa2, 0xc7, 0x29, - 0x6e, 0x42, 0x4d, 0xa7, 0x7b, 0xa8, 0x8d, 0x7a, 0x0d, 0xaf, 0xa6, 0xd3, 0x4e, 0x13, 0x1a, 0x1e, - 0x4f, 0xa4, 0x88, 0x13, 0x9e, 0xa7, 0xee, 0x11, 0xb4, 0xca, 0x07, 0x3b, 0x37, 0x80, 0x4d, 0x36, - 0xe3, 0xec, 0x7a, 0xb2, 0x4a, 0xd7, 0xfb, 0x5d, 0x6a, 0x0d, 0x61, 0x2a, 0xd1, 0xa2, 0x4c, 0x99, - 0x3e, 0x31, 0xf6, 0x71, 0xea, 0x6d, 
0xb0, 0x42, 0xe0, 0x33, 0x80, 0x80, 0xdf, 0x44, 0x77, 0x5c, - 0x19, 0x48, 0x2d, 0x87, 0xf4, 0xfe, 0x80, 0x9c, 0x16, 0x81, 0x71, 0xea, 0x6d, 0x05, 0xa5, 0xec, - 0x3f, 0x21, 0x68, 0x7c, 0x77, 0x1b, 0x8c, 0xce, 0xf1, 0x05, 0xfc, 0x33, 0xe5, 0x71, 0x9b, 0xfe, - 0xb0, 0x57, 0x6a, 0x2d, 0xc5, 0xd9, 0xff, 0xc5, 0x51, 0x6d, 0x00, 0x5f, 0x41, 0xdd, 0x1e, 0xfc, - 0x70, 0x1d, 0xd3, 0x32, 0x3a, 0xbd, 0xb5, 0x68, 0xcb, 0x39, 0x1c, 0x7d, 0xbe, 0x13, 0xf4, 0x90, - 0x11, 0xf4, 0x98, 0x11, 0xf4, 0x92, 0x11, 0xf4, 0x9a, 0x11, 0xf4, 0x96, 0x11, 0xf4, 0xfc, 0x41, - 0xd0, 0x65, 0x3f, 0x8c, 0xf4, 0x6c, 0x31, 0xa5, 0x4c, 0xcc, 0xdd, 0x8a, 0x68, 0xcb, 0xf2, 0xa4, - 0x8e, 0x99, 0x50, 0xdc, 0x88, 0xe9, 0xff, 0xfc, 0x02, 0x8e, 0xbe, 0x02, 0x00, 0x00, 0xff, 0xff, - 0x30, 0xfd, 0xaa, 0xac, 0x6e, 0x02, 0x00, 0x00, -} - -func (this *RequestPing) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RequestPing) - if !ok { - that2, ok := that.(RequestPing) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *RequestBroadcastTx) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RequestBroadcastTx) - if !ok { - that2, ok := that.(RequestBroadcastTx) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Tx, that1.Tx) { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ResponsePing) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResponsePing) - if !ok { - that2, ok := that.(ResponsePing) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ResponseBroadcastTx) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResponseBroadcastTx) - if !ok { - that2, ok := that.(ResponseBroadcastTx) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.CheckTx.Equal(that1.CheckTx) { - return false - } - if !this.DeliverTx.Equal(that1.DeliverTx) { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true +func init() { proto.RegisterFile("tendermint/rpc/grpc/types.proto", fileDescriptor_0ffff5682c662b95) } + +var fileDescriptor_0ffff5682c662b95 = []byte{ + // 316 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2f, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x2a, 0x48, 0xd6, 0x4f, 0x07, 0x11, 0x25, 0x95, + 0x05, 0xa9, 0xc5, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xc2, 0x08, 0x05, 0x7a, 0x45, 0x05, + 0xc9, 0x7a, 0x20, 0x05, 0x52, 0xd2, 0x48, 0xba, 0x12, 0x93, 0x92, 0x33, 0x91, 0x75, 0x28, 0xf1, + 0x72, 0x71, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x04, 0x64, 0xe6, 0xa5, 0x2b, 0xa9, 0x70, + 0x09, 0x41, 0xb9, 0x4e, 0x45, 0xf9, 0x89, 0x29, 0xc9, 0x89, 0xc5, 0x25, 0x21, 0x15, 0x42, 0x7c, + 
0x5c, 0x4c, 0x25, 0x15, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x4c, 0x25, 0x15, 0x4a, 0x7c, + 0x5c, 0x3c, 0x41, 0xa9, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x60, 0x5d, 0x53, 0x19, 0xb9, 0x84, + 0x61, 0x02, 0xc8, 0xfa, 0xac, 0xb9, 0x38, 0x92, 0x33, 0x52, 0x93, 0xb3, 0xe3, 0xa1, 0xba, 0xb9, + 0x8d, 0x14, 0xf4, 0x90, 0x5c, 0x08, 0x72, 0x8c, 0x1e, 0x4c, 0x9f, 0x33, 0x48, 0x61, 0x48, 0x45, + 0x10, 0x7b, 0x32, 0x84, 0x21, 0xe4, 0xc8, 0xc5, 0x95, 0x92, 0x9a, 0x93, 0x59, 0x96, 0x5a, 0x04, + 0xd2, 0xce, 0x04, 0xd6, 0xae, 0x84, 0x53, 0xbb, 0x0b, 0x44, 0x69, 0x48, 0x45, 0x10, 0x67, 0x0a, + 0x8c, 0x69, 0xb4, 0x97, 0x91, 0x8b, 0x07, 0xee, 0x1e, 0xc7, 0x00, 0x4f, 0x21, 0x6f, 0x2e, 0x16, + 0x90, 0x83, 0x85, 0x50, 0x9c, 0x01, 0x0b, 0x28, 0x3d, 0xa4, 0x80, 0x90, 0x52, 0xc4, 0xa1, 0x02, + 0xe1, 0x6b, 0xa1, 0x04, 0x2e, 0x6e, 0x64, 0xcf, 0xaa, 0xe3, 0x33, 0x13, 0x49, 0xa1, 0x94, 0x06, + 0x5e, 0xa3, 0x91, 0x54, 0x3a, 0xf9, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, + 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, + 0x94, 0x51, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0x52, 0xf4, 0x62, + 0x49, 0x1f, 0xd6, 0xc9, 0xf9, 0x45, 0xa9, 0x20, 0x46, 0x12, 0x1b, 0x38, 0xc6, 0x8d, 0x01, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xf6, 0x4b, 0x02, 0xd8, 0x46, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -470,7 +341,7 @@ var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "rpc/grpc/types.proto", + Metadata: "tendermint/rpc/grpc/types.proto", } func (m *RequestPing) Marshal() (dAtA []byte, err error) { @@ -493,10 +364,6 @@ func (m *RequestPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } return len(dAtA) - i, nil } @@ -520,10 +387,6 @@ func (m *RequestBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Tx) > 0 { i -= len(m.Tx) copy(dAtA[i:], m.Tx) @@ -554,10 +417,6 @@ func (m *ResponsePing) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } return len(dAtA) - i, nil } @@ -581,10 +440,6 @@ func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.DeliverTx != nil { { size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) @@ -623,130 +478,12 @@ func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func NewPopulatedRequestPing(r randyTypes, easy bool) *RequestPing { - this := &RequestPing{} - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 1) - } - return this -} - -func NewPopulatedRequestBroadcastTx(r randyTypes, easy bool) *RequestBroadcastTx { - this := &RequestBroadcastTx{} - v1 := r.Intn(100) - this.Tx = make([]byte, v1) - for i := 0; i < v1; i++ { - this.Tx[i] = byte(r.Intn(256)) - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 2) - } - return this -} - -func NewPopulatedResponsePing(r randyTypes, easy bool) *ResponsePing { - this := &ResponsePing{} - if !easy && r.Intn(10) != 0 { - 
this.XXX_unrecognized = randUnrecognizedTypes(r, 1) - } - return this -} - -func NewPopulatedResponseBroadcastTx(r randyTypes, easy bool) *ResponseBroadcastTx { - this := &ResponseBroadcastTx{} - if r.Intn(5) != 0 { - this.CheckTx = types.NewPopulatedResponseCheckTx(r, easy) - } - if r.Intn(5) != 0 { - this.DeliverTx = types.NewPopulatedResponseDeliverTx(r, easy) - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) - } - return this -} - -type randyTypes interface { - Float32() float32 - Float64() float64 - Int63() int64 - Int31() int32 - Uint32() uint32 - Intn(n int) int -} - -func randUTF8RuneTypes(r randyTypes) rune { - ru := r.Intn(62) - if ru < 10 { - return rune(ru + 48) - } else if ru < 36 { - return rune(ru + 55) - } - return rune(ru + 61) -} -func randStringTypes(r randyTypes) string { - v2 := r.Intn(100) - tmps := make([]rune, v2) - for i := 0; i < v2; i++ { - tmps[i] = randUTF8RuneTypes(r) - } - return string(tmps) -} -func randUnrecognizedTypes(r randyTypes, maxFieldNumber int) (dAtA []byte) { - l := r.Intn(5) - for i := 0; i < l; i++ { - wire := r.Intn(4) - if wire == 3 { - wire = 5 - } - fieldNumber := maxFieldNumber + r.Intn(100) - dAtA = randFieldTypes(dAtA, r, fieldNumber, wire) - } - return dAtA -} -func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte { - key := uint32(fieldNumber)<<3 | uint32(wire) - switch wire { - case 0: - dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - v3 := r.Int63() - if r.Intn(2) == 0 { - v3 *= -1 - } - dAtA = encodeVarintPopulateTypes(dAtA, uint64(v3)) - case 1: - dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) - case 2: - dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - ll := r.Intn(100) - dAtA = encodeVarintPopulateTypes(dAtA, uint64(ll)) - for j := 0; j < ll; j++ { - dAtA = append(dAtA, byte(r.Intn(256))) - } - default: - dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) - } - return dAtA -} -func encodeVarintPopulateTypes(dAtA []byte, v uint64) []byte { - for v >= 1<<7 { - dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) - v >>= 7 - } - dAtA = append(dAtA, uint8(v)) - return dAtA -} func (m *RequestPing) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -760,9 +497,6 @@ func (m *RequestBroadcastTx) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -772,9 +506,6 @@ func (m *ResponsePing) Size() (n int) { } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -792,9 +523,6 @@ func (m *ResponseBroadcastTx) Size() (n int) { l = m.DeliverTx.Size() n += 1 + l + sovTypes(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } @@ -848,7 +576,6 @@ func (m *RequestPing) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -936,7 +663,6 @@ func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -990,7 +716,6 @@ func (m *ResponsePing) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1116,7 +841,6 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } diff --git a/rpc/grpc/types.proto b/rpc/grpc/types.proto deleted file mode 100644 index fc778cacd..000000000 --- a/rpc/grpc/types.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; -package tendermint.rpc.grpc; -option go_package = "github.com/tendermint/tendermint/rpc/grpc;coregrpc"; - -import "third_party/proto/gogoproto/gogo.proto"; -import "abci/types/types.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.goproto_registration) = true; -// Generate tests -option (gogoproto.populate_all) = true; -option (gogoproto.equal_all) = true; -option (gogoproto.testgen_all) = true; -//---------------------------------------- -// Message types - -//---------------------------------------- -// Request types - -message RequestPing {} - -message RequestBroadcastTx { - bytes tx = 1; -} - -//---------------------------------------- -// Response types - -message ResponsePing {} - -message ResponseBroadcastTx { - tendermint.abci.types.ResponseCheckTx check_tx = 1; - tendermint.abci.types.ResponseDeliverTx deliver_tx = 2; -} - -//---------------------------------------- -// Service Definition - -service BroadcastAPI { - rpc Ping(RequestPing) returns (ResponsePing); - rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx); -} diff --git a/rpc/grpc/typespb_test.go b/rpc/grpc/typespb_test.go deleted file mode 100644 index d0a6c8654..000000000 --- a/rpc/grpc/typespb_test.go +++ /dev/null @@ -1,522 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: rpc/grpc/types.proto - -package coregrpc - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" - github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" - proto "github.com/gogo/protobuf/proto" - golang_proto "github.com/golang/protobuf/proto" - _ "github.com/tendermint/tendermint/abci/types" - math "math" - math_rand "math/rand" - testing "testing" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. 
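
// The generated round-trip tests being deleted below came from the
// populate/equal/testgen gogoproto options that the relocated proto file
// drops. A hand-written equivalent for the one message that carries a
// payload might look like this (illustrative, not part of the change):

package coregrpc

import (
	"testing"

	"github.com/gogo/protobuf/proto"
)

func TestRequestBroadcastTxRoundTrip(t *testing.T) {
	in := &RequestBroadcastTx{Tx: []byte("tx")}
	b, err := proto.Marshal(in)
	if err != nil {
		t.Fatal(err)
	}
	out := &RequestBroadcastTx{}
	if err := proto.Unmarshal(b, out); err != nil {
		t.Fatal(err)
	}
	if string(out.Tx) != "tx" {
		t.Fatalf("round trip mismatch: got %q", out.Tx)
	}
}
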
-var _ = proto.Marshal -var _ = golang_proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func TestRequestPingProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestPing(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestPing{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestRequestPingMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestPing(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestPing{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestBroadcastTxProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestBroadcastTx(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestBroadcastTx{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestRequestBroadcastTxMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestBroadcastTx(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestBroadcastTx{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponsePingProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponsePing(popr, 
false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponsePing{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestResponsePingMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponsePing(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponsePing{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponseBroadcastTxProto(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseBroadcastTx(popr, false) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseBroadcastTx{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - littlefuzz := make([]byte, len(dAtA)) - copy(littlefuzz, dAtA) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } - if len(littlefuzz) > 0 { - fuzzamount := 100 - for i := 0; i < fuzzamount; i++ { - littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) - littlefuzz = append(littlefuzz, byte(popr.Intn(256))) - } - // shouldn't panic - _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) - } -} - -func TestResponseBroadcastTxMarshalTo(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseBroadcastTx(popr, false) - size := p.Size() - dAtA := make([]byte, size) - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - _, err := p.MarshalTo(dAtA) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseBroadcastTx{} - if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - for i := range dAtA { - dAtA[i] = byte(popr.Intn(256)) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestPingJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestPing(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestPing{} - err = 
github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestRequestBroadcastTxJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestBroadcastTx(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &RequestBroadcastTx{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestResponsePingJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponsePing(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponsePing{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestResponseBroadcastTxJSON(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseBroadcastTx(popr, true) - marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} - jsondata, err := marshaler.MarshalToString(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - msg := &ResponseBroadcastTx{} - err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) - } -} -func TestRequestPingProtoText(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestPing(popr, true) - dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) - msg := &RequestPing{} - if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestPingProtoCompactText(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestPing(popr, true) - dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) - msg := &RequestPing{} - if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestBroadcastTxProtoText(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestBroadcastTx(popr, true) - dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) - msg := &RequestBroadcastTx{} - if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestBroadcastTxProtoCompactText(t *testing.T) { - 
seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestBroadcastTx(popr, true) - dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) - msg := &RequestBroadcastTx{} - if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponsePingProtoText(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponsePing(popr, true) - dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) - msg := &ResponsePing{} - if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponsePingProtoCompactText(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponsePing(popr, true) - dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) - msg := &ResponsePing{} - if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponseBroadcastTxProtoText(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseBroadcastTx(popr, true) - dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) - msg := &ResponseBroadcastTx{} - if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestResponseBroadcastTxProtoCompactText(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseBroadcastTx(popr, true) - dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) - msg := &ResponseBroadcastTx{} - if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - if !p.Equal(msg) { - t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) - } -} - -func TestRequestPingSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestPing(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestRequestBroadcastTxSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedRequestBroadcastTx(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - 
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestResponsePingSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponsePing(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -func TestResponseBroadcastTxSize(t *testing.T) { - seed := time.Now().UnixNano() - popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedResponseBroadcastTx(popr, true) - size2 := github_com_gogo_protobuf_proto.Size(p) - dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) - if err != nil { - t.Fatalf("seed = %d, err = %v", seed, err) - } - size := p.Size() - if len(dAtA) != size { - t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) - } - if size2 != size { - t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) - } - size3 := github_com_gogo_protobuf_proto.Size(p) - if size3 != size { - t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) - } -} - -//These tests are generated by github.com/gogo/protobuf/plugin/testgen diff --git a/rpc/lib/client/args_test.go b/rpc/jsonrpc/client/args_test.go similarity index 86% rename from rpc/lib/client/args_test.go rename to rpc/jsonrpc/client/args_test.go index e3dd09e8f..2506f3073 100644 --- a/rpc/lib/client/args_test.go +++ b/rpc/jsonrpc/client/args_test.go @@ -1,11 +1,10 @@ -package rpcclient +package client import ( "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - amino "github.com/tendermint/go-amino" ) type Tx []byte @@ -28,11 +27,9 @@ func TestArgToJSON(t *testing.T) { {Foo{7, "hello"}, `{"Bar":"7","Baz":"hello"}`}, } - cdc := amino.NewCodec() - for i, tc := range cases { args := map[string]interface{}{"data": tc.input} - err := argsToJSON(cdc, args) + err := argsToJSON(args) require.Nil(err, "%d: %+v", i, err) require.Equal(1, len(args), "%d", i) data, ok := args["data"].(string) diff --git a/rpc/lib/client/decode.go b/rpc/jsonrpc/client/decode.go similarity index 70% rename from rpc/lib/client/decode.go rename to rpc/jsonrpc/client/decode.go index dd4a2e4c6..d0f462076 100644 --- a/rpc/lib/client/decode.go +++ b/rpc/jsonrpc/client/decode.go @@ -1,17 +1,15 @@ -package rpcclient +package client import ( "encoding/json" + "errors" + "fmt" - "github.com/pkg/errors" - - amino "github.com/tendermint/go-amino" - - types "github.com/tendermint/tendermint/rpc/lib/types" + tmjson "github.com/tendermint/tendermint/libs/json" + types "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func unmarshalResponseBytes( - cdc *amino.Codec, responseBytes []byte, expectedID types.JSONRPCIntID, result interface{}, @@ -21,7 +19,7 @@ func 
unmarshalResponseBytes( // into the correct type. response := &types.RPCResponse{} if err := json.Unmarshal(responseBytes, response); err != nil { - return nil, errors.Wrap(err, "error unmarshalling") + return nil, fmt.Errorf("error unmarshalling: %w", err) } if response.Error != nil { @@ -29,19 +27,18 @@ func unmarshalResponseBytes( } if err := validateAndVerifyID(response, expectedID); err != nil { - return nil, errors.Wrap(err, "wrong ID") + return nil, fmt.Errorf("wrong ID: %w", err) } // Unmarshal the RawMessage into the result. - if err := cdc.UnmarshalJSON(response.Result, result); err != nil { - return nil, errors.Wrap(err, "error unmarshalling result") + if err := tmjson.Unmarshal(response.Result, result); err != nil { + return nil, fmt.Errorf("error unmarshalling result: %w", err) } return result, nil } func unmarshalResponseBytesArray( - cdc *amino.Codec, responseBytes []byte, expectedIDs []types.JSONRPCIntID, results []interface{}, @@ -52,14 +49,14 @@ func unmarshalResponseBytesArray( ) if err := json.Unmarshal(responseBytes, &responses); err != nil { - return nil, errors.Wrap(err, "error unmarshalling") + return nil, fmt.Errorf("error unmarshalling: %w", err) } // No response error checking here as there may be a mixture of successful // and unsuccessful responses. if len(results) != len(responses) { - return nil, errors.Errorf( + return nil, fmt.Errorf( "expected %d result objects into which to inject responses, but got %d", len(responses), len(results), @@ -72,16 +69,16 @@ func unmarshalResponseBytesArray( for i, resp := range responses { ids[i], ok = resp.ID.(types.JSONRPCIntID) if !ok { - return nil, errors.Errorf("expected JSONRPCIntID, got %T", resp.ID) + return nil, fmt.Errorf("expected JSONRPCIntID, got %T", resp.ID) } } if err := validateResponseIDs(ids, expectedIDs); err != nil { - return nil, errors.Wrap(err, "wrong IDs") + return nil, fmt.Errorf("wrong IDs: %w", err) } for i := 0; i < len(responses); i++ { - if err := cdc.UnmarshalJSON(responses[i].Result, results[i]); err != nil { - return nil, errors.Wrapf(err, "error unmarshalling #%d result", i) + if err := tmjson.Unmarshal(responses[i].Result, results[i]); err != nil { + return nil, fmt.Errorf("error unmarshalling #%d result: %w", i, err) } } @@ -98,7 +95,7 @@ func validateResponseIDs(ids, expectedIDs []types.JSONRPCIntID) error { if m[id] { delete(m, id) } else { - return errors.Errorf("unsolicited ID #%d: %v", i, id) + return fmt.Errorf("unsolicited ID #%d: %v", i, id) } } @@ -112,7 +109,7 @@ func validateAndVerifyID(res *types.RPCResponse, expectedID types.JSONRPCIntID) return err } if expectedID != res.ID.(types.JSONRPCIntID) { // validateResponseID ensured res.ID has the right type - return errors.Errorf("response ID (%d) does not match request ID (%d)", res.ID, expectedID) + return fmt.Errorf("response ID (%d) does not match request ID (%d)", res.ID, expectedID) } return nil } @@ -123,7 +120,7 @@ func validateResponseID(id interface{}) error { } _, ok := id.(types.JSONRPCIntID) if !ok { - return errors.Errorf("expected JSONRPCIntID, but got: %T", id) + return fmt.Errorf("expected JSONRPCIntID, but got: %T", id) } return nil } diff --git a/rpc/lib/client/encode.go b/rpc/jsonrpc/client/encode.go similarity index 66% rename from rpc/lib/client/encode.go rename to rpc/jsonrpc/client/encode.go index 227367f59..e085f51a2 100644 --- a/rpc/lib/client/encode.go +++ b/rpc/jsonrpc/client/encode.go @@ -1,20 +1,20 @@ -package rpcclient +package client import ( "fmt" "net/url" "reflect" - amino 
"github.com/tendermint/go-amino" + tmjson "github.com/tendermint/tendermint/libs/json" ) -func argsToURLValues(cdc *amino.Codec, args map[string]interface{}) (url.Values, error) { +func argsToURLValues(args map[string]interface{}) (url.Values, error) { values := make(url.Values) if len(args) == 0 { return values, nil } - err := argsToJSON(cdc, args) + err := argsToJSON(args) if err != nil { return nil, err } @@ -26,7 +26,7 @@ func argsToURLValues(cdc *amino.Codec, args map[string]interface{}) (url.Values, return values, nil } -func argsToJSON(cdc *amino.Codec, args map[string]interface{}) error { +func argsToJSON(args map[string]interface{}) error { for k, v := range args { rt := reflect.TypeOf(v) isByteSlice := rt.Kind() == reflect.Slice && rt.Elem().Kind() == reflect.Uint8 @@ -36,7 +36,7 @@ func argsToJSON(cdc *amino.Codec, args map[string]interface{}) error { continue } - data, err := cdc.MarshalJSON(v) + data, err := tmjson.Marshal(v) if err != nil { return err } diff --git a/rpc/lib/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go similarity index 64% rename from rpc/lib/client/http_json_client.go rename to rpc/jsonrpc/client/http_json_client.go index 5f10bf294..59727390a 100644 --- a/rpc/lib/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -1,7 +1,8 @@ -package rpcclient +package client import ( "bytes" + "context" "encoding/json" "fmt" "io/ioutil" @@ -9,12 +10,9 @@ import ( "net/http" "net/url" "strings" - "sync" - "github.com/pkg/errors" - amino "github.com/tendermint/go-amino" - - types "github.com/tendermint/tendermint/rpc/lib/types" + tmsync "github.com/tendermint/tendermint/libs/sync" + types "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) const ( @@ -68,7 +66,7 @@ func (u parsedURL) GetHostWithPath() string { // Get a trimmed address - useful for WS connections func (u parsedURL) GetTrimmedHostWithPath() string { // replace / with . for http requests (kvstore domain) - return strings.Replace(u.GetHostWithPath(), "/", ".", -1) + return strings.ReplaceAll(u.GetHostWithPath(), "/", ".") } // Get a trimmed address with protocol - useful as address in RPC connections @@ -81,60 +79,52 @@ func (u parsedURL) GetTrimmedURL() string { // HTTPClient is a common interface for JSON-RPC HTTP clients. type HTTPClient interface { // Call calls the given method with the params and returns a result. - Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) - // Codec returns an amino codec used. - Codec() *amino.Codec - // SetCodec sets an amino codec. - SetCodec(*amino.Codec) + Call(ctx context.Context, method string, params map[string]interface{}, result interface{}) (interface{}, error) } -// JSONRPCCaller implementers can facilitate calling the JSON-RPC endpoint. -type JSONRPCCaller interface { - Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) +// Caller implementers can facilitate calling the JSON-RPC endpoint. +type Caller interface { + Call(ctx context.Context, method string, params map[string]interface{}, result interface{}) (interface{}, error) } //------------------------------------------------------------- -// JSONRPCClient is a JSON-RPC client, which sends POST HTTP requests to the +// Client is a JSON-RPC client, which sends POST HTTP requests to the // remote server. // -// Request values are amino encoded. Response is expected to be amino encoded. -// New amino codec is used if no other codec was set using SetCodec. 
-// -// JSONRPCClient is safe for concurrent use by multiple goroutines. -type JSONRPCClient struct { +// Client is safe for concurrent use by multiple goroutines. +type Client struct { address string username string password string client *http.Client - cdc *amino.Codec - mtx sync.Mutex + mtx tmsync.Mutex nextReqID int } -var _ HTTPClient = (*JSONRPCClient)(nil) +var _ HTTPClient = (*Client)(nil) -// Both JSONRPCClient and JSONRPCRequestBatch can facilitate calls to the JSON +// Both Client and RequestBatch can facilitate calls to the JSON // RPC endpoint. -var _ JSONRPCCaller = (*JSONRPCClient)(nil) -var _ JSONRPCCaller = (*JSONRPCRequestBatch)(nil) +var _ Caller = (*Client)(nil) +var _ Caller = (*RequestBatch)(nil) -// NewJSONRPCClient returns a JSONRPCClient pointed at the given address. +// New returns a Client pointed at the given address. // An error is returned on invalid remote. The function panics when remote is nil. -func NewJSONRPCClient(remote string) (*JSONRPCClient, error) { +func New(remote string) (*Client, error) { httpClient, err := DefaultHTTPClient(remote) if err != nil { return nil, err } - return NewJSONRPCClientWithHTTPClient(remote, httpClient) + return NewWithHTTPClient(remote, httpClient) } -// NewJSONRPCClientWithHTTPClient returns a JSONRPCClient pointed at the given +// NewWithHTTPClient returns a Client pointed at the given // address using a custom http client. An error is returned on invalid remote. // The function panics when remote is nil. -func NewJSONRPCClientWithHTTPClient(remote string, client *http.Client) (*JSONRPCClient, error) { +func NewWithHTTPClient(remote string, client *http.Client) (*Client, error) { if client == nil { panic("nil http.Client provided") } @@ -150,67 +140,72 @@ func NewJSONRPCClientWithHTTPClient(remote string, client *http.Client) (*JSONRP username := parsedURL.User.Username() password, _ := parsedURL.User.Password() - rpcClient := &JSONRPCClient{ + rpcClient := &Client{ address: address, username: username, password: password, client: client, - cdc: amino.NewCodec(), } return rpcClient, nil } // Call issues a POST HTTP request. Requests are JSON encoded. Content-Type: -// text/json. -func (c *JSONRPCClient) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) { +// application/json. 
+func (c *Client) Call( + ctx context.Context, + method string, + params map[string]interface{}, + result interface{}, +) (interface{}, error) { id := c.nextRequestID() - request, err := types.MapToRequest(c.cdc, id, method, params) + request, err := types.MapToRequest(id, method, params) if err != nil { - return nil, errors.Wrap(err, "failed to encode params") + return nil, fmt.Errorf("failed to encode params: %w", err) } requestBytes, err := json.Marshal(request) if err != nil { - return nil, errors.Wrap(err, "failed to marshal request") + return nil, fmt.Errorf("failed to marshal request: %w", err) } requestBuf := bytes.NewBuffer(requestBytes) - httpRequest, err := http.NewRequest(http.MethodPost, c.address, requestBuf) + httpRequest, err := http.NewRequestWithContext(ctx, http.MethodPost, c.address, requestBuf) if err != nil { - return nil, errors.Wrap(err, "Request failed") + return nil, fmt.Errorf("request failed: %w", err) } - httpRequest.Header.Set("Content-Type", "text/json") + + httpRequest.Header.Set("Content-Type", "application/json") + if c.username != "" || c.password != "" { httpRequest.SetBasicAuth(c.username, c.password) } + httpResponse, err := c.client.Do(httpRequest) if err != nil { - return nil, errors.Wrap(err, "Post failed") + return nil, fmt.Errorf("post failed: %w", err) } - defer httpResponse.Body.Close() // nolint: errcheck + + defer httpResponse.Body.Close() responseBytes, err := ioutil.ReadAll(httpResponse.Body) if err != nil { - return nil, errors.Wrap(err, "failed to read response body") + return nil, fmt.Errorf("failed to read response body: %w", err) } - return unmarshalResponseBytes(c.cdc, responseBytes, id, result) + return unmarshalResponseBytes(responseBytes, id, result) } -func (c *JSONRPCClient) Codec() *amino.Codec { return c.cdc } -func (c *JSONRPCClient) SetCodec(cdc *amino.Codec) { c.cdc = cdc } - // NewRequestBatch starts a batch of requests for this client. 
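To make the new signature concrete, here is a minimal, hypothetical usage sketch of the context-aware Call above. The server address, the "echo" method, and the ResultEcho type are assumptions for illustration, not part of this change.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/tendermint/tendermint/rpc/jsonrpc/client"
)

type ResultEcho struct {
	Value string `json:"value"`
}

func main() {
	// New replaces NewJSONRPCClient; the remote address here is made up.
	c, err := client.New("tcp://localhost:26657")
	if err != nil {
		panic(err)
	}

	// The context now bounds the entire HTTP round trip.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	result := new(ResultEcho)
	if _, err := c.Call(ctx, "echo", map[string]interface{}{"arg": "hi"}, result); err != nil {
		panic(err)
	}
	fmt.Println(result.Value)
}

The batching variant introduced below follows the same call shape, so this sketch carries over.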
-func (c *JSONRPCClient) NewRequestBatch() *JSONRPCRequestBatch { - return &JSONRPCRequestBatch{ +func (c *Client) NewRequestBatch() *RequestBatch { + return &RequestBatch{ requests: make([]*jsonRPCBufferedRequest, 0), client: c, } } -func (c *JSONRPCClient) sendBatch(requests []*jsonRPCBufferedRequest) ([]interface{}, error) { +func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedRequest) ([]interface{}, error) { reqs := make([]types.RPCRequest, 0, len(requests)) results := make([]interface{}, 0, len(requests)) for _, req := range requests { @@ -221,26 +216,30 @@ func (c *JSONRPCClient) sendBatch(requests []*jsonRPCBufferedRequest) ([]interfa // serialize the array of requests into a single JSON object requestBytes, err := json.Marshal(reqs) if err != nil { - return nil, errors.Wrap(err, "failed to marshal requests") + return nil, fmt.Errorf("json marshal: %w", err) } - httpRequest, err := http.NewRequest(http.MethodPost, c.address, bytes.NewBuffer(requestBytes)) + httpRequest, err := http.NewRequestWithContext(ctx, http.MethodPost, c.address, bytes.NewBuffer(requestBytes)) if err != nil { - return nil, errors.Wrap(err, "Request failed") + return nil, fmt.Errorf("new request: %w", err) } - httpRequest.Header.Set("Content-Type", "text/json") + + httpRequest.Header.Set("Content-Type", "application/json") + if c.username != "" || c.password != "" { httpRequest.SetBasicAuth(c.username, c.password) } + httpResponse, err := c.client.Do(httpRequest) if err != nil { - return nil, errors.Wrap(err, "Post failed") + return nil, fmt.Errorf("post: %w", err) } - defer httpResponse.Body.Close() // nolint: errcheck + + defer httpResponse.Body.Close() responseBytes, err := ioutil.ReadAll(httpResponse.Body) if err != nil { - return nil, errors.Wrap(err, "failed to read response body") + return nil, fmt.Errorf("read response body: %w", err) } // collect ids to check responses IDs in unmarshalResponseBytesArray @@ -249,10 +248,10 @@ func (c *JSONRPCClient) sendBatch(requests []*jsonRPCBufferedRequest) ([]interfa ids[i] = req.request.ID.(types.JSONRPCIntID) } - return unmarshalResponseBytesArray(c.cdc, responseBytes, ids, results) + return unmarshalResponseBytesArray(responseBytes, ids, results) } -func (c *JSONRPCClient) nextRequestID() types.JSONRPCIntID { +func (c *Client) nextRequestID() types.JSONRPCIntID { c.mtx.Lock() id := c.nextReqID c.nextReqID++ @@ -269,37 +268,37 @@ type jsonRPCBufferedRequest struct { result interface{} // The result will be deserialized into this object. } -// JSONRPCRequestBatch allows us to buffer multiple request/response structures +// RequestBatch allows us to buffer multiple request/response structures // into a single batch request. Note that this batch acts like a FIFO queue, and // is thread-safe. -type JSONRPCRequestBatch struct { - client *JSONRPCClient +type RequestBatch struct { + client *Client - mtx sync.Mutex + mtx tmsync.Mutex requests []*jsonRPCBufferedRequest } // Count returns the number of enqueued requests waiting to be sent. -func (b *JSONRPCRequestBatch) Count() int { +func (b *RequestBatch) Count() int { b.mtx.Lock() defer b.mtx.Unlock() return len(b.requests) } -func (b *JSONRPCRequestBatch) enqueue(req *jsonRPCBufferedRequest) { +func (b *RequestBatch) enqueue(req *jsonRPCBufferedRequest) { b.mtx.Lock() defer b.mtx.Unlock() b.requests = append(b.requests, req) } // Clear empties out the request batch. 
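Continuing the sketch above: Call on a batch only enqueues, and Send performs one POST carrying every queued request (Clear, defined next, resets the queue without sending). The method name and result type remain the illustrative assumptions from the previous snippet.

// Reuses c, ctx, and ResultEcho from the sketch above.
batch := c.NewRequestBatch()

r1, r2 := new(ResultEcho), new(ResultEcho)
// Nothing is sent yet; both calls are merely enqueued (FIFO).
if _, err := batch.Call(ctx, "echo", map[string]interface{}{"arg": "a"}, r1); err != nil {
	panic(err)
}
if _, err := batch.Call(ctx, "echo", map[string]interface{}{"arg": "b"}, r2); err != nil {
	panic(err)
}

// One HTTP POST for both requests; the batch is cleared afterwards.
if _, err := batch.Send(ctx); err != nil {
	panic(err)
}
fmt.Println(r1.Value, r2.Value)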
-func (b *JSONRPCRequestBatch) Clear() int {
+func (b *RequestBatch) Clear() int {
	b.mtx.Lock()
	defer b.mtx.Unlock()
	return b.clear()
}

-func (b *JSONRPCRequestBatch) clear() int {
+func (b *RequestBatch) clear() int {
	count := len(b.requests)
	b.requests = make([]*jsonRPCBufferedRequest, 0)
	return count
@@ -308,24 +307,25 @@
// Send will attempt to send the current batch of enqueued requests, and then
// will clear out the requests once done. On success, this returns the
// deserialized list of results from each of the enqueued requests.
-func (b *JSONRPCRequestBatch) Send() ([]interface{}, error) {
+func (b *RequestBatch) Send(ctx context.Context) ([]interface{}, error) {
	b.mtx.Lock()
	defer func() {
		b.clear()
		b.mtx.Unlock()
	}()
-	return b.client.sendBatch(b.requests)
+	return b.client.sendBatch(ctx, b.requests)
}

// Call enqueues a request to call the given RPC method with the specified
-// parameters, in the same way that the `JSONRPCClient.Call` function would.
-func (b *JSONRPCRequestBatch) Call(
+// parameters, in the same way that the `Client.Call` function would.
+func (b *RequestBatch) Call(
+	_ context.Context,
	method string,
	params map[string]interface{},
	result interface{},
) (interface{}, error) {
	id := b.client.nextRequestID()
-	request, err := types.MapToRequest(b.client.cdc, id, method, params)
+	request, err := types.MapToRequest(id, method, params)
	if err != nil {
		return nil, err
	}
diff --git a/rpc/lib/client/http_json_client_test.go b/rpc/jsonrpc/client/http_json_client_test.go
similarity index 96%
rename from rpc/lib/client/http_json_client_test.go
rename to rpc/jsonrpc/client/http_json_client_test.go
index 23ea5fbe7..830259723 100644
--- a/rpc/lib/client/http_json_client_test.go
+++ b/rpc/jsonrpc/client/http_json_client_test.go
@@ -1,4 +1,4 @@
-package rpcclient
+package client

import (
	"testing"
diff --git a/rpc/jsonrpc/client/http_uri_client.go b/rpc/jsonrpc/client/http_uri_client.go
new file mode 100644
index 000000000..3f376ddb0
--- /dev/null
+++ b/rpc/jsonrpc/client/http_uri_client.go
@@ -0,0 +1,85 @@
+package client
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	types "github.com/tendermint/tendermint/rpc/jsonrpc/types"
+)
+
+const (
+	// URIClientRequestID is a request ID used by URIClient
+	URIClientRequestID = types.JSONRPCIntID(-1)
+)
+
+// URIClient is a JSON-RPC client, which sends POST form HTTP requests to the
+// remote server.
+//
+// URIClient is safe for concurrent use by multiple goroutines.
+type URIClient struct {
+	address string
+	client  *http.Client
+}
+
+var _ HTTPClient = (*URIClient)(nil)
+
+// NewURI returns a new client.
+// An error is returned on invalid remote.
+// The function panics when remote is nil.
+func NewURI(remote string) (*URIClient, error) {
+	parsedURL, err := newParsedURL(remote)
+	if err != nil {
+		return nil, err
+	}
+
+	httpClient, err := DefaultHTTPClient(remote)
+	if err != nil {
+		return nil, err
+	}
+
+	parsedURL.SetDefaultSchemeHTTP()
+
+	uriClient := &URIClient{
+		address: parsedURL.GetTrimmedURL(),
+		client:  httpClient,
+	}
+
+	return uriClient, nil
+}
+
+// Call issues a POST form HTTP request.
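A matching sketch for the URI client declared above, again with an assumed address and route; the Call implementation follows below. Parameters travel as an urlencoded form body rather than a JSON-RPC envelope, and the response comes back under the fixed URIClientRequestID.

// Hypothetical usage; reuses ctx and ResultEcho from the earlier sketch.
uc, err := client.NewURI("tcp://localhost:26657")
if err != nil {
	panic(err)
}

result := new(ResultEcho)
// Form-encodes the params and POSTs them to <address>/echo.
if _, err := uc.Call(ctx, "echo", map[string]interface{}{"arg": "hi"}, result); err != nil {
	panic(err)
}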
+func (c *URIClient) Call(ctx context.Context, method string, + params map[string]interface{}, result interface{}) (interface{}, error) { + + values, err := argsToURLValues(params) + if err != nil { + return nil, fmt.Errorf("failed to encode params: %w", err) + } + + req, err := http.NewRequestWithContext( + ctx, + http.MethodPost, + c.address+"/"+method, + strings.NewReader(values.Encode()), + ) + if err != nil { + return nil, fmt.Errorf("new request: %w", err) + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + resp, err := c.client.Do(req) + if err != nil { + return nil, fmt.Errorf("post: %w", err) + } + defer resp.Body.Close() + + responseBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("read response body: %w", err) + } + + return unmarshalResponseBytes(responseBytes, URIClientRequestID, result) +} diff --git a/rpc/lib/client/integration_test.go b/rpc/jsonrpc/client/integration_test.go similarity index 96% rename from rpc/lib/client/integration_test.go rename to rpc/jsonrpc/client/integration_test.go index 5fee3752b..228bbb460 100644 --- a/rpc/lib/client/integration_test.go +++ b/rpc/jsonrpc/client/integration_test.go @@ -3,7 +3,7 @@ // The code in here is comprehensive as an integration // test and is long, hence is only run before releases. -package rpcclient +package client import ( "bytes" @@ -29,7 +29,7 @@ func TestWSClientReconnectWithJitter(t *testing.T) { buf := new(bytes.Buffer) logger := log.NewTMLogger(buf) for i := 0; i < n; i++ { - c, err := NewWSClient("tcp://foo", "/websocket") + c, err := NewWS("tcp://foo", "/websocket") require.Nil(t, err) c.Dialer = func(string, string) (net.Conn, error) { return nil, errNotConnected diff --git a/rpc/lib/client/ws_client.go b/rpc/jsonrpc/client/ws_client.go similarity index 92% rename from rpc/lib/client/ws_client.go rename to rpc/jsonrpc/client/ws_client.go index ddddc97cf..1c7ade657 100644 --- a/rpc/lib/client/ws_client.go +++ b/rpc/jsonrpc/client/ws_client.go @@ -1,4 +1,4 @@ -package rpcclient +package client import ( "context" @@ -10,14 +10,12 @@ import ( "time" "github.com/gorilla/websocket" - "github.com/pkg/errors" metrics "github.com/rcrowley/go-metrics" - amino "github.com/tendermint/go-amino" - tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" - types "github.com/tendermint/tendermint/rpc/lib/types" + tmsync "github.com/tendermint/tendermint/libs/sync" + types "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) const ( @@ -33,7 +31,6 @@ const ( // WSClient is safe for concurrent use by multiple goroutines. type WSClient struct { // nolint: maligned conn *websocket.Conn - cdc *amino.Codec Address string // IP:PORT or /path/to/socket Endpoint string // /websocket/url/endpoint @@ -60,7 +57,7 @@ type WSClient struct { // nolint: maligned wg sync.WaitGroup - mtx sync.RWMutex + mtx tmsync.RWMutex sentLastPingAt time.Time reconnecting bool nextReqID int @@ -82,11 +79,11 @@ type WSClient struct { // nolint: maligned PingPongLatencyTimer metrics.Timer } -// NewWSClient returns a new client. See the commentary on the func(*WSClient) +// NewWS returns a new client. See the commentary on the func(*WSClient) // functions for a detailed description of how to configure ping period and // pong wait time. The endpoint argument must begin with a `/`. // An error is returned on invalid remote. The function panics when remote is nil. 
-func NewWSClient(remoteAddr, endpoint string, options ...func(*WSClient)) (*WSClient, error) { +func NewWS(remoteAddr, endpoint string, options ...func(*WSClient)) (*WSClient, error) { parsedURL, err := newParsedURL(remoteAddr) if err != nil { return nil, err @@ -102,7 +99,6 @@ func NewWSClient(remoteAddr, endpoint string, options ...func(*WSClient)) (*WSCl } c := &WSClient{ - cdc: amino.NewCodec(), Address: parsedURL.GetTrimmedHostWithPath(), Dialer: dialFn, Endpoint: endpoint, @@ -235,7 +231,7 @@ func (c *WSClient) Send(ctx context.Context, request types.RPCRequest) error { // Call enqueues a call request onto the Send queue. Requests are JSON encoded. func (c *WSClient) Call(ctx context.Context, method string, params map[string]interface{}) error { - request, err := types.MapToRequest(c.cdc, c.nextRequestID(), method, params) + request, err := types.MapToRequest(c.nextRequestID(), method, params) if err != nil { return err } @@ -245,17 +241,13 @@ func (c *WSClient) Call(ctx context.Context, method string, params map[string]in // CallWithArrayParams enqueues a call request onto the Send queue. Params are // in a form of array (e.g. []interface{}{"abcd"}). Requests are JSON encoded. func (c *WSClient) CallWithArrayParams(ctx context.Context, method string, params []interface{}) error { - request, err := types.ArrayToRequest(c.cdc, c.nextRequestID(), method, params) + request, err := types.ArrayToRequest(c.nextRequestID(), method, params) if err != nil { return err } return c.Send(ctx, request) } -func (c *WSClient) Codec() *amino.Codec { return c.cdc } -func (c *WSClient) SetCodec(cdc *amino.Codec) { c.cdc = cdc } - -/////////////////////////////////////////////////////////////////////////////// // Private methods func (c *WSClient) nextRequestID() types.JSONRPCIntID { @@ -295,8 +287,8 @@ func (c *WSClient) reconnect() error { }() for { - jitterSeconds := time.Duration(tmrand.Float64() * float64(time.Second)) // 1s == (1e9 ns) - backoffDuration := jitterSeconds + ((1 << uint(attempt)) * time.Second) + jitter := time.Duration(tmrand.Float64() * float64(time.Second)) // 1s == (1e9 ns) + backoffDuration := jitter + ((1 << uint(attempt)) * time.Second) c.Logger.Info("reconnecting", "attempt", attempt+1, "backoff_duration", backoffDuration) time.Sleep(backoffDuration) @@ -315,7 +307,7 @@ func (c *WSClient) reconnect() error { attempt++ if attempt > c.maxReconnectAttempts { - return errors.Wrap(err, "reached maximum reconnect attempts") + return fmt.Errorf("reached maximum reconnect attempts: %w", err) } } } @@ -356,7 +348,10 @@ func (c *WSClient) reconnectRoutine() { c.wg.Wait() if err := c.reconnect(); err != nil { c.Logger.Error("failed to reconnect", "err", err, "original_err", originalError) - c.Stop() + if err = c.Stop(); err != nil { + c.Logger.Error("failed to stop conn", "error", err) + } + return } // drain reconnectAfter @@ -525,7 +520,6 @@ func (c *WSClient) readRoutine() { } } -/////////////////////////////////////////////////////////////////////////////// // Predefined methods // Subscribe to a query. 
Note the server must have a "subscribe" route
diff --git a/rpc/lib/client/ws_client_test.go b/rpc/jsonrpc/client/ws_client_test.go
similarity index 90%
rename from rpc/lib/client/ws_client_test.go
rename to rpc/jsonrpc/client/ws_client_test.go
index a4f033867..2e6403806 100644
--- a/rpc/lib/client/ws_client_test.go
+++ b/rpc/jsonrpc/client/ws_client_test.go
@@ -1,4 +1,4 @@
-package rpcclient
+package client

import (
	"context"
@@ -13,15 +13,15 @@ import (
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/libs/log"
-
-	types "github.com/tendermint/tendermint/rpc/lib/types"
+	tmsync "github.com/tendermint/tendermint/libs/sync"
+	types "github.com/tendermint/tendermint/rpc/jsonrpc/types"
)

var wsCallTimeout = 5 * time.Second

type myHandler struct {
	closeConnAfterRead bool
-	mtx                sync.RWMutex
+	mtx                tmsync.RWMutex
}

var upgrader = websocket.Upgrader{
@@ -34,7 +34,7 @@ func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if err != nil {
		panic(err)
	}
-	defer conn.Close() // nolint: errcheck
+	defer conn.Close()
	for {
		messageType, in, err := conn.ReadMessage()
		if err != nil {
@@ -72,7 +72,7 @@ func TestWSClientReconnectsAfterReadFailure(t *testing.T) {
	defer s.Close()

	c := startClient(t, "//"+s.Listener.Addr().String())
-	defer c.Stop()
+	defer c.Stop() // nolint:errcheck // ignore for tests

	wg.Add(1)
	go callWgDoneOnResult(t, c, &wg)
@@ -104,7 +104,7 @@ func TestWSClientReconnectsAfterWriteFailure(t *testing.T) {
	s := httptest.NewServer(h)

	c := startClient(t, "//"+s.Listener.Addr().String())
-	defer c.Stop()
+	defer c.Stop() // nolint:errcheck // ignore for tests

	wg.Add(2)
	go callWgDoneOnResult(t, c, &wg)
@@ -132,7 +132,7 @@ func TestWSClientReconnectFailure(t *testing.T) {
	s := httptest.NewServer(h)

	c := startClient(t, "//"+s.Listener.Addr().String())
-	defer c.Stop()
+	defer c.Stop() // nolint:errcheck // ignore for tests

	go func() {
		for {
@@ -181,14 +181,15 @@ func TestNotBlockingOnStop(t *testing.T) {
	timeout := 2 * time.Second
	s := httptest.NewServer(&myHandler{})
	c := startClient(t, "//"+s.Listener.Addr().String())
-	c.Call(context.Background(), "a", make(map[string]interface{}))
+	c.Call(context.Background(), "a", make(map[string]interface{})) // nolint:errcheck // ignore for tests
	// Let the readRoutine get around to blocking
	time.Sleep(time.Second)
	passCh := make(chan struct{})
	go func() {
		// Unless we have a non-blocking write to ResponsesCh from readRoutine
		// this blocks forever on the waitgroup
-		c.Stop()
+		err := c.Stop()
+		require.NoError(t, err)
		passCh <- struct{}{}
	}()
	select {
@@ -201,7 +202,7 @@
}

func startClient(t *testing.T, addr string) *WSClient {
-	c, err := NewWSClient(addr, "/websocket")
+	c, err := NewWS(addr, "/websocket")
	require.Nil(t, err)
	err = c.Start()
	require.Nil(t, err)
diff --git a/rpc/lib/doc.go b/rpc/jsonrpc/doc.go
similarity index 97%
rename from rpc/lib/doc.go
rename to rpc/jsonrpc/doc.go
index 3e8314b80..b014fe38d 100644
--- a/rpc/lib/doc.go
+++ b/rpc/jsonrpc/doc.go
@@ -73,7 +73,7 @@
// logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// listener, err := rpc.Listen("0.0.0.0:8080", rpcserver.Config{})
// if err != nil { panic(err) }
-// go rpcserver.StartHTTPServer(listener, mux, logger)
+// go rpcserver.Serve(listener, mux, logger)
//
// Note that unix sockets are supported as well (eg. `/path/to/socket` instead of `0.0.0.0:8008`)
// Now see all available endpoints by sending a GET request to `0.0.0.0:8008`.
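Since the websocket client is renamed here as well (NewWSClient becomes NewWS), a hedged sketch of its asynchronous call pattern; the address, endpoint, and method are assumptions, and ResponsesCh is the response channel the tests above refer to.

// Reuses the imports from the earlier client sketch, plus context.
ws, err := client.NewWS("tcp://localhost:26657", "/websocket")
if err != nil {
	panic(err)
}
if err := ws.Start(); err != nil {
	panic(err)
}
defer func() {
	// Stop now returns an error that callers are expected to check.
	if err := ws.Stop(); err != nil {
		fmt.Println("stop:", err)
	}
}()

// Call only enqueues the request; responses arrive on ws.ResponsesCh.
if err := ws.Call(context.Background(), "echo", map[string]interface{}{"arg": "hi"}); err != nil {
	panic(err)
}
resp := <-ws.ResponsesCh
fmt.Println(string(resp.Result))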
@@ -82,4 +82,4 @@ // Examples // // - [Tendermint](https://github.com/tendermint/tendermint/blob/master/rpc/core/routes.go) -package rpc +package jsonrpc diff --git a/rpc/lib/rpc_test.go b/rpc/jsonrpc/jsonrpc_test.go similarity index 79% rename from rpc/lib/rpc_test.go rename to rpc/jsonrpc/jsonrpc_test.go index aef795d3e..ec12f85d7 100644 --- a/rpc/lib/rpc_test.go +++ b/rpc/jsonrpc/jsonrpc_test.go @@ -1,4 +1,4 @@ -package rpc +package jsonrpc import ( "bytes" @@ -16,15 +16,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - amino "github.com/tendermint/go-amino" - tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" - client "github.com/tendermint/tendermint/rpc/lib/client" - server "github.com/tendermint/tendermint/rpc/lib/server" - types "github.com/tendermint/tendermint/rpc/lib/types" + client "github.com/tendermint/tendermint/rpc/jsonrpc/client" + server "github.com/tendermint/tendermint/rpc/jsonrpc/server" + types "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // Client and Server should work over tcp or unix sockets @@ -39,6 +37,10 @@ const ( testVal = "acbd" ) +var ( + ctx = context.Background() +) + type ResultEcho struct { Value string `json:"value"` } @@ -64,9 +66,6 @@ var Routes = map[string]*server.RPCFunc{ "echo_int": server.NewRPCFunc(EchoIntResult, "arg"), } -// Amino codec required to encode/decode everything above. -var RoutesCdc = amino.NewCodec() - func EchoResult(ctx *types.Context, v string) (*ResultEcho, error) { return &ResultEcho{v}, nil } @@ -121,8 +120,8 @@ func setup() { tcpLogger := logger.With("socket", "tcp") mux := http.NewServeMux() - server.RegisterRPCFuncs(mux, Routes, RoutesCdc, tcpLogger) - wm := server.NewWebsocketManager(Routes, RoutesCdc, server.ReadWait(5*time.Second), server.PingPeriod(1*time.Second)) + server.RegisterRPCFuncs(mux, Routes, tcpLogger) + wm := server.NewWebsocketManager(Routes, server.ReadWait(5*time.Second), server.PingPeriod(1*time.Second)) wm.SetLogger(tcpLogger) mux.HandleFunc(websocketEndpoint, wm.WebsocketHandler) config := server.DefaultConfig() @@ -130,63 +129,71 @@ func setup() { if err != nil { panic(err) } - go server.StartHTTPServer(listener1, mux, tcpLogger, config) + go func() { + if err := server.Serve(listener1, mux, tcpLogger, config); err != nil { + panic(err) + } + }() unixLogger := logger.With("socket", "unix") mux2 := http.NewServeMux() - server.RegisterRPCFuncs(mux2, Routes, RoutesCdc, unixLogger) - wm = server.NewWebsocketManager(Routes, RoutesCdc) + server.RegisterRPCFuncs(mux2, Routes, unixLogger) + wm = server.NewWebsocketManager(Routes) wm.SetLogger(unixLogger) mux2.HandleFunc(websocketEndpoint, wm.WebsocketHandler) listener2, err := server.Listen(unixAddr, config) if err != nil { panic(err) } - go server.StartHTTPServer(listener2, mux2, unixLogger, config) + go func() { + if err := server.Serve(listener2, mux2, unixLogger, config); err != nil { + panic(err) + } + }() // wait for servers to start time.Sleep(time.Second * 2) } -func echoViaHTTP(cl client.JSONRPCCaller, val string) (string, error) { +func echoViaHTTP(cl client.Caller, val string) (string, error) { params := map[string]interface{}{ "arg": val, } result := new(ResultEcho) - if _, err := cl.Call("echo", params, result); err != nil { + if _, err := cl.Call(ctx, "echo", params, result); err != nil { return "", err } return result.Value, nil } -func echoIntViaHTTP(cl client.JSONRPCCaller, val int) (int, 
error) { +func echoIntViaHTTP(cl client.Caller, val int) (int, error) { params := map[string]interface{}{ "arg": val, } result := new(ResultEchoInt) - if _, err := cl.Call("echo_int", params, result); err != nil { + if _, err := cl.Call(ctx, "echo_int", params, result); err != nil { return 0, err } return result.Value, nil } -func echoBytesViaHTTP(cl client.JSONRPCCaller, bytes []byte) ([]byte, error) { +func echoBytesViaHTTP(cl client.Caller, bytes []byte) ([]byte, error) { params := map[string]interface{}{ "arg": bytes, } result := new(ResultEchoBytes) - if _, err := cl.Call("echo_bytes", params, result); err != nil { + if _, err := cl.Call(ctx, "echo_bytes", params, result); err != nil { return []byte{}, err } return result.Value, nil } -func echoDataBytesViaHTTP(cl client.JSONRPCCaller, bytes tmbytes.HexBytes) (tmbytes.HexBytes, error) { +func echoDataBytesViaHTTP(cl client.Caller, bytes tmbytes.HexBytes) (tmbytes.HexBytes, error) { params := map[string]interface{}{ "arg": bytes, } result := new(ResultEchoDataBytes) - if _, err := cl.Call("echo_data_bytes", params, result); err != nil { + if _, err := cl.Call(ctx, "echo_data_bytes", params, result); err != nil { return []byte{}, err } return result.Value, nil @@ -275,29 +282,30 @@ func testWithWSClient(t *testing.T, cl *client.WSClient) { func TestServersAndClientsBasic(t *testing.T) { serverAddrs := [...]string{tcpAddr, unixAddr} for _, addr := range serverAddrs { - cl1, err := client.NewURIClient(addr) + cl1, err := client.NewURI(addr) require.Nil(t, err) fmt.Printf("=== testing server on %s using URI client", addr) testWithHTTPClient(t, cl1) - cl2, err := client.NewJSONRPCClient(addr) + cl2, err := client.New(addr) require.Nil(t, err) fmt.Printf("=== testing server on %s using JSONRPC client", addr) testWithHTTPClient(t, cl2) - cl3, err := client.NewWSClient(addr, websocketEndpoint) + cl3, err := client.NewWS(addr, websocketEndpoint) require.Nil(t, err) cl3.SetLogger(log.TestingLogger()) err = cl3.Start() require.Nil(t, err) fmt.Printf("=== testing server on %s using WS client", addr) testWithWSClient(t, cl3) - cl3.Stop() + err = cl3.Stop() + require.NoError(t, err) } } func TestHexStringArg(t *testing.T) { - cl, err := client.NewURIClient(tcpAddr) + cl, err := client.NewURI(tcpAddr) require.Nil(t, err) // should NOT be handled as hex val := "0xabc" @@ -307,7 +315,7 @@ func TestHexStringArg(t *testing.T) { } func TestQuotedStringArg(t *testing.T) { - cl, err := client.NewURIClient(tcpAddr) + cl, err := client.NewURI(tcpAddr) require.Nil(t, err) // should NOT be unquoted val := "\"abc\"" @@ -317,12 +325,16 @@ func TestQuotedStringArg(t *testing.T) { } func TestWSNewWSRPCFunc(t *testing.T) { - cl, err := client.NewWSClient(tcpAddr, websocketEndpoint) + cl, err := client.NewWS(tcpAddr, websocketEndpoint) require.Nil(t, err) cl.SetLogger(log.TestingLogger()) err = cl.Start() require.Nil(t, err) - defer cl.Stop() + t.Cleanup(func() { + if err := cl.Stop(); err != nil { + t.Error(err) + } + }) val := testVal params := map[string]interface{}{ @@ -343,12 +355,16 @@ func TestWSNewWSRPCFunc(t *testing.T) { } func TestWSHandlesArrayParams(t *testing.T) { - cl, err := client.NewWSClient(tcpAddr, websocketEndpoint) + cl, err := client.NewWS(tcpAddr, websocketEndpoint) require.Nil(t, err) cl.SetLogger(log.TestingLogger()) err = cl.Start() require.Nil(t, err) - defer cl.Stop() + t.Cleanup(func() { + if err := cl.Stop(); err != nil { + t.Error(err) + } + }) val := testVal params := []interface{}{val} @@ -369,12 +385,16 @@ func 
TestWSHandlesArrayParams(t *testing.T) { // TestWSClientPingPong checks that a client & server exchange pings // & pongs so connection stays alive. func TestWSClientPingPong(t *testing.T) { - cl, err := client.NewWSClient(tcpAddr, websocketEndpoint) + cl, err := client.NewWS(tcpAddr, websocketEndpoint) require.Nil(t, err) cl.SetLogger(log.TestingLogger()) err = cl.Start() require.Nil(t, err) - defer cl.Stop() + t.Cleanup(func() { + if err := cl.Stop(); err != nil { + t.Error(err) + } + }) time.Sleep(6 * time.Second) } @@ -384,5 +404,5 @@ func randBytes(t *testing.T) []byte { buf := make([]byte, n) _, err := crand.Read(buf) require.Nil(t, err) - return bytes.Replace(buf, []byte("="), []byte{100}, -1) + return bytes.ReplaceAll(buf, []byte("="), []byte{100}) } diff --git a/rpc/lib/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go similarity index 78% rename from rpc/lib/server/http_json_handler.go rename to rpc/jsonrpc/server/http_json_handler.go index 65c0a680f..ffe9d133b 100644 --- a/rpc/lib/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -1,4 +1,4 @@ -package rpcserver +package server import ( "bytes" @@ -9,28 +9,24 @@ import ( "reflect" "sort" - "github.com/pkg/errors" - - amino "github.com/tendermint/go-amino" - + tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/lib/types" + types "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) -/////////////////////////////////////////////////////////////////////////////// // HTTP + JSON handler -/////////////////////////////////////////////////////////////////////////////// // jsonrpc calls grab the given method's function info and runs reflect.Call -func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger log.Logger) http.HandlerFunc { +func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { b, err := ioutil.ReadAll(r.Body) if err != nil { - WriteRPCResponseHTTP( + WriteRPCResponseHTTPError( w, + http.StatusBadRequest, types.RPCInvalidRequestError( nil, - errors.Wrap(err, "error reading request body"), + fmt.Errorf("error reading request body: %w", err), ), ) return @@ -52,10 +48,11 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo // next, try to unmarshal as a single request var request types.RPCRequest if err := json.Unmarshal(b, &request); err != nil { - WriteRPCResponseHTTP( + WriteRPCResponseHTTPError( w, + http.StatusInternalServerError, types.RPCParseError( - errors.Wrap(err, "error unmarshalling request"), + fmt.Errorf("error unmarshalling request: %w", err), ), ) return @@ -78,7 +75,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo if len(r.URL.Path) > 1 { responses = append( responses, - types.RPCInvalidRequestError(request.ID, errors.Errorf("path %s is invalid", r.URL.Path)), + types.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)), ) continue } @@ -90,11 +87,11 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo ctx := &types.Context{JSONReq: &request, HTTPReq: r} args := []reflect.Value{reflect.ValueOf(ctx)} if len(request.Params) > 0 { - fnArgs, err := jsonParamsToArgs(rpcFunc, cdc, request.Params) + fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) if err != nil { responses = append( responses, - types.RPCInvalidParamsError(request.ID, 
errors.Wrap(err, "error converting json params to arguments")), + types.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), ) continue } @@ -107,10 +104,10 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo responses = append(responses, types.RPCInternalError(request.ID, err)) continue } - responses = append(responses, types.NewRPCSuccessResponse(cdc, request.ID, result)) + responses = append(responses, types.NewRPCSuccessResponse(request.ID, result)) } if len(responses) > 0 { - WriteRPCResponseArrayHTTP(w, responses) + WriteRPCResponseHTTP(w, responses...) } } } @@ -130,7 +127,6 @@ func handleInvalidJSONRPCPaths(next http.HandlerFunc) http.HandlerFunc { func mapParamsToArgs( rpcFunc *RPCFunc, - cdc *amino.Codec, params map[string]json.RawMessage, argsOffset int, ) ([]reflect.Value, error) { @@ -141,7 +137,7 @@ func mapParamsToArgs( if p, ok := params[argName]; ok && p != nil && len(p) > 0 { val := reflect.New(argType) - err := cdc.UnmarshalJSON(p, val.Interface()) + err := tmjson.Unmarshal(p, val.Interface()) if err != nil { return nil, err } @@ -156,13 +152,12 @@ func mapParamsToArgs( func arrayParamsToArgs( rpcFunc *RPCFunc, - cdc *amino.Codec, params []json.RawMessage, argsOffset int, ) ([]reflect.Value, error) { if len(rpcFunc.argNames) != len(params) { - return nil, errors.Errorf("expected %v parameters (%v), got %v (%v)", + return nil, fmt.Errorf("expected %v parameters (%v), got %v (%v)", len(rpcFunc.argNames), rpcFunc.argNames, len(params), params) } @@ -170,7 +165,7 @@ func arrayParamsToArgs( for i, p := range params { argType := rpcFunc.args[i+argsOffset] val := reflect.New(argType) - err := cdc.UnmarshalJSON(p, val.Interface()) + err := tmjson.Unmarshal(p, val.Interface()) if err != nil { return nil, err } @@ -185,7 +180,7 @@ func arrayParamsToArgs( // Example: // rpcFunc.args = [rpctypes.Context string] // rpcFunc.argNames = ["arg"] -func jsonParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, raw []byte) ([]reflect.Value, error) { +func jsonParamsToArgs(rpcFunc *RPCFunc, raw []byte) ([]reflect.Value, error) { const argsOffset = 1 // TODO: Make more efficient, perhaps by checking the first character for '{' or '['? @@ -193,18 +188,18 @@ func jsonParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, raw []byte) ([]reflect var m map[string]json.RawMessage err := json.Unmarshal(raw, &m) if err == nil { - return mapParamsToArgs(rpcFunc, cdc, m, argsOffset) + return mapParamsToArgs(rpcFunc, m, argsOffset) } // Otherwise, try an array. var a []json.RawMessage err = json.Unmarshal(raw, &a) if err == nil { - return arrayParamsToArgs(rpcFunc, cdc, a, argsOffset) + return arrayParamsToArgs(rpcFunc, a, argsOffset) } // Otherwise, bad format, we cannot parse - return nil, errors.Errorf("unknown type for JSON params: %v. Expected map or array", err) + return nil, fmt.Errorf("unknown type for JSON params: %v. 
Expected map or array", err) } // writes a list of available rpc endpoints as an html page diff --git a/rpc/lib/server/http_json_handler_test.go b/rpc/jsonrpc/server/http_json_handler_test.go similarity index 95% rename from rpc/lib/server/http_json_handler_test.go rename to rpc/jsonrpc/server/http_json_handler_test.go index ef1fcc9f5..a5c14e59a 100644 --- a/rpc/lib/server/http_json_handler_test.go +++ b/rpc/jsonrpc/server/http_json_handler_test.go @@ -1,4 +1,4 @@ -package rpcserver +package server import ( "bytes" @@ -12,21 +12,18 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - amino "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/lib/types" + types "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func testMux() *http.ServeMux { funcMap := map[string]*RPCFunc{ "c": NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), } - cdc := amino.NewCodec() mux := http.NewServeMux() buf := new(bytes.Buffer) logger := log.NewTMLogger(buf) - RegisterRPCFuncs(mux, funcMap, cdc, logger) + RegisterRPCFuncs(mux, funcMap, logger) return mux } @@ -68,7 +65,7 @@ func TestRPCParams(t *testing.T) { res := rec.Result() defer res.Body.Close() // Always expecting back a JSONRPCResponse - assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i) + assert.NotZero(t, res.StatusCode, "#%d: should always return code", i) blob, err := ioutil.ReadAll(res.Body) if err != nil { t.Errorf("#%d: err reading body: %v", i, err) @@ -115,7 +112,7 @@ func TestJSONRPCID(t *testing.T) { mux.ServeHTTP(rec, req) res := rec.Result() // Always expecting back a JSONRPCResponse - assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i) + assert.NotZero(t, res.StatusCode, "#%d: should always return code", i) blob, err := ioutil.ReadAll(res.Body) if err != nil { t.Errorf("#%d: err reading body: %v", i, err) diff --git a/rpc/lib/server/http_server.go b/rpc/jsonrpc/server/http_server.go similarity index 68% rename from rpc/lib/server/http_server.go rename to rpc/jsonrpc/server/http_server.go index 501396867..b323d46fd 100644 --- a/rpc/lib/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -1,21 +1,22 @@ // Commons for HTTP handling -package rpcserver +package server import ( "bufio" "encoding/json" + "errors" "fmt" "net" "net/http" + "os" "runtime/debug" "strings" "time" - "github.com/pkg/errors" "golang.org/x/net/netutil" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/lib/types" + types "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // Config is a RPC server configuration. @@ -44,10 +45,12 @@ func DefaultConfig() *Config { } } -// StartHTTPServer takes a listener and starts an HTTP server with the given handler. -// It wraps handler with RecoverAndLogHandler. +// Serve creates a http.Server and calls Serve with the given listener. It +// wraps handler with RecoverAndLogHandler and a handler, which limits the max +// body size to config.MaxBodyBytes. +// // NOTE: This function blocks - you may want to call it in a go-routine. 
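As the note above says, Serve blocks; a minimal sketch of the intended pattern is to run it in a goroutine and surface its terminal error. Handler registration is elided and the listen address is an assumption.

package main

import (
	"net/http"
	"os"

	"github.com/tendermint/tendermint/libs/log"
	server "github.com/tendermint/tendermint/rpc/jsonrpc/server"
)

func main() {
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
	mux := http.NewServeMux() // RegisterRPCFuncs etc. elided

	config := server.DefaultConfig()
	listener, err := server.Listen("tcp://127.0.0.1:0", config)
	if err != nil {
		panic(err)
	}

	// Serve blocks until the listener fails or is closed, so run it in a
	// goroutine and collect the terminal error on a channel.
	errCh := make(chan error, 1)
	go func() {
		errCh <- server.Serve(listener, mux, logger, config)
	}()

	if err := <-errCh; err != nil {
		logger.Error("RPC HTTP server terminated", "err", err)
	}
}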
-func StartHTTPServer(listener net.Listener, handler http.Handler, logger log.Logger, config *Config) error {
+func Serve(listener net.Listener, handler http.Handler, logger log.Logger, config *Config) error {
	logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s", listener.Addr()))
	s := &http.Server{
		Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger),
@@ -60,10 +63,12 @@
	return err
}

-// StartHTTPAndTLSServer takes a listener and starts an HTTPS server with the given handler.
-// It wraps handler with RecoverAndLogHandler.
+// ServeTLS creates a http.Server and calls ServeTLS with the given listener,
+// certFile and keyFile. It wraps handler with RecoverAndLogHandler and a
+// handler, which limits the max body size to config.MaxBodyBytes.
+//
+// NOTE: This function blocks - you may want to call it in a go-routine.
-func StartHTTPAndTLSServer(
+func ServeTLS(
	listener net.Listener,
	handler http.Handler,
	certFile, keyFile string,
@@ -84,6 +89,9 @@
	return err
}

+// WriteRPCResponseHTTPError marshals res as JSON and writes it to w.
+//
+// Panics if it can't Marshal res or write to w.
func WriteRPCResponseHTTPError(
	w http.ResponseWriter,
	httpCode int,
@@ -101,8 +109,18 @@
	}
}

-func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) {
-	jsonBytes, err := json.MarshalIndent(res, "", "  ")
+// WriteRPCResponseHTTP marshals res as JSON and writes it to w.
+//
+// Panics if it can't Marshal res or write to w.
+func WriteRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) {
+	var v interface{}
+	if len(res) == 1 {
+		v = res[0]
+	} else {
+		v = res
+	}
+
+	jsonBytes, err := json.MarshalIndent(v, "", "  ")
	if err != nil {
		panic(err)
	}
@@ -113,25 +131,6 @@
	}
}

-// WriteRPCResponseArrayHTTP will do the same as WriteRPCResponseHTTP, except it
-// can write arrays of responses for batched request/response interactions via
-// the JSON RPC.
-func WriteRPCResponseArrayHTTP(w http.ResponseWriter, res []types.RPCResponse) {
-	if len(res) == 1 {
-		WriteRPCResponseHTTP(w, res[0])
-	} else {
-		jsonBytes, err := json.MarshalIndent(res, "", "  ")
-		if err != nil {
-			panic(err)
-		}
-		w.Header().Set("Content-Type", "application/json")
-		w.WriteHeader(200)
-		if _, err := w.Write(jsonBytes); err != nil {
-			panic(err)
-		}
-	}
-}
-
//-----------------------------------------------------------------------------

// RecoverAndLogHandler wraps an HTTP handler, adding error logging.
@@ -140,11 +139,25 @@
func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Wrap the ResponseWriter to remember the status
-		rww := &ResponseWriterWrapper{-1, w}
+		rww := &responseWriterWrapper{-1, w}
		begin := time.Now()

		rww.Header().Set("X-Server-Time", fmt.Sprintf("%v", begin.Unix()))

+		defer func() {
+			// Handle any panics in the panic handler below. Does not use the logger, since we want
+			// to avoid any further panics. However, we try to return a 500, since it otherwise
+			// defaults to 200 and there is no other way to terminate the connection.
+			// If that should panic for whatever reason then the Go HTTP server will handle it and
+			// terminate the connection - panicking is the de-facto and only way to get the Go HTTP
+			// server to terminate the request and close the connection/stream:
+			// https://github.com/golang/go/issues/17790#issuecomment-258481416
+			if e := recover(); e != nil {
+				fmt.Fprintf(os.Stderr, "Panic during RPC panic recovery: %v\n%v\n", e, string(debug.Stack()))
+				w.WriteHeader(500)
+			}
+		}()
+
		defer func() {
			// Send a 500 error if a panic happens during a handler.
			// Without this, Chrome & Firefox were retrying aborted ajax requests,
@@ -155,7 +168,18 @@
				if res, ok := e.(types.RPCResponse); ok {
					WriteRPCResponseHTTP(rww, res)
				} else {
-					// For the rest,
+					// Panics can contain anything, attempt to normalize it as an error.
+					var err error
+					switch e := e.(type) {
+					case error:
+						err = e
+					case string:
+						err = errors.New(e)
+					case fmt.Stringer:
+						err = errors.New(e.String())
+					default:
+						// err stays nil for panic values of any other type
+					}
+
					logger.Error(
						"Panic in RPC HTTP handler", "err", e, "stack",
						string(debug.Stack()),
@@ -163,7 +187,7 @@
					WriteRPCResponseHTTPError(
						rww,
						http.StatusInternalServerError,
-						types.RPCInternalError(types.JSONRPCIntID(-1), e.(error)),
+						types.RPCInternalError(types.JSONRPCIntID(-1), err),
					)
				}
			}
@@ -185,18 +209,18 @@
}

// Remember the status for logging
-type ResponseWriterWrapper struct {
+type responseWriterWrapper struct {
	Status int
	http.ResponseWriter
}

-func (w *ResponseWriterWrapper) WriteHeader(status int) {
+func (w *responseWriterWrapper) WriteHeader(status int) {
	w.Status = status
	w.ResponseWriter.WriteHeader(status)
}

// implements http.Hijacker
-func (w *ResponseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+func (w *responseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	return w.ResponseWriter.(http.Hijacker).Hijack()
}

@@ -215,7 +239,7 @@ func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
func Listen(addr string, config *Config) (listener net.Listener, err error) {
	parts := strings.SplitN(addr, "://", 2)
	if len(parts) != 2 {
-		return nil, errors.Errorf(
+		return nil, fmt.Errorf(
			"invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)",
			addr,
		)
@@ -223,7 +247,7 @@
	proto, addr := parts[0], parts[1]
	listener, err = net.Listen(proto, addr)
	if err != nil {
-		return nil, errors.Errorf("failed to listen on %v: %v", addr, err)
+		return nil, fmt.Errorf("failed to listen on %v: %v", addr, err)
	}
	if config.MaxOpenConnections > 0 {
		listener = netutil.LimitListener(listener, config.MaxOpenConnections)
diff --git a/rpc/jsonrpc/server/http_server_test.go b/rpc/jsonrpc/server/http_server_test.go
new file mode 100644
index 000000000..60f3ce126
--- /dev/null
+++ b/rpc/jsonrpc/server/http_server_test.go
@@ -0,0 +1,180 @@
+package server
+
+import (
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"sync"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/tendermint/tendermint/libs/log"
+	types "github.com/tendermint/tendermint/rpc/jsonrpc/types"
+)
+
+type sampleResult struct {
+	Value string
`json:"value"` +} + +func TestMaxOpenConnections(t *testing.T) { + const max = 5 // max simultaneous connections + + // Start the server. + var open int32 + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if n := atomic.AddInt32(&open, 1); n > int32(max) { + t.Errorf("%d open connections, want <= %d", n, max) + } + defer atomic.AddInt32(&open, -1) + time.Sleep(10 * time.Millisecond) + fmt.Fprint(w, "some body") + }) + config := DefaultConfig() + config.MaxOpenConnections = max + l, err := Listen("tcp://127.0.0.1:0", config) + require.NoError(t, err) + defer l.Close() + go Serve(l, mux, log.TestingLogger(), config) //nolint:errcheck // ignore for tests + + // Make N GET calls to the server. + attempts := max * 2 + var wg sync.WaitGroup + var failed int32 + for i := 0; i < attempts; i++ { + wg.Add(1) + go func() { + defer wg.Done() + c := http.Client{Timeout: 3 * time.Second} + r, err := c.Get("http://" + l.Addr().String()) + if err != nil { + atomic.AddInt32(&failed, 1) + return + } + defer r.Body.Close() + }() + } + wg.Wait() + + // We expect some Gets to fail as the server's accept queue is filled, + // but most should succeed. + if int(failed) >= attempts/2 { + t.Errorf("%d requests failed within %d attempts", failed, attempts) + } +} + +func TestServeTLS(t *testing.T) { + ln, err := net.Listen("tcp", "localhost:0") + require.NoError(t, err) + defer ln.Close() + + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, "some body") + }) + + chErr := make(chan error, 1) + go func() { + // FIXME This goroutine leaks + chErr <- ServeTLS(ln, mux, "test.crt", "test.key", log.TestingLogger(), DefaultConfig()) + }() + + select { + case err := <-chErr: + require.NoError(t, err) + case <-time.After(100 * time.Millisecond): + } + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + c := &http.Client{Transport: tr} + res, err := c.Get("https://" + ln.Addr().String()) + require.NoError(t, err) + defer res.Body.Close() + assert.Equal(t, http.StatusOK, res.StatusCode) + + body, err := ioutil.ReadAll(res.Body) + require.NoError(t, err) + assert.Equal(t, []byte("some body"), body) +} + +func TestWriteRPCResponseHTTP(t *testing.T) { + id := types.JSONRPCIntID(-1) + + // one argument + w := httptest.NewRecorder() + WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(id, &sampleResult{"hello"})) + resp := w.Result() + body, err := ioutil.ReadAll(resp.Body) + _ = resp.Body.Close() + require.NoError(t, err) + assert.Equal(t, 200, resp.StatusCode) + assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) + assert.Equal(t, `{ + "jsonrpc": "2.0", + "id": -1, + "result": { + "value": "hello" + } +}`, string(body)) + + // multiple arguments + w = httptest.NewRecorder() + WriteRPCResponseHTTP(w, + types.NewRPCSuccessResponse(id, &sampleResult{"hello"}), + types.NewRPCSuccessResponse(id, &sampleResult{"world"})) + resp = w.Result() + body, err = ioutil.ReadAll(resp.Body) + _ = resp.Body.Close() + require.NoError(t, err) + + assert.Equal(t, 200, resp.StatusCode) + assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) + assert.Equal(t, `[ + { + "jsonrpc": "2.0", + "id": -1, + "result": { + "value": "hello" + } + }, + { + "jsonrpc": "2.0", + "id": -1, + "result": { + "value": "world" + } + } +]`, string(body)) +} + +func TestWriteRPCResponseHTTPError(t *testing.T) { + w := httptest.NewRecorder() + WriteRPCResponseHTTPError(w, + 
http.StatusInternalServerError,
+		types.RPCInternalError(types.JSONRPCIntID(-1), errors.New("foo")))
+	resp := w.Result()
+	body, err := ioutil.ReadAll(resp.Body)
+	_ = resp.Body.Close()
+	require.NoError(t, err)
+	assert.Equal(t, http.StatusInternalServerError, resp.StatusCode)
+	assert.Equal(t, "application/json", resp.Header.Get("Content-Type"))
+	assert.Equal(t, `{
+  "jsonrpc": "2.0",
+  "id": -1,
+  "error": {
+    "code": -32603,
+    "message": "Internal error",
+    "data": "foo"
+  }
+}`, string(body))
+}
diff --git a/rpc/lib/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go
similarity index 64%
rename from rpc/lib/server/http_uri_handler.go
rename to rpc/jsonrpc/server/http_uri_handler.go
index 4ac33dcc1..3e6250183 100644
--- a/rpc/lib/server/http_uri_handler.go
+++ b/rpc/jsonrpc/server/http_uri_handler.go
@@ -1,32 +1,32 @@
-package rpcserver
+package server

import (
	"encoding/hex"
+	"fmt"
	"net/http"
	"reflect"
+	"regexp"
	"strings"

-	"github.com/pkg/errors"
-
-	amino "github.com/tendermint/go-amino"
-
+	tmjson "github.com/tendermint/tendermint/libs/json"
	"github.com/tendermint/tendermint/libs/log"
-	types "github.com/tendermint/tendermint/rpc/lib/types"
+	types "github.com/tendermint/tendermint/rpc/jsonrpc/types"
)

-///////////////////////////////////////////////////////////////////////////////
// HTTP + URI handler
-///////////////////////////////////////////////////////////////////////////////
+
+var reInt = regexp.MustCompile(`^-?[0-9]+$`)

// convert from a function name to the http handler
-func makeHTTPHandler(rpcFunc *RPCFunc, cdc *amino.Codec, logger log.Logger) func(http.ResponseWriter, *http.Request) {
+func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWriter, *http.Request) {
	// Always return -1 as there's no ID here.
	dummyID := types.JSONRPCIntID(-1) // URIClientRequestID

	// Exception for websocket endpoints
	if rpcFunc.ws {
		return func(w http.ResponseWriter, r *http.Request) {
-			WriteRPCResponseHTTP(w, types.RPCMethodNotFoundError(dummyID))
+			WriteRPCResponseHTTPError(w, http.StatusNotFound,
+				types.RPCMethodNotFoundError(dummyID))
		}
	}
@@ -37,13 +37,14 @@ func makeHTTPHandler(rpcFunc *RPCFunc, cdc *amino.Codec, logger log.Logger) func
		ctx := &types.Context{HTTPReq: r}
		args := []reflect.Value{reflect.ValueOf(ctx)}

-		fnArgs, err := httpParamsToArgs(rpcFunc, cdc, r)
+		fnArgs, err := httpParamsToArgs(rpcFunc, r)
		if err != nil {
-			WriteRPCResponseHTTP(
+			WriteRPCResponseHTTPError(
				w,
+				http.StatusInternalServerError,
				types.RPCInvalidParamsError(
					dummyID,
-					errors.Wrap(err, "error converting http params to arguments"),
+					fmt.Errorf("error converting http params to arguments: %w", err),
				),
			)
			return
@@ -52,19 +53,20 @@
		returns := rpcFunc.f.Call(args)

-		logger.Info("HTTPRestRPC", "method", r.URL.Path, "args", args, "returns", returns)
+		logger.Debug("HTTPRestRPC", "method", r.URL.Path, "args", args, "returns", returns)
		result, err := unreflectResult(returns)
		if err != nil {
-			WriteRPCResponseHTTP(w, types.RPCInternalError(dummyID, err))
+			WriteRPCResponseHTTPError(w, http.StatusInternalServerError,
+				types.RPCInternalError(dummyID, err))
			return
		}
-		WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(cdc, dummyID, result))
+		WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(dummyID, result))
	}
}

// Convert an http query to a list of properly typed values.
// To be properly decoded the arg must be a concrete type from tendermint (if it's an interface).
-func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]reflect.Value, error) { +func httpParamsToArgs(rpcFunc *RPCFunc, r *http.Request) ([]reflect.Value, error) { // skip types.Context const argsOffset = 1 @@ -75,14 +77,14 @@ func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]re values[i] = reflect.Zero(argType) // set default for that type - arg := GetParam(r, name) + arg := getParam(r, name) // log.Notice("param to arg", "argType", argType, "name", name, "arg", arg) if arg == "" { continue } - v, ok, err := nonJSONStringToArg(cdc, argType, arg) + v, ok, err := nonJSONStringToArg(argType, arg) if err != nil { return nil, err } @@ -91,7 +93,7 @@ func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]re continue } - values[i], err = jsonStringToArg(cdc, argType, arg) + values[i], err = jsonStringToArg(argType, arg) if err != nil { return nil, err } @@ -100,9 +102,9 @@ func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]re return values, nil } -func jsonStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, error) { +func jsonStringToArg(rt reflect.Type, arg string) (reflect.Value, error) { rv := reflect.New(rt) - err := cdc.UnmarshalJSON([]byte(arg), rv.Interface()) + err := tmjson.Unmarshal([]byte(arg), rv.Interface()) if err != nil { return rv, err } @@ -110,9 +112,9 @@ func jsonStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Val return rv, nil } -func nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, bool, error) { +func nonJSONStringToArg(rt reflect.Type, arg string) (reflect.Value, bool, error) { if rt.Kind() == reflect.Ptr { - rv1, ok, err := nonJSONStringToArg(cdc, rt.Elem(), arg) + rv1, ok, err := nonJSONStringToArg(rt.Elem(), arg) switch { case err != nil: return reflect.Value{}, false, err @@ -124,13 +126,13 @@ func nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect. return reflect.Value{}, false, nil } } else { - return _nonJSONStringToArg(cdc, rt, arg) + return _nonJSONStringToArg(rt, arg) } } // NOTE: rt.Kind() isn't a pointer. 
-func _nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect.Value, bool, error) { - isIntString := ReInt.Match([]byte(arg)) +func _nonJSONStringToArg(rt reflect.Type, arg string) (reflect.Value, bool, error) { + isIntString := reInt.Match([]byte(arg)) isQuotedString := strings.HasPrefix(arg, `"`) && strings.HasSuffix(arg, `"`) isHexString := strings.HasPrefix(strings.ToLower(arg), "0x") @@ -155,7 +157,7 @@ func _nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect if isIntString && expectingInt { qarg := `"` + arg + `"` - rv, err := jsonStringToArg(cdc, rt, qarg) + rv, err := jsonStringToArg(rt, qarg) if err != nil { return rv, false, err } @@ -165,7 +167,7 @@ func _nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect if isHexString { if !expectingString && !expectingByteSlice { - err := errors.Errorf("got a hex string arg, but expected '%s'", + err := fmt.Errorf("got a hex string arg, but expected '%s'", rt.Kind().String()) return reflect.ValueOf(nil), false, err } @@ -183,7 +185,7 @@ func _nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect if isQuotedString && expectingByteSlice { v := reflect.New(reflect.TypeOf("")) - err := cdc.UnmarshalJSON([]byte(arg), v.Interface()) + err := tmjson.Unmarshal([]byte(arg), v.Interface()) if err != nil { return reflect.ValueOf(nil), false, err } @@ -193,3 +195,11 @@ func _nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect return reflect.ValueOf(nil), false, nil } + +func getParam(r *http.Request, param string) string { + s := r.URL.Query().Get(param) + if s == "" { + s = r.FormValue(param) + } + return s +} diff --git a/rpc/lib/server/parse_test.go b/rpc/jsonrpc/server/parse_test.go similarity index 95% rename from rpc/lib/server/parse_test.go rename to rpc/jsonrpc/server/parse_test.go index 09a3d18ee..9f36adca7 100644 --- a/rpc/lib/server/parse_test.go +++ b/rpc/jsonrpc/server/parse_test.go @@ -1,4 +1,4 @@ -package rpcserver +package server import ( "encoding/json" @@ -8,10 +8,9 @@ import ( "testing" "github.com/stretchr/testify/assert" - amino "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/libs/bytes" - types "github.com/tendermint/tendermint/rpc/lib/types" + types "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func TestParseJSONMap(t *testing.T) { @@ -137,7 +136,6 @@ func TestParseJSONArray(t *testing.T) { func TestParseJSONRPC(t *testing.T) { demo := func(ctx *types.Context, height int, name string) {} call := NewRPCFunc(demo, "height,name") - cdc := amino.NewCodec() cases := []struct { raw string @@ -158,7 +156,7 @@ func TestParseJSONRPC(t *testing.T) { for idx, tc := range cases { i := strconv.Itoa(idx) data := []byte(tc.raw) - vals, err := jsonParamsToArgs(call, cdc, data) + vals, err := jsonParamsToArgs(call, data) if tc.fail { assert.NotNil(t, err, i) } else { @@ -175,7 +173,6 @@ func TestParseJSONRPC(t *testing.T) { func TestParseURI(t *testing.T) { demo := func(ctx *types.Context, height int, name string) {} call := NewRPCFunc(demo, "height,name") - cdc := amino.NewCodec() cases := []struct { raw []string @@ -201,7 +198,7 @@ func TestParseURI(t *testing.T) { tc.raw[0], tc.raw[1]) req, err := http.NewRequest("GET", url, nil) assert.NoError(t, err) - vals, err := httpParamsToArgs(call, cdc, req) + vals, err := httpParamsToArgs(call, req) if tc.fail { assert.NotNil(t, err, i) } else { diff --git a/rpc/lib/server/rpc_func.go b/rpc/jsonrpc/server/rpc_func.go similarity index 85% rename from 
rpc/lib/server/rpc_func.go rename to rpc/jsonrpc/server/rpc_func.go index 906533328..e5855c314 100644 --- a/rpc/lib/server/rpc_func.go +++ b/rpc/jsonrpc/server/rpc_func.go @@ -1,14 +1,11 @@ -package rpcserver +package server import ( + "fmt" "net/http" "reflect" "strings" - "github.com/pkg/errors" - - amino "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/libs/log" ) @@ -16,19 +13,17 @@ import ( // general jsonrpc and websocket handlers for all functions. "result" is the // interface on which the result objects are registered, and is popualted with // every RPCResponse -func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, cdc *amino.Codec, logger log.Logger) { +func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger log.Logger) { // HTTP endpoints for funcName, rpcFunc := range funcMap { - mux.HandleFunc("/"+funcName, makeHTTPHandler(rpcFunc, cdc, logger)) + mux.HandleFunc("/"+funcName, makeHTTPHandler(rpcFunc, logger)) } // JSONRPC endpoints - mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, cdc, logger))) + mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger))) } -/////////////////////////////////////////////////////////////////////////////// // Function introspection -/////////////////////////////////////////////////////////////////////////////// // RPCFunc contains the introspected type information for a function type RPCFunc struct { @@ -92,7 +87,7 @@ func funcReturnTypes(f interface{}) []reflect.Type { func unreflectResult(returns []reflect.Value) (interface{}, error) { errV := returns[1] if errV.Interface() != nil { - return nil, errors.Errorf("%v", errV.Interface()) + return nil, fmt.Errorf("%v", errV.Interface()) } rv := returns[0] // the result is a registered interface, diff --git a/rpc/lib/server/test.crt b/rpc/jsonrpc/server/test.crt similarity index 100% rename from rpc/lib/server/test.crt rename to rpc/jsonrpc/server/test.crt diff --git a/rpc/lib/server/test.key b/rpc/jsonrpc/server/test.key similarity index 100% rename from rpc/lib/server/test.key rename to rpc/jsonrpc/server/test.key diff --git a/rpc/lib/server/ws_handler.go b/rpc/jsonrpc/server/ws_handler.go similarity index 83% rename from rpc/lib/server/ws_handler.go rename to rpc/jsonrpc/server/ws_handler.go index e7048db79..e4e5d7504 100644 --- a/rpc/lib/server/ws_handler.go +++ b/rpc/jsonrpc/server/ws_handler.go @@ -1,8 +1,9 @@ -package rpcserver +package server import ( "context" "encoding/json" + "errors" "fmt" "net/http" "reflect" @@ -10,21 +11,16 @@ import ( "time" "github.com/gorilla/websocket" - "github.com/pkg/errors" - - amino "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - types "github.com/tendermint/tendermint/rpc/lib/types" + types "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) -/////////////////////////////////////////////////////////////////////////////// // WebSocket handler -/////////////////////////////////////////////////////////////////////////////// const ( - defaultWSWriteChanCapacity = 1000 + defaultWSWriteChanCapacity = 100 defaultWSWriteWait = 10 * time.Second defaultWSReadWait = 30 * time.Second defaultWSPingPeriod = (defaultWSReadWait * 9) / 10 @@ -37,7 +33,6 @@ type WebsocketManager struct { websocket.Upgrader funcMap map[string]*RPCFunc - cdc *amino.Codec logger log.Logger wsConnOptions []func(*wsConnection) } @@ -46,12 +41,10 @@ type WebsocketManager struct { // functions, connection options 
and logger to new WS connections. func NewWebsocketManager( funcMap map[string]*RPCFunc, - cdc *amino.Codec, wsConnOptions ...func(*wsConnection), ) *WebsocketManager { return &WebsocketManager{ funcMap: funcMap, - cdc: cdc, Upgrader: websocket.Upgrader{ CheckOrigin: func(r *http.Request) bool { // TODO ??? @@ -92,7 +85,7 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ }() // register connection - con := newWSConnection(wsConn, wm.funcMap, wm.cdc, wm.wsConnOptions...) + con := newWSConnection(wsConn, wm.funcMap, wm.wsConnOptions...) con.SetLogger(wm.logger.With("remote", wsConn.RemoteAddr())) wm.logger.Info("New websocket connection", "remote", con.remoteAddr) err = con.Start() // BLOCKING @@ -100,12 +93,12 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ wm.logger.Error("Failed to start connection", "err", err) return } - con.Stop() + if err := con.Stop(); err != nil { + wm.logger.Error("error while stopping connection", "error", err) + } } -/////////////////////////////////////////////////////////////////////////////// // WebSocket connection -/////////////////////////////////////////////////////////////////////////////// // A single websocket connection contains listener id, underlying ws // connection, and the event switch for subscribing to events. @@ -124,7 +117,6 @@ type wsConnection struct { readRoutineQuit chan struct{} funcMap map[string]*RPCFunc - cdc *amino.Codec // write channel capacity writeChanCapacity int @@ -157,14 +149,12 @@ type wsConnection struct { func newWSConnection( baseConn *websocket.Conn, funcMap map[string]*RPCFunc, - cdc *amino.Codec, options ...func(*wsConnection), ) *wsConnection { wsc := &wsConnection{ remoteAddr: baseConn.RemoteAddr().String(), baseConn: baseConn, funcMap: funcMap, - cdc: cdc, writeWait: defaultWSWriteWait, writeChanCapacity: defaultWSWriteChanCapacity, readWait: defaultWSReadWait, @@ -258,17 +248,22 @@ func (wsc *wsConnection) GetRemoteAddr() string { return wsc.remoteAddr } -// WriteRPCResponse pushes a response to the writeChan, and blocks until it is accepted. +// WriteRPCResponse pushes a response to the writeChan, and blocks until it is +// accepted. // It implements WSRPCConnection. It is Goroutine-safe. -func (wsc *wsConnection) WriteRPCResponse(resp types.RPCResponse) { +func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp types.RPCResponse) error { select { case <-wsc.Quit(): - return + return errors.New("connection was stopped") + case <-ctx.Done(): + return ctx.Err() case wsc.writeChan <- resp: + return nil } } -// TryWriteRPCResponse attempts to push a response to the writeChan, but does not block. +// TryWriteRPCResponse attempts to push a response to the writeChan, but does +// not block. // It implements WSRPCConnection. It is Goroutine-safe func (wsc *wsConnection) TryWriteRPCResponse(resp types.RPCResponse) bool { select { @@ -281,12 +276,6 @@ func (wsc *wsConnection) TryWriteRPCResponse(resp types.RPCResponse) bool { } } -// Codec returns an amino codec used to decode parameters and encode results. -// It implements WSRPCConnection. -func (wsc *wsConnection) Codec() *amino.Codec { - return wsc.cdc -} - // Context returns the connection's context. // The context is canceled when the client's connection closes. 
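Editor's note: with the Codec method gone, constructing the manager takes only the function map (plus options). A sketch of the wiring, pieced together from the tests later in this diff; the `/websocket` route and the echo method are illustrative assumptions:

```go
package server

import (
	"net/http"

	"github.com/tendermint/tendermint/libs/log"
	types "github.com/tendermint/tendermint/rpc/jsonrpc/types"
)

// exampleWebsocketMux shows the codec-free wiring; it is not part of the
// change set itself.
func exampleWebsocketMux() *http.ServeMux {
	funcMap := map[string]*RPCFunc{
		// Hypothetical echo method, for illustration only.
		"echo": NewWSRPCFunc(func(ctx *types.Context, s string) (string, error) {
			return s, nil
		}, "s"),
	}
	wm := NewWebsocketManager(funcMap)
	wm.SetLogger(log.TestingLogger())

	mux := http.NewServeMux()
	mux.HandleFunc("/websocket", wm.WebsocketHandler)
	return mux
}
```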
func (wsc *wsConnection) Context() context.Context { @@ -299,6 +288,9 @@ func (wsc *wsConnection) Context() context.Context { // Read from the socket and subscribe to or unsubscribe from events func (wsc *wsConnection) readRoutine() { + // readRoutine will block until response is written or WS connection is closed + writeCtx := context.Background() + defer func() { if r := recover(); r != nil { err, ok := r.(error) @@ -306,7 +298,9 @@ func (wsc *wsConnection) readRoutine() { err = fmt.Errorf("WSJSONRPC: %v", r) } wsc.Logger.Error("Panic in WSJSONRPC handler", "err", err, "stack", string(debug.Stack())) - wsc.WriteRPCResponse(types.RPCInternalError(types.JSONRPCIntID(-1), err)) + if err := wsc.WriteRPCResponse(writeCtx, types.RPCInternalError(types.JSONRPCIntID(-1), err)); err != nil { + wsc.Logger.Error("Error writing RPC response", "err", err) + } go wsc.readRoutine() } }() @@ -324,23 +318,29 @@ func (wsc *wsConnection) readRoutine() { if err := wsc.baseConn.SetReadDeadline(time.Now().Add(wsc.readWait)); err != nil { wsc.Logger.Error("failed to set read deadline", "err", err) } - var in []byte - _, in, err := wsc.baseConn.ReadMessage() + + _, r, err := wsc.baseConn.NextReader() if err != nil { if websocket.IsCloseError(err, websocket.CloseNormalClosure) { wsc.Logger.Info("Client closed the connection") } else { wsc.Logger.Error("Failed to read request", "err", err) } - wsc.Stop() + if err := wsc.Stop(); err != nil { + wsc.Logger.Error("Error closing websocket connection", "err", err) + } close(wsc.readRoutineQuit) return } + dec := json.NewDecoder(r) var request types.RPCRequest - err = json.Unmarshal(in, &request) + err = dec.Decode(&request) if err != nil { - wsc.WriteRPCResponse(types.RPCParseError(errors.Wrap(err, "error unmarshaling request"))) + if err := wsc.WriteRPCResponse(writeCtx, + types.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err))); err != nil { + wsc.Logger.Error("Error writing RPC response", "err", err) + } continue } @@ -357,18 +357,22 @@ func (wsc *wsConnection) readRoutine() { // Now, fetch the RPCFunc and execute it. rpcFunc := wsc.funcMap[request.Method] if rpcFunc == nil { - wsc.WriteRPCResponse(types.RPCMethodNotFoundError(request.ID)) + if err := wsc.WriteRPCResponse(writeCtx, types.RPCMethodNotFoundError(request.ID)); err != nil { + wsc.Logger.Error("Error writing RPC response", "err", err) + } continue } ctx := &types.Context{JSONReq: &request, WSConn: wsc} args := []reflect.Value{reflect.ValueOf(ctx)} if len(request.Params) > 0 { - fnArgs, err := jsonParamsToArgs(rpcFunc, wsc.cdc, request.Params) + fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) if err != nil { - wsc.WriteRPCResponse( - types.RPCInternalError(request.ID, errors.Wrap(err, "error converting json params to arguments")), - ) + if err := wsc.WriteRPCResponse(writeCtx, + types.RPCInternalError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), + ); err != nil { + wsc.Logger.Error("Error writing RPC response", "err", err) + } continue } args = append(args, fnArgs...) 
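Editor's note: every write in the read routine now goes through the context-aware `WriteRPCResponse`, which returns an error instead of silently dropping the response. A hedged sketch of the caller-side pattern; the ten-second timeout is an assumption for illustration (the routine above deliberately passes `context.Background()`, i.e. it blocks until the write is accepted or the connection stops):

```go
package server

import (
	"context"
	"time"

	"github.com/tendermint/tendermint/libs/log"
	types "github.com/tendermint/tendermint/rpc/jsonrpc/types"
)

// writeWithTimeout is an illustrative helper, not part of the change set:
// it bounds how long a caller waits for the write channel to accept resp.
func writeWithTimeout(conn types.WSRPCConnection, resp types.RPCResponse, logger log.Logger) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	if err := conn.WriteRPCResponse(ctx, resp); err != nil {
		logger.Error("Error writing RPC response", "err", err)
	}
}
```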
@@ -381,11 +385,15 @@ func (wsc *wsConnection) readRoutine() { result, err := unreflectResult(returns) if err != nil { - wsc.WriteRPCResponse(types.RPCInternalError(request.ID, err)) + if err := wsc.WriteRPCResponse(writeCtx, types.RPCInternalError(request.ID, err)); err != nil { + wsc.Logger.Error("Error writing RPC response", "err", err) + } continue } - wsc.WriteRPCResponse(types.NewRPCSuccessResponse(wsc.cdc, request.ID, result)) + if err := wsc.WriteRPCResponse(writeCtx, types.NewRPCSuccessResponse(request.ID, result)); err != nil { + wsc.Logger.Error("Error writing RPC response", "err", err) + } } } } @@ -393,9 +401,7 @@ func (wsc *wsConnection) readRoutine() { // receives on a write channel and writes out on the socket func (wsc *wsConnection) writeRoutine() { pingTicker := time.NewTicker(wsc.pingPeriod) - defer func() { - pingTicker.Stop() - }() + defer pingTicker.Stop() // https://github.com/gorilla/websocket/issues/97 pongs := make(chan string, 1) @@ -428,8 +434,10 @@ func (wsc *wsConnection) writeRoutine() { jsonBytes, err := json.MarshalIndent(msg, "", " ") if err != nil { wsc.Logger.Error("Failed to marshal RPCResponse to JSON", "err", err) - } else if err = wsc.writeMessageWithDeadline(websocket.TextMessage, jsonBytes); err != nil { - wsc.Logger.Error("Failed to write response", "msg", msg, "err", err) + continue + } + if err = wsc.writeMessageWithDeadline(websocket.TextMessage, jsonBytes); err != nil { + wsc.Logger.Error("Failed to write response", "err", err, "msg", msg) return } } diff --git a/rpc/lib/server/ws_handler_test.go b/rpc/jsonrpc/server/ws_handler_test.go similarity index 87% rename from rpc/lib/server/ws_handler_test.go rename to rpc/jsonrpc/server/ws_handler_test.go index f58e17ee7..42a96d1d3 100644 --- a/rpc/lib/server/ws_handler_test.go +++ b/rpc/jsonrpc/server/ws_handler_test.go @@ -1,4 +1,4 @@ -package rpcserver +package server import ( "net/http" @@ -8,10 +8,8 @@ import ( "github.com/gorilla/websocket" "github.com/stretchr/testify/require" - amino "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/lib/types" + types "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func TestWebsocketManagerHandler(t *testing.T) { @@ -29,7 +27,6 @@ func TestWebsocketManagerHandler(t *testing.T) { // check basic functionality works req, err := types.MapToRequest( - amino.NewCodec(), types.JSONRPCStringID("TestWebsocketManager"), "c", map[string]interface{}{"s": "a", "i": 10}, @@ -49,7 +46,7 @@ func newWSServer() *httptest.Server { funcMap := map[string]*RPCFunc{ "c": NewWSRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), } - wm := NewWebsocketManager(funcMap, amino.NewCodec()) + wm := NewWebsocketManager(funcMap) wm.SetLogger(log.TestingLogger()) mux := http.NewServeMux() diff --git a/rpc/lib/test/data.json b/rpc/jsonrpc/test/data.json similarity index 100% rename from rpc/lib/test/data.json rename to rpc/jsonrpc/test/data.json diff --git a/rpc/lib/test/integration_test.sh b/rpc/jsonrpc/test/integration_test.sh similarity index 100% rename from rpc/lib/test/integration_test.sh rename to rpc/jsonrpc/test/integration_test.sh diff --git a/rpc/lib/test/main.go b/rpc/jsonrpc/test/main.go similarity index 72% rename from rpc/lib/test/main.go rename to rpc/jsonrpc/test/main.go index a7141048c..fe3ffb769 100644 --- a/rpc/lib/test/main.go +++ b/rpc/jsonrpc/test/main.go @@ -5,12 +5,10 @@ import ( "net/http" "os" - amino "github.com/tendermint/go-amino" - 
"github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" - rpcserver "github.com/tendermint/tendermint/rpc/lib/server" - rpctypes "github.com/tendermint/tendermint/rpc/lib/types" + rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) var routes = map[string]*rpcserver.RPCFunc{ @@ -28,18 +26,20 @@ type Result struct { func main() { var ( mux = http.NewServeMux() - cdc = amino.NewCodec() logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) ) // Stop upon receiving SIGTERM or CTRL-C. tmos.TrapSignal(logger, func() {}) - rpcserver.RegisterRPCFuncs(mux, routes, cdc, logger) + rpcserver.RegisterRPCFuncs(mux, routes, logger) config := rpcserver.DefaultConfig() listener, err := rpcserver.Listen("tcp://127.0.0.1:8008", config) if err != nil { tmos.Exit(err.Error()) } - rpcserver.StartHTTPServer(listener, mux, logger, config) + + if err = rpcserver.Serve(listener, mux, logger, config); err != nil { + tmos.Exit(err.Error()) + } } diff --git a/rpc/lib/types/types.go b/rpc/jsonrpc/types/types.go similarity index 87% rename from rpc/lib/types/types.go rename to rpc/jsonrpc/types/types.go index 923dc8c46..54d17155c 100644 --- a/rpc/lib/types/types.go +++ b/rpc/jsonrpc/types/types.go @@ -1,4 +1,4 @@ -package rpctypes +package types import ( "context" @@ -8,9 +8,7 @@ import ( "reflect" "strings" - "github.com/pkg/errors" - - amino "github.com/tendermint/go-amino" + tmjson "github.com/tendermint/tendermint/libs/json" ) // a wrapper to emulate a sum type: jsonrpcid = string | int @@ -96,17 +94,17 @@ func (req RPCRequest) String() string { return fmt.Sprintf("RPCRequest{%s %s/%X}", req.ID, req.Method, req.Params) } -func MapToRequest(cdc *amino.Codec, id jsonrpcid, method string, params map[string]interface{}) (RPCRequest, error) { +func MapToRequest(id jsonrpcid, method string, params map[string]interface{}) (RPCRequest, error) { var paramsMap = make(map[string]json.RawMessage, len(params)) for name, value := range params { - valueJSON, err := cdc.MarshalJSON(value) + valueJSON, err := tmjson.Marshal(value) if err != nil { return RPCRequest{}, err } paramsMap[name] = valueJSON } - payload, err := json.Marshal(paramsMap) // NOTE: Amino doesn't handle maps yet. + payload, err := json.Marshal(paramsMap) if err != nil { return RPCRequest{}, err } @@ -114,17 +112,17 @@ func MapToRequest(cdc *amino.Codec, id jsonrpcid, method string, params map[stri return NewRPCRequest(id, method, payload), nil } -func ArrayToRequest(cdc *amino.Codec, id jsonrpcid, method string, params []interface{}) (RPCRequest, error) { +func ArrayToRequest(id jsonrpcid, method string, params []interface{}) (RPCRequest, error) { var paramsMap = make([]json.RawMessage, len(params)) for i, value := range params { - valueJSON, err := cdc.MarshalJSON(value) + valueJSON, err := tmjson.Marshal(value) if err != nil { return RPCRequest{}, err } paramsMap[i] = valueJSON } - payload, err := json.Marshal(paramsMap) // NOTE: Amino doesn't handle maps yet. 
+ payload, err := json.Marshal(paramsMap) if err != nil { return RPCRequest{}, err } @@ -182,14 +180,14 @@ func (resp *RPCResponse) UnmarshalJSON(data []byte) error { return nil } -func NewRPCSuccessResponse(cdc *amino.Codec, id jsonrpcid, res interface{}) RPCResponse { +func NewRPCSuccessResponse(id jsonrpcid, res interface{}) RPCResponse { var rawMsg json.RawMessage if res != nil { var js []byte - js, err := cdc.MarshalJSON(res) + js, err := tmjson.Marshal(res) if err != nil { - return RPCInternalError(id, errors.Wrap(err, "Error marshalling response")) + return RPCInternalError(id, fmt.Errorf("error marshalling response: %w", err)) } rawMsg = json.RawMessage(js) } @@ -207,7 +205,7 @@ func NewRPCErrorResponse(id jsonrpcid, code int, msg string, data string) RPCRes func (resp RPCResponse) String() string { if resp.Error == nil { - return fmt.Sprintf("RPCResponse{%s %v}", resp.ID, resp.Result) + return fmt.Sprintf("RPCResponse{%s %X}", resp.ID, resp.Result) } return fmt.Sprintf("RPCResponse{%s %v}", resp.ID, resp.Error) } @@ -248,12 +246,10 @@ func RPCServerError(id jsonrpcid, err error) RPCResponse { type WSRPCConnection interface { // GetRemoteAddr returns a remote address of the connection. GetRemoteAddr() string - // WriteRPCResponse writes the resp onto connection (BLOCKING). - WriteRPCResponse(resp RPCResponse) - // TryWriteRPCResponse tries to write the resp onto connection (NON-BLOCKING). - TryWriteRPCResponse(resp RPCResponse) bool - // Codec returns an Amino codec used. - Codec() *amino.Codec + // WriteRPCResponse writes the response onto connection (BLOCKING). + WriteRPCResponse(context.Context, RPCResponse) error + // TryWriteRPCResponse tries to write the response onto connection (NON-BLOCKING). + TryWriteRPCResponse(RPCResponse) bool // Context returns the connection's context. 
Context() context.Context } diff --git a/rpc/lib/types/types_test.go b/rpc/jsonrpc/types/types_test.go similarity index 88% rename from rpc/lib/types/types_test.go rename to rpc/jsonrpc/types/types_test.go index 4597b0481..8434f208b 100644 --- a/rpc/lib/types/types_test.go +++ b/rpc/jsonrpc/types/types_test.go @@ -1,14 +1,12 @@ -package rpctypes +package types import ( "encoding/json" - "testing" - + "errors" "fmt" + "testing" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" - amino "github.com/tendermint/go-amino" ) type SampleResult struct { @@ -33,10 +31,9 @@ var responseTests = []responseTest{ func TestResponses(t *testing.T) { assert := assert.New(t) - cdc := amino.NewCodec() for _, tt := range responseTests { jsonid := tt.id - a := NewRPCSuccessResponse(cdc, jsonid, &SampleResult{"hello"}) + a := NewRPCSuccessResponse(jsonid, &SampleResult{"hello"}) b, _ := json.Marshal(a) s := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, tt.expected) assert.Equal(s, string(b)) @@ -55,7 +52,6 @@ func TestResponses(t *testing.T) { func TestUnmarshallResponses(t *testing.T) { assert := assert.New(t) - cdc := amino.NewCodec() for _, tt := range responseTests { response := &RPCResponse{} err := json.Unmarshal( @@ -63,7 +59,7 @@ func TestUnmarshallResponses(t *testing.T) { response, ) assert.Nil(err) - a := NewRPCSuccessResponse(cdc, tt.id, &SampleResult{"hello"}) + a := NewRPCSuccessResponse(tt.id, &SampleResult{"hello"}) assert.Equal(*response, a) } response := &RPCResponse{} diff --git a/rpc/lib/client/http_uri_client.go b/rpc/lib/client/http_uri_client.go deleted file mode 100644 index ecaee7ad3..000000000 --- a/rpc/lib/client/http_uri_client.go +++ /dev/null @@ -1,81 +0,0 @@ -package rpcclient - -import ( - "io/ioutil" - "net/http" - - "github.com/pkg/errors" - - amino "github.com/tendermint/go-amino" - - types "github.com/tendermint/tendermint/rpc/lib/types" -) - -const ( - // URIClientRequestID in a request ID used by URIClient - URIClientRequestID = types.JSONRPCIntID(-1) -) - -// URIClient is a JSON-RPC client, which sends POST form HTTP requests to the -// remote server. -// -// Request values are amino encoded. Response is expected to be amino encoded. -// New amino codec is used if no other codec was set using SetCodec. -// -// URIClient is safe for concurrent use by multiple goroutines. -type URIClient struct { - address string - client *http.Client - cdc *amino.Codec -} - -var _ HTTPClient = (*URIClient)(nil) - -// NewURIClient returns a new client. -// An error is returned on invalid remote. -// The function panics when remote is nil. -func NewURIClient(remote string) (*URIClient, error) { - parsedURL, err := newParsedURL(remote) - if err != nil { - return nil, err - } - - httpClient, err := DefaultHTTPClient(remote) - if err != nil { - return nil, err - } - - parsedURL.SetDefaultSchemeHTTP() - - uriClient := &URIClient{ - address: parsedURL.GetTrimmedURL(), - client: httpClient, - cdc: amino.NewCodec(), - } - - return uriClient, nil -} - -// Call issues a POST form HTTP request. 
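Editor's note: the URI client being deleted below carried an amino codec purely for parameter encoding; its successor is outside this excerpt. On the new code path, request construction is codec-free, as the types diff above shows. A minimal sketch, with method name and params as hypothetical examples:

```go
package types

import "fmt"

// exampleMapToRequest is illustrative only: parameter values are
// marshalled via libs/json internally, so no codec is threaded through.
func exampleMapToRequest() {
	req, err := MapToRequest(
		JSONRPCStringID("example"),
		"block",
		map[string]interface{}{"height": 5},
	)
	if err != nil {
		fmt.Println("building request failed:", err)
		return
	}
	fmt.Println(req.String())
}
```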
-func (c *URIClient) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) { - values, err := argsToURLValues(c.cdc, params) - if err != nil { - return nil, errors.Wrap(err, "failed to encode params") - } - - resp, err := c.client.PostForm(c.address+"/"+method, values) - if err != nil { - return nil, errors.Wrap(err, "PostForm failed") - } - defer resp.Body.Close() // nolint: errcheck - - responseBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, errors.Wrap(err, "failed to read response body") - } - - return unmarshalResponseBytes(c.cdc, responseBytes, URIClientRequestID, result) -} - -func (c *URIClient) Codec() *amino.Codec { return c.cdc } -func (c *URIClient) SetCodec(cdc *amino.Codec) { c.cdc = cdc } diff --git a/rpc/lib/server/http_params.go b/rpc/lib/server/http_params.go deleted file mode 100644 index 075afb666..000000000 --- a/rpc/lib/server/http_params.go +++ /dev/null @@ -1,91 +0,0 @@ -package rpcserver - -import ( - "encoding/hex" - "net/http" - "regexp" - "strconv" - - "github.com/pkg/errors" -) - -var ( - // Parts of regular expressions - atom = "[A-Z0-9!#$%&'*+\\-/=?^_`{|}~]+" - dotAtom = atom + `(?:\.` + atom + `)*` - domain = `[A-Z0-9.-]+\.[A-Z]{2,4}` - - ReInt = regexp.MustCompile(`^-?[0-9]+$`) - ReHex = regexp.MustCompile(`^(?i)[a-f0-9]+$`) - ReEmail = regexp.MustCompile(`^(?i)(` + dotAtom + `)@(` + dotAtom + `)$`) - ReAddress = regexp.MustCompile(`^(?i)[a-z0-9]{25,34}$`) - ReHost = regexp.MustCompile(`^(?i)(` + domain + `)$`) - - //RE_ID12 = regexp.MustCompile(`^[a-zA-Z0-9]{12}$`) -) - -func GetParam(r *http.Request, param string) string { - s := r.URL.Query().Get(param) - if s == "" { - s = r.FormValue(param) - } - return s -} - -func GetParamByteSlice(r *http.Request, param string) ([]byte, error) { - s := GetParam(r, param) - return hex.DecodeString(s) -} - -func GetParamInt64(r *http.Request, param string) (int64, error) { - s := GetParam(r, param) - i, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return 0, errors.Errorf(param, err.Error()) - } - return i, nil -} - -func GetParamInt32(r *http.Request, param string) (int32, error) { - s := GetParam(r, param) - i, err := strconv.ParseInt(s, 10, 32) - if err != nil { - return 0, errors.Errorf(param, err.Error()) - } - return int32(i), nil -} - -func GetParamUint64(r *http.Request, param string) (uint64, error) { - s := GetParam(r, param) - i, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, errors.Errorf(param, err.Error()) - } - return i, nil -} - -func GetParamUint(r *http.Request, param string) (uint, error) { - s := GetParam(r, param) - i, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, errors.Errorf(param, err.Error()) - } - return uint(i), nil -} - -func GetParamRegexp(r *http.Request, param string, re *regexp.Regexp) (string, error) { - s := GetParam(r, param) - if !re.MatchString(s) { - return "", errors.Errorf(param, "did not match regular expression %v", re.String()) - } - return s, nil -} - -func GetParamFloat64(r *http.Request, param string) (float64, error) { - s := GetParam(r, param) - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return 0, errors.Errorf(param, err.Error()) - } - return f, nil -} diff --git a/rpc/lib/server/http_server_test.go b/rpc/lib/server/http_server_test.go deleted file mode 100644 index b463aa6a8..000000000 --- a/rpc/lib/server/http_server_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package rpcserver - -import ( - "crypto/tls" - "fmt" - "io" - "io/ioutil" - "net" - 
"net/http" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/libs/log" -) - -func TestMaxOpenConnections(t *testing.T) { - const max = 5 // max simultaneous connections - - // Start the server. - var open int32 - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - if n := atomic.AddInt32(&open, 1); n > int32(max) { - t.Errorf("%d open connections, want <= %d", n, max) - } - defer atomic.AddInt32(&open, -1) - time.Sleep(10 * time.Millisecond) - fmt.Fprint(w, "some body") - }) - config := DefaultConfig() - config.MaxOpenConnections = max - l, err := Listen("tcp://127.0.0.1:0", config) - require.NoError(t, err) - defer l.Close() - go StartHTTPServer(l, mux, log.TestingLogger(), config) - - // Make N GET calls to the server. - attempts := max * 2 - var wg sync.WaitGroup - var failed int32 - for i := 0; i < attempts; i++ { - wg.Add(1) - go func() { - defer wg.Done() - c := http.Client{Timeout: 3 * time.Second} - r, err := c.Get("http://" + l.Addr().String()) - if err != nil { - t.Log(err) - atomic.AddInt32(&failed, 1) - return - } - defer r.Body.Close() - io.Copy(ioutil.Discard, r.Body) - }() - } - wg.Wait() - - // We expect some Gets to fail as the server's accept queue is filled, - // but most should succeed. - if int(failed) >= attempts/2 { - t.Errorf("%d requests failed within %d attempts", failed, attempts) - } -} - -func TestStartHTTPAndTLSServer(t *testing.T) { - ln, err := net.Listen("tcp", "localhost:0") - require.NoError(t, err) - defer ln.Close() - - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, "some body") - }) - - go StartHTTPAndTLSServer(ln, mux, "test.crt", "test.key", log.TestingLogger(), DefaultConfig()) - - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // nolint: gosec - } - c := &http.Client{Transport: tr} - res, err := c.Get("https://" + ln.Addr().String()) - require.NoError(t, err) - defer res.Body.Close() - assert.Equal(t, http.StatusOK, res.StatusCode) - - body, err := ioutil.ReadAll(res.Body) - require.NoError(t, err) - assert.Equal(t, []byte("some body"), body) -} diff --git a/rpc/openapi/index.html b/rpc/openapi/index.html new file mode 100644 index 000000000..f4e5d0a12 --- /dev/null +++ b/rpc/openapi/index.html @@ -0,0 +1,27 @@ + + + + + + + Tendermint RPC + + + + + + +
+ + + + diff --git a/rpc/swagger/swagger.yaml b/rpc/openapi/openapi.yaml similarity index 69% rename from rpc/swagger/swagger.yaml rename to rpc/openapi/openapi.yaml index f2631c2e9..7473b8620 100644 --- a/rpc/swagger/swagger.yaml +++ b/rpc/openapi/openapi.yaml @@ -1,7 +1,56 @@ openapi: 3.0.0 info: - title: RPC client for Tendermint - description: A REST interface for state queries, transaction generation and broadcasting. + title: Tendermint RPC + contact: + name: Tendermint RPC + url: https://github.com/tendermint/tendermint/issues/new/choose + description: | + Tendermint supports the following RPC protocols: + + * URI over HTTP + * JSONRPC over HTTP + * JSONRPC over websockets + + ## Configuration + + RPC can be configured by tuning parameters under `[rpc]` table in the + `$TMHOME/config/config.toml` file or by using the `--rpc.X` command-line + flags. + + Default rpc listen address is `tcp://0.0.0.0:26657`. + To set another address, set the `laddr` config parameter to desired value. + CORS (Cross-Origin Resource Sharing) can be enabled by setting + `cors_allowed_origins`, `cors_allowed_methods`, `cors_allowed_headers` + config parameters. + + ## Arguments + + Arguments which expect strings or byte arrays may be passed as quoted + strings, like `"abc"` or as `0x`-prefixed strings, like `0x616263`. + + ## URI/HTTP + + A REST like interface. + + curl localhost:26657/block?height=5 + + ## JSONRPC/HTTP + + JSONRPC requests can be POST'd to the root RPC endpoint via HTTP. + + curl --header "Content-Type: application/json" --request POST --data '{"method": "block", "params": ["5"], "id": 1}' localhost:26657 + + ## JSONRPC/websockets + + JSONRPC requests can be also made via websocket. + The websocket endpoint is at `/websocket`, e.g. `localhost:26657/websocket`. + Asynchronous RPC functions like event `subscribe` and `unsubscribe` are + only available via websockets. + + Example using https://github.com/hashrocket/ws: + + ws ws://localhost:26657/websocket + > { "jsonrpc": "2.0", "method": "subscribe", "params": ["tm.event='NewBlock'"], "id": 1 } version: "Master" license: name: Apache 2.0 @@ -22,6 +71,8 @@ tags: description: ABCI APIs - name: Evidence description: Evidence APIs + - name: Unsafe + description: Unsafe APIs paths: /broadcast_tx_sync: get: @@ -52,16 +103,16 @@ paths: required: true schema: type: string - example: "456" + example: "456" description: The transaction responses: - 200: + "200": description: Empty content: application/json: schema: $ref: "#/components/schemas/BroadcastTxResponse" - 500: + "500": description: Error content: application/json: @@ -99,13 +150,13 @@ paths: example: "123" description: The transaction responses: - 200: + "200": description: empty answer content: application/json: schema: $ref: "#/components/schemas/BroadcastTxResponse" - 500: + "500": description: empty error content: application/json: @@ -141,13 +192,46 @@ paths: example: "785" description: The transaction responses: - 200: + "200": description: empty answer content: application/json: schema: $ref: "#/components/schemas/BroadcastTxCommitResponse" - 500: + "500": + description: empty error + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + /check_tx: + get: + summary: Checks the transaction without executing it. + tags: + - Tx + operationId: check_tx + description: | + The transaction won't be added to the mempool. + + Please refer to + https://docs.tendermint.com/master/tendermint-core/using-tendermint.html#formatting + for formatting/encoding rules. 
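Editor's note: the spec's introduction above demonstrates the JSONRPC/HTTP transport with curl; the same call in Go, as a hedged sketch (address and method mirror the curl example, and the explicit "jsonrpc" field is an assumption):

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// A JSON body POSTed to the root RPC endpoint, as in the curl example.
	body := []byte(`{"jsonrpc": "2.0", "method": "block", "params": ["5"], "id": 1}`)
	resp, err := http.Post("http://localhost:26657", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err) // illustration only
	}
	defer resp.Body.Close()

	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(out))
}
```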
+ parameters: + - in: query + name: tx + required: true + schema: + type: string + example: "785" + description: The transaction + responses: + "200": + description: ABCI application's CheckTx response + content: + application/json: + schema: + $ref: "#/components/schemas/CheckTxResponse" + "500": description: empty error content: application/json: @@ -187,28 +271,28 @@ paths: Events: []abci.Event{ { Type: "rewards.withdraw", - Attributes: kv.Pairs{ - kv.Pair{Key: []byte("address"), Value: []byte("AddrA")}, - kv.Pair{Key: []byte("source"), Value: []byte("SrcX")}, - kv.Pair{Key: []byte("amount"), Value: []byte("...")}, - kv.Pair{Key: []byte("balance"), Value: []byte("...")}, + Attributes: abci.EventAttribute{ + {Key: []byte("address"), Value: []byte("AddrA"), Index: true}, + {Key: []byte("source"), Value: []byte("SrcX"), Index: true}, + {Key: []byte("amount"), Value: []byte("..."), Index: true}, + {Key: []byte("balance"), Value: []byte("..."), Index: true}, }, }, { Type: "rewards.withdraw", - Attributes: kv.Pairs{ - kv.Pair{Key: []byte("address"), Value: []byte("AddrB")}, - kv.Pair{Key: []byte("source"), Value: []byte("SrcY")}, - kv.Pair{Key: []byte("amount"), Value: []byte("...")}, - kv.Pair{Key: []byte("balance"), Value: []byte("...")}, + Attributes: abci.EventAttribute{ + {Key: []byte("address"), Value: []byte("AddrB"), Index: true}, + {Key: []byte("source"), Value: []byte("SrcY"), Index: true}, + {Key: []byte("amount"), Value: []byte("..."), Index: true}, + {Key: []byte("balance"), Value: []byte("..."), Index: true}, }, }, { Type: "transfer", - Attributes: kv.Pairs{ - kv.Pair{Key: []byte("sender"), Value: []byte("AddrC")}, - kv.Pair{Key: []byte("recipient"), Value: []byte("AddrD")}, - kv.Pair{Key: []byte("amount"), Value: []byte("...")}, + Attributes: abci.EventAttribute{ + {Key: []byte("sender"), Value: []byte("AddrC"), Index: true}, + {Key: []byte("recipient"), Value: []byte("AddrD"), Index: true}, + {Key: []byte("amount"), Value: []byte("..."), Index: true}, }, }, }, @@ -276,6 +360,9 @@ paths: } }() ``` + + NOTE: if you're not reading events fast enough, Tendermint might + terminate the subscription. parameters: - in: query name: query @@ -290,13 +377,13 @@ paths: operation can be "=", "<", "<=", ">", ">=", "CONTAINS". operand can be a string (escaped with single quotes), number, date or time. responses: - 200: + "200": description: empty answer content: application/json: schema: $ref: "#/components/schemas/EmptyResponse" - 500: + "500": description: empty error content: application/json: @@ -336,13 +423,13 @@ paths: operation can be "=", "<", "<=", ">", ">=", "CONTAINS". operand can be a string (escaped with single quotes), number, date or time. responses: - 200: + "200": description: Answer content: application/json: schema: $ref: "#/components/schemas/EmptyResponse" - 500: + "500": description: Error content: application/json: @@ -357,13 +444,13 @@ paths: description: | Unsubscribe from all events via WebSocket responses: - 200: + "200": description: empty answer content: application/json: schema: $ref: "#/components/schemas/EmptyResponse" - 500: + "500": description: empty error content: application/json: @@ -378,13 +465,13 @@ paths: description: | Get node health. Returns empty result (200 OK) on success, no response - in case of an error. 
responses: - 200: + "200": description: Gets Node Health content: application/json: schema: $ref: "#/components/schemas/EmptyResponse" - 500: + "500": description: empty error content: application/json: @@ -399,13 +486,13 @@ paths: description: | Get Tendermint status including node info, pubkey, latest block hash, app hash, block height and time. responses: - 200: + "200": description: Status of the node content: application/json: schema: $ref: "#/components/schemas/StatusResponse" - 500: + "500": description: empty error content: application/json: @@ -420,69 +507,97 @@ paths: description: | Get network info. responses: - 200: + "200": description: empty answer content: application/json: schema: $ref: "#/components/schemas/NetInfoResponse" - 500: + "500": description: empty error content: application/json: schema: $ref: "#/components/schemas/ErrorResponse" /dial_seeds: - post: + get: summary: Dial Seeds (Unsafe) operationId: dial_seeds tags: - - unsafe + - Unsafe description: | Dial a peer, this route in under unsafe, and has to manually enabled to use + + **Example:** curl 'localhost:26657/dial_seeds?seeds=\["f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656","0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:26656"\]' parameters: - - description: string of possible peers - in: body - name: Array of peers to connect to - required: true + - in: query + name: peers + description: list of seed nodes to dial schema: - $ref: "#/components/schemas/dialSeedsPost" + type: array + items: + type: string + example: + ["f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656"] responses: - 200: + "200": description: Dialing seeds in progress. See /net_info for details content: application/json: schema: $ref: "#/components/schemas/dialResp" - 500: + "500": description: empty error content: application/json: schema: $ref: "#/components/schemas/ErrorResponse" /dial_peers: - post: + get: summary: Add Peers/Persistent Peers (unsafe) operationId: dial_peers tags: - - unsafe + - Unsafe description: | - Set a persistent peer, this route in under unsafe, and has to manually enabled to use + Set a persistent peer, this route in under unsafe, and has to manually enabled to use. + + **Example:** curl 'localhost:26657/dial_peers?peers=\["f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656","0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:26656"\]&persistent=false' parameters: - - description: string of possible peers, bool argument if they should be added as persistent - in: body - name: Array of peers to connect to & if they should be persistent - required: true + - in: query + name: persistent + description: Have the peers you are dialing be persistent + schema: + type: boolean + example: true + - in: query + name: unconditional + description: Have the peers you are dialing be unconditional + schema: + type: boolean + example: true + - in: query + name: private + description: Have the peers you are dialing be private + schema: + type: boolean + example: true + - in: query + name: peers + description: array of peers to dial schema: - $ref: "#/components/schemas/dialPeersPost" + type: array + items: + type: string + example: + ["f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656"] responses: - 200: + "200": description: Dialing seeds in progress. 
See /net_info for details content: application/json: schema: $ref: "#/components/schemas/dialResp" - 500: + "500": description: empty error content: application/json: schema: @@ -490,33 +605,40 @@ paths: $ref: "#/components/schemas/ErrorResponse" /blockchain: get: - summary: Get block headers for minHeight <= height <= maxHeight. + summary: "Get block headers (max: 20) for minHeight <= height <= maxHeight." operationId: blockchain parameters: - in: query name: minHeight description: Minimum block height to return schema: - type: number + type: integer example: 1 - in: query name: maxHeight description: Maximum block height to return schema: - type: number + type: integer example: 2 tags: - Info description: | - Get Blockchain info. + Get block headers for minHeight <= height <= maxHeight. + + If maxHeight does not yet exist, blocks up to the current height will + be returned. If minHeight does not exist (due to pruning), earliest + existing height will be used. + + At most 20 items will be returned. Block headers are returned in + descending order (highest first). responses: - 200: + "200": description: Block headers, returned in descending order (highest first). content: application/json: schema: $ref: "#/components/schemas/BlockchainResponse" - 500: + "500": description: Error content: application/json: @@ -530,7 +652,7 @@ paths: - in: query name: height schema: - type: number + type: integer default: 0 example: 1 description: height to return. If no height is provided, it will fetch the latest block. @@ -539,13 +661,13 @@ paths: description: | Get Block. responses: - 200: + "200": description: Block information. content: application/json: schema: $ref: "#/components/schemas/BlockResponse" - 500: + "500": description: Error content: application/json: @@ -568,13 +690,13 @@ paths: description: | Get Block By Hash. responses: - 200: + "200": description: Block information. content: application/json: schema: $ref: "#/components/schemas/BlockResponse" - 500: + "500": description: Error content: application/json: @@ -589,7 +711,7 @@ paths: name: height description: height to return. If no height is provided, it will fetch information regarding the latest block. schema: - type: number + type: integer default: 0 example: 1 tags: @@ -597,13 +719,13 @@ paths: description: | Get block_results. responses: - 200: + "200": description: Block results. content: application/json: schema: $ref: "#/components/schemas/BlockResultsResponse" - 500: + "500": description: Error content: application/json: @@ -618,7 +740,7 @@ paths: name: height description: height to return. If no height is provided, it will fetch commit information regarding the latest block. schema: - type: number + type: integer default: 0 example: 1 tags: @@ -626,13 +748,16 @@ paths: description: | Get Commit. responses: - 200: - description: Commit results. + "200": + description: | + Commit results. + + canonical switches from false to true for block H once block H+1 has been committed. Until then it's subjective and only reflects what this node has seen so far. content: application/json: schema: $ref: "#/components/schemas/CommitResponse" - 500: + "500": description: Error content: application/json: @@ -647,7 +772,7 @@ paths: name: height description: height to return. If no height is provided, it will fetch validator set which corresponds to the latest block.
schema: - type: number + type: integer default: 0 example: 1 - in: query @@ -655,29 +780,29 @@ paths: description: "Page number (1-based)" required: false schema: - type: number - default: 0 + type: integer + default: 1 example: 1 - in: query name: per_page description: "Number of entries per page (max: 100)" required: false schema: - type: number + type: integer example: 30 default: 30 tags: - Info description: | - Get Validators. + Get Validators. Validators are sorted first by voting power (descending), then by address (ascending). responses: - 200: + "200": description: Commit results. content: application/json: schema: $ref: "#/components/schemas/ValidatorsResponse" - 500: + "500": description: Error content: application/json: @@ -692,13 +817,13 @@ paths: description: | Get genesis. responses: - 200: + "200": description: Genesis results. content: application/json: schema: $ref: "#/components/schemas/GenesisResponse" - 500: + "500": description: Error content: application/json: @@ -712,14 +837,19 @@ paths: - Info description: | Get consensus state. + + Not safe to call from inside the ABCI application during a block execution. responses: - 200: - description: consensus state results. + "200": + description: | + Complete consensus state. + + See https://pkg.go.dev/github.com/tendermint/tendermint/types?tab=doc#Vote.String for Vote string description. content: application/json: schema: $ref: "#/components/schemas/DumpConsensusResponse" - 500: + "500": description: Error content: application/json: @@ -733,14 +863,16 @@ paths: - Info description: | Get consensus state. + + Not safe to call from inside the ABCI application during a block execution. responses: - 200: + "200": description: consensus state results. content: application/json: schema: $ref: "#/components/schemas/ConsensusStateResponse" - 500: + "500": description: Error content: application/json: @@ -755,7 +887,7 @@ paths: name: height description: height to return. If no height is provided, it will fetch commit informations regarding the latest block. schema: - type: number + type: integer default: 0 example: 1 tags: @@ -763,13 +895,13 @@ paths: description: | Get consensus parameters. responses: - 200: + "200": description: consensus parameters results. content: application/json: schema: $ref: "#/components/schemas/ConsensusParamsResponse" - 500: + "500": description: Error content: application/json: @@ -782,22 +914,24 @@ paths: parameters: - in: query name: limit - description: Maximum number of unconfirmed transactions to return + description: Maximum number of unconfirmed transactions to return (max 100) + required: false schema: - type: number + type: integer + default: 30 example: 1 tags: - Info description: | Get list of unconfirmed transactions responses: - 200: + "200": description: List of unconfirmed transactions content: application/json: schema: $ref: "#/components/schemas/UnconfirmedTransactionsResponse" - 500: + "500": description: Error content: application/json: @@ -812,13 +946,13 @@ paths: description: | Get data about unconfirmed transactions responses: - 200: + "200": description: status about unconfirmed transactions content: application/json: schema: $ref: "#/components/schemas/NumUnconfirmedTransactionsResponse" - 500: + "500": description: Error content: application/json: @@ -827,6 +961,10 @@ paths: /tx_search: get: summary: Search for transactions + description: | + Search for transactions w/ their results. + + See /subscribe for the query syntax. 
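Editor's note: a quick sketch of driving this endpoint over the URI interface; the address, query, and paging values are illustrative (note that the query itself travels as a quoted string, per the argument rules in the introduction):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("query", `"tx.height=1000"`) // quoted, like other string arguments
	q.Set("per_page", "30")

	resp, err := http.Get("http://localhost:26657/tx_search?" + q.Encode())
	if err != nil {
		panic(err) // illustration only
	}
	defer resp.Body.Close()

	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(out))
}
```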
operationId: tx_search parameters: - in: query @@ -849,7 +987,7 @@ paths: description: "Page number (1-based)" required: false schema: - type: number + type: integer default: 1 example: 1 - in: query @@ -857,7 +995,7 @@ paths: description: "Number of entries per page (max: 100)" required: false schema: - type: number + type: integer default: 30 example: 30 - in: query @@ -870,16 +1008,14 @@ paths: example: "asc" tags: - Info - description: | - Get list of unconfirmed transactions responses: - 200: + "200": description: List of unconfirmed transactions content: application/json: schema: $ref: "#/components/schemas/TxSearchResponse" - 500: + "500": description: Error content: application/json: @@ -910,13 +1046,13 @@ paths: description: | Get a trasasction responses: - 200: + "200": description: Get a transaction` content: application/json: schema: $ref: "#/components/schemas/TxResponse" - 500: + "500": description: Error content: application/json: @@ -931,13 +1067,13 @@ paths: description: | Get some info about the application. responses: - 200: + "200": description: Get some info about the application. content: application/json: schema: $ref: "#/components/schemas/ABCIInfoResponse" - 500: + "500": description: Error content: application/json: @@ -967,7 +1103,7 @@ paths: description: Height (0 means latest) required: false schema: - type: number + type: integer example: 1 default: 0 - in: query @@ -983,13 +1119,13 @@ paths: description: | Query the application for some information. responses: - 200: + "200": description: Response of the submitted query content: application/json: schema: $ref: "#/components/schemas/ABCIQueryResponse" - 500: + "500": description: Error content: application/json: @@ -1002,23 +1138,23 @@ paths: parameters: - in: query name: evidence - description: Amino-encoded JSON evidence + description: JSON evidence required: true schema: type: string - example: "JSON_EVIDENCE_Amino_encoded" + example: "JSON_EVIDENCE_encoded" tags: - - Info + - Evidence description: | Broadcast evidence of the misbehavior. responses: - 200: + "200": description: Broadcast evidence of the misbehavior. 
content: application/json: schema: $ref: "#/components/schemas/BroadcastEvidenceResponse" - 500: + "500": description: Error content: application/json: @@ -1031,7 +1167,7 @@ components: type: object properties: id: - type: number + type: integer example: 0 jsonrpc: type: string @@ -1107,7 +1243,6 @@ components: rpc_address: type: string example: "tcp:0.0.0.0:26657" - example: "moniker-node" SyncInfo: type: object properties: @@ -1123,18 +1258,18 @@ components: latest_block_time: type: string example: "2019-08-01T11:52:22.818762194Z" - earliest_block_hash: - type: string - example: "790BA84C3545FCCC49A5C629CEE6EA58A6E875C3862175BDC11EE7AF54703501" - earliest_app_hash: - type: string - example: "C9AEBB441B787D9F1D846DE51F3826F4FD386108B59B08239653ABF59455C3F8" - earliest_block_height: - type: string - example: "1262196" - earliest_block_time: - type: string - example: "2019-08-01T11:52:22.818762194Z" + earliest_block_hash: + type: string + example: "790BA84C3545FCCC49A5C629CEE6EA58A6E875C3862175BDC11EE7AF54703501" + earliest_app_hash: + type: string + example: "C9AEBB441B787D9F1D846DE51F3826F4FD386108B59B08239653ABF59455C3F8" + earliest_block_height: + type: string + example: "1262196" + earliest_block_time: + type: string + example: "2019-08-01T11:52:22.818762194Z" catching_up: type: boolean example: false @@ -1207,13 +1342,13 @@ components: type: string example: "0" Progress: - type: number + type: integer example: 0 Channel: type: object properties: ID: - type: number + type: integer example: 48 SendQueueCapacity: type: string @@ -1266,7 +1401,7 @@ components: type: string example: "Listener(@)" n_peers: - type: number + type: string example: "1" peers: type: array @@ -1280,129 +1415,21 @@ components: properties: result: $ref: "#/components/schemas/NetInfo" - BlockID: - required: - - "hash" - - "parts" - properties: - hash: - type: string - example: "D82C2734BB0E76C772A10994B210EF9D11505D1B98CB189D9CF7F9A5488672A5" - parts: - required: - - "total" - - "hash" - properties: - total: - type: string - example: "1" - hash: - type: string - example: "CB02DCAA7FB46BF874052EC2273FD0B1F2CF2E1593298D9781E60FE9C3DB8638" - type: object - type: object - BlockHeader: - required: - - "version" - - "chain_id" - - "height" - - "time" - - "last_block_id" - - "last_commit_hash" - - "data_hash" - - "validators_hash" - - "next_validators_hash" - - "consensus_hash" - - "app_hash" - - "last_results_hash" - - "evidence_hash" - - "proposer_address" - properties: - version: - required: - - "block" - - "app" - properties: - block: - type: string - example: "10" - app: - type: string - example: "0" - type: object - chain_id: - type: string - example: "cosmoshub-2" - height: - type: string - example: "12" - time: - type: string - example: "2019-04-22T17:01:51.701356223Z" - last_block_id: - $ref: "#/components/schemas/BlockID" - last_commit_hash: - type: string - example: "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812" - data_hash: - type: string - example: "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73" - validators_hash: - type: string - example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" - next_validators_hash: - type: string - example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" - consensus_hash: - type: string - example: "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8" - app_hash: - type: string - example: "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C" - last_results_hash: - type: string - 
example: "" - evidence_hash: - type: string - example: "" - proposer_address: - type: string - example: "D540AB022088612AC74B287D076DBFBC4A377A2E" - type: object - BlockId: - required: - - "hash" - - "parts" - properties: - hash: - type: string - example: "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7" - parts: - required: - - "total" - - "hash" - properties: - total: - type: string - example: "1" - hash: - type: string - example: "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" - type: object - type: object + BlockMeta: type: object properties: block_id: - $ref: "#/components/schemas/BlockId" + $ref: "#/components/schemas/BlockID" block_size: - type: number + type: integer example: 1000000 header: $ref: "#/components/schemas/BlockHeader" num_txs: type: string example: "54" + Blockchain: type: object required: @@ -1413,9 +1440,10 @@ components: type: string example: "1276718" block_metas: - type: "array" + type: array items: $ref: "#/components/schemas/BlockMeta" + BlockchainResponse: description: Blockchain info allOf: @@ -1424,6 +1452,7 @@ components: properties: result: $ref: "#/components/schemas/Blockchain" + Commit: required: - "type" @@ -1436,14 +1465,14 @@ components: - "signature" properties: type: - type: number + type: integer example: 2 height: type: string example: "1262085" round: - type: string - example: "0" + type: integer + example: 0 block_id: $ref: "#/components/schemas/BlockID" timestamp: @@ -1453,11 +1482,12 @@ components: type: string example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" validator_index: - type: string - example: "0" + type: integer + example: 0 signature: type: string example: "DBchvucTzAUEJnGYpNvMdqLhBAHG4Px8BsOBB3J3mAFCLGeuG7uJqy+nVngKzZdPhPi8RhmE/xcw/M9DOJjEDg==" + Block: type: object properties: @@ -1476,37 +1506,30 @@ components: type: object properties: height: - type: number + type: integer round: - type: number + type: integer block_id: $ref: "#/components/schemas/BlockID" signatures: type: array items: $ref: "#/components/schemas/Commit" - Validator: - type: object - properties: - pub_key: - $ref: "#/components/schemas/PubKey" - voting_power: - type: number - address: - type: string + Evidence: type: object properties: type: type: string height: - type: number + type: integer time: - type: number + type: integer total_voting_power: - type: number + type: integer validator: $ref: "#/components/schemas/Validator" + BlockComplete: type: object properties: @@ -1522,204 +1545,134 @@ components: properties: result: $ref: "#/components/schemas/BlockComplete" - Tag: - type: object - properties: - key: - type: string - example: "YWN0aW9u" - value: - type: string - example: "c2VuZA==" + ################## FROM NOW ON NEEDS REFACTOR ################## BlockResultsResponse: - type: "object" + type: object required: - "jsonrpc" - "id" - "result" properties: jsonrpc: - type: "string" + type: string example: "2.0" id: - type: "number" + type: integer example: 0 result: - type: "object" + type: object required: - "height" properties: height: - type: "string" + type: string example: "12" txs_results: - type: "array" - x-nullable: true + type: array + nullable: true items: - type: "object" + type: object properties: code: - type: "string" + type: string example: "0" data: - type: "string" + type: string example: "" log: - type: "string" + type: string example: "not enough gas" info: - type: "string" + type: string example: "" - gasWanted: - type: "string" + gas_wanted: + type: string example: "100" - gasUsed: - type: 
"string" + gas_used: + type: string example: "100" events: - type: "array" - x-nullable: true + type: array + nullable: true items: - type: "object" + type: object properties: type: - type: "string" + type: string example: "app" attributes: - type: "array" - x-nullable: false + type: array + nullable: false items: - type: "object" - properties: - key: - type: "string" - example: "Y3JlYXRvcg==" - value: - type: "string" - example: "Q29zbW9zaGkgTmV0b3dva28=" + $ref: "#/components/schemas/Event" codespace: - type: "string" + type: string example: "ibc" begin_block_events: - type: "array" - x-nullable: true + type: array + nullable: true items: - type: "object" + type: object properties: type: - type: "string" + type: string example: "app" attributes: - type: "array" - x-nullable: false + type: array + nullable: false items: - type: "object" - properties: - key: - type: "string" - example: "Y3JlYXRvcg==" - value: - type: "string" - example: "Q29zbW9zaGkgTmV0b3dva28=" + $ref: "#/components/schemas/Event" end_block: - type: "array" - x-nullable: true + type: array + nullable: true items: - type: "object" + type: object properties: type: - type: "string" + type: string example: "app" attributes: - type: "array" - x-nullable: false + type: array + nullable: false items: - type: "object" - properties: - key: - type: "string" - example: "Y3JlYXRvcg==" - value: - type: "string" - example: "Q29zbW9zaGkgTmV0b3dva28=" + $ref: "#/components/schemas/Event" validator_updates: - type: "array" - x-nullable: true + type: array + nullable: true items: - type: "object" + type: object properties: pub_key: - type: "object" + type: object required: - "type" - "value" properties: type: - type: "string" + type: string example: "tendermint/PubKeyEd25519" value: - type: "string" + type: string example: "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM=" power: - type: "string" + type: string example: "300" - consensus_param_updates: - type: "object" - x-nullable: true - required: - - "block" - - "evidence" - - "validator" - properties: - block: - type: "object" - required: - - "max_bytes" - - "max_gas" - - "time_iota_ms" - properties: - max_bytes: - type: "string" - example: "22020096" - max_gas: - type: "string" - example: "1000" - time_iota_ms: - type: "string" - example: "1000" - evidence: - type: "object" - required: - - "max_age" - properties: - max_age: - type: "string" - example: "100000" - validator: - type: "object" - required: - - "pub_key_types" - properties: - pub_key_types: - type: "array" - items: - type: "string" - example: - - "ed25519" + consensus_params_updates: + $ref: "#/components/schemas/ConsensusParams" CommitResponse: - type: "object" + type: object required: - "jsonrpc" - "id" - "result" properties: jsonrpc: - type: "string" + type: string example: "2.0" id: - type: "number" + type: integer example: 0 result: required: @@ -1732,92 +1685,7 @@ components: - "commit" properties: header: - required: - - "version" - - "chain_id" - - "height" - - "time" - - "last_block_id" - - "last_commit_hash" - - "data_hash" - - "validators_hash" - - "next_validators_hash" - - "consensus_hash" - - "app_hash" - - "last_results_hash" - - "evidence_hash" - - "proposer_address" - properties: - version: - required: - - "block" - - "app" - properties: - block: - type: "string" - example: "10" - app: - type: "string" - example: "0" - type: "object" - chain_id: - type: "string" - example: "cosmoshub-2" - height: - type: "string" - example: "12" - time: - type: "string" - example: "2019-04-22T17:01:51.701356223Z" - 
last_block_id: - required: - - "hash" - - "parts" - properties: - hash: - type: "string" - example: "D82C2734BB0E76C772A10994B210EF9D11505D1B98CB189D9CF7F9A5488672A5" - parts: - required: - - "total" - - "hash" - properties: - total: - type: "string" - example: "1" - hash: - type: "string" - example: "CB02DCAA7FB46BF874052EC2273FD0B1F2CF2E1593298D9781E60FE9C3DB8638" - type: "object" - type: "object" - last_commit_hash: - type: "string" - example: "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812" - data_hash: - type: "string" - example: "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73" - validators_hash: - type: "string" - example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" - next_validators_hash: - type: "string" - example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" - consensus_hash: - type: "string" - example: "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8" - app_hash: - type: "string" - example: "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C" - last_results_hash: - type: "string" - example: "" - evidence_hash: - type: "string" - example: "" - proposer_address: - type: "string" - example: "D540AB022088612AC74B287D076DBFBC4A377A2E" - type: "object" + $ref: "#/components/schemas/BlockHeader" commit: required: - "height" @@ -1826,55 +1694,36 @@ components: - "signatures" properties: height: - type: "string" + type: string example: "1311801" round: - type: "string" - example: "0" + type: integer + example: 0 block_id: - required: - - "hash" - - "parts" - properties: - hash: - type: "string" - example: "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7" - parts: - required: - - "total" - - "hash" - properties: - total: - type: "string" - example: "1" - hash: - type: "string" - example: "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" - type: "object" - type: "object" + $ref: "#/components/schemas/BlockID" signatures: - type: "array" + type: array items: - type: "object" + type: object properties: block_id_flag: - type: "number" + type: integer example: 2 validator_address: - type: "string" + type: string example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" timestamp: - type: "string" + type: string example: "2019-04-22T17:01:58.376629719Z" signature: - type: "string" + type: string example: "14jaTQXYRt8kbLKEhdHq7AXycrFImiLuZx50uOjs2+Zv+2i7RTG/jnObD07Jo2ubZ8xd7bNBJMqkgtkd0oQHAw==" - type: "object" - type: "object" + type: object + type: object canonical: - type: "boolean" + type: boolean example: true - type: "object" + type: object ValidatorsResponse: type: object required: @@ -1883,10 +1732,10 @@ components: - "result" properties: jsonrpc: - type: "string" + type: string example: "2.0" id: - type: "number" + type: integer example: 0 result: required: @@ -1894,41 +1743,19 @@ components: - "validators" properties: block_height: - type: "string" + type: string example: "55" validators: - type: "array" + type: array items: - type: "object" - properties: - address: - type: "string" - example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" - pub_key: - required: - - "type" - - "value" - properties: - type: - type: "string" - example: "tendermint/PubKeyEd25519" - value: - type: "string" - example: "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM=" - type: "object" - voting_power: - type: "string" - example: "250353" - proposer_priority: - type: "string" - example: "13769415" + $ref: "#/components/schemas/ValidatorPriority" count: - type: "number" - example: 
1 + type: string + example: "1" total: - type: "number" - example: 25 - type: "object" + type: string + example: "25" + type: object GenesisResponse: type: object required: @@ -1937,80 +1764,44 @@ components: - "result" properties: jsonrpc: - type: "string" + type: string example: "2.0" id: - type: "number" + type: integer example: 0 result: - type: "object" + type: object required: - "genesis" properties: genesis: - type: "object" + type: object required: - "genesis_time" - "chain_id" + - "initial_height" - "consensus_params" - "validators" - "app_hash" properties: genesis_time: - type: "string" + type: string example: "2019-04-22T17:00:00Z" chain_id: - type: "string" + type: string example: "cosmoshub-2" + initial_height: + type: string + example: "2" consensus_params: - type: "object" - required: - - "block" - - "evidence" - - "validator" - properties: - block: - type: "object" - required: - - "max_bytes" - - "max_gas" - - "time_iota_ms" - properties: - max_bytes: - type: "string" - example: "22020096" - max_gas: - type: "string" - example: "1000" - time_iota_ms: - type: "string" - example: "1000" - evidence: - type: "object" - required: - - "max_age" - properties: - max_age: - type: "string" - example: "100000" - validator: - type: "object" - required: - - "pub_key_types" - properties: - pub_key_types: - type: "array" - items: - type: "string" - example: - - "ed25519" + $ref: "#/components/schemas/ConsensusParams" validators: - type: "array" + type: array items: - type: "object" + type: object properties: address: - type: "string" + type: string example: "B00A6323737F321EB0B8D59C6FD497A14B60938A" pub_key: required: @@ -2018,24 +1809,24 @@ components: - "value" properties: type: - type: "string" + type: string example: "tendermint/PubKeyEd25519" value: - type: "string" + type: string example: "cOQZvh/h9ZioSeUMZB/1Vy1Xo5x2sjrVjlE/qHnYifM=" - type: "object" + type: object power: - type: "string" + type: string example: "9328525" name: - type: "string" + type: string example: "Certus One" app_hash: - type: "string" + type: string example: "" app_state: properties: {} - type: "object" + type: object DumpConsensusResponse: type: object @@ -2045,10 +1836,10 @@ components: - "result" properties: jsonrpc: - type: "string" + type: string example: "2.0" id: - type: "number" + type: integer example: 0 result: required: @@ -2079,19 +1870,19 @@ components: - "triggered_timeout_precommit" properties: height: - type: "string" + type: string example: "1311801" round: - type: "string" - example: "0" + type: integer + example: 0 step: - type: "number" + type: integer example: 3 start_time: - type: "string" + type: string example: "2019-08-05T11:28:49.064658805Z" commit_time: - type: "string" + type: string example: "2019-08-05T11:28:44.064658805Z" validators: required: @@ -2099,191 +1890,93 @@ components: - "proposer" properties: validators: - type: "array" + type: array items: - type: "object" - properties: - address: - type: "string" - example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" - pub_key: - required: - - "type" - - "value" - properties: - type: - type: "string" - example: "tendermint/PubKeyEd25519" - value: - type: "string" - example: "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM=" - type: "object" - voting_power: - type: "string" - example: "239727" - proposer_priority: - type: "string" - example: "-11896414" + $ref: "#/components/schemas/ValidatorPriority" proposer: - required: - - "address" - - "pub_key" - - "voting_power" - - "proposer_priority" - properties: - address: - type: "string" - 
example: "708FDDCE121CDADA502F2B0252FEF13FDAA31E50" - pub_key: - required: - - "type" - - "value" - properties: - type: - type: "string" - example: "tendermint/PubKeyEd25519" - value: - type: "string" - example: "VNMNfw7mrQBSpEvCtA9ykOe6BoR00RM9b/a9v3vXZhY=" - type: "object" - voting_power: - type: "string" - example: "295360" - proposer_priority: - type: "string" - example: "-88886833" - type: "object" - type: "object" + $ref: "#/components/schemas/ValidatorPriority" + type: object locked_round: - type: "string" - example: "-1" + type: integer + example: -1 valid_round: - type: "string" + type: string example: "-1" votes: - type: "array" + type: array items: - type: "object" + type: object properties: round: - type: "string" + type: string example: "0" prevotes: - type: "array" - x-nullable: true + type: array + nullable: true items: - type: "string" + type: string example: - "nil-Vote" - "Vote{19:46A3F8B8393B 1311801/00/1(Prevote) 000000000000 64CE682305CB @ 2019-08-05T11:28:47.374703444Z}" prevotes_bit_array: - type: "string" + type: string example: "BA{100:___________________x________________________________________________________________________________} 209706/170220253 = 0.00" precommits: - type: "array" - x-nullable: true + type: array + nullable: true items: - type: "string" + type: string example: - "nil-Vote" precommits_bit_array: - type: "string" + type: string example: "BA{100:____________________________________________________________________________________________________} 0/170220253 = 0.00" commit_round: - type: "string" - example: "-1" + type: integer + example: -1 last_commit: - x-nullable: true + nullable: true required: - "votes" - "votes_bit_array" - "peer_maj_23s" properties: votes: - type: "array" + type: array items: - type: "string" + type: string example: - "Vote{0:000001E443FD 1311800/00/2(Precommit) 3071ADB27D1A 77EE1B6B6847 @ 2019-08-05T11:28:43.810128139Z}" votes_bit_array: - type: "string" + type: string example: "BA{100:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx} 170220253/170220253 = 1.00" peer_maj_23s: properties: {} - type: "object" - type: "object" + type: object + type: object last_validators: required: - "validators" - "proposer" properties: validators: - type: "array" + type: array items: - type: "object" - properties: - address: - type: "string" - example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" - pub_key: - required: - - "type" - - "value" - properties: - type: - type: "string" - example: "tendermint/PubKeyEd25519" - value: - type: "string" - example: "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM=" - type: "object" - voting_power: - type: "string" - example: "239727" - proposer_priority: - type: "string" - example: "-12136141" + $ref: "#/components/schemas/ValidatorPriority" proposer: - required: - - "address" - - "pub_key" - - "voting_power" - - "proposer_priority" - properties: - address: - type: "string" - example: "B00A6323737F321EB0B8D59C6FD497A14B60938A" - pub_key: - required: - - "type" - - "value" - properties: - type: - type: "string" - example: "tendermint/PubKeyEd25519" - value: - type: "string" - example: "cOQZvh/h9ZioSeUMZB/1Vy1Xo5x2sjrVjlE/qHnYifM=" - type: "object" - voting_power: - type: "string" - example: "8590153" - proposer_priority: - type: "string" - example: "-79515145" - type: "object" - type: "object" + $ref: "#/components/schemas/ValidatorPriority" + type: object triggered_timeout_precommit: - type: "boolean" + type: boolean example: false - type: "object" + 
type: object peers: - type: "array" + type: array items: - type: "object" + type: object properties: node_address: - type: "string" + type: string example: "357f6a6c1d27414579a8185060aa8adf9815c43c@68.183.41.207:26656" peer_state: required: @@ -2309,19 +2002,19 @@ components: - "catchup_commit" properties: height: - type: "string" + type: string example: "1311801" round: - type: "string" + type: string example: "0" step: - type: "number" + type: integer example: 3 start_time: - type: "string" + type: string example: "2019-08-05T11:28:49.21730864Z" proposal: - type: "boolean" + type: boolean example: false proposal_block_parts_header: required: @@ -2329,59 +2022,60 @@ components: - "hash" properties: total: - type: "string" - example: "0" + type: integer + example: 0 hash: - type: "string" + type: string example: "" - type: "object" + type: object proposal_pol_round: - x-nullable: true - type: "string" - example: "-1" + nullable: true + type: integer + example: -1 proposal_pol: - x-nullable: true - type: "string" + nullable: true + type: string example: "____________________________________________________________________________________________________" prevotes: - x-nullable: true - type: "string" + nullable: true + type: string example: "___________________x________________________________________________________________________________" precommits: - x-nullable: true - type: "string" + nullable: true + type: string example: "____________________________________________________________________________________________________" last_commit_round: - x-nullable: true - type: "string" - example: "0" + nullable: true + type: integer + example: 0 last_commit: - x-nullable: true - type: "string" + nullable: true + type: string example: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" catchup_commit_round: - type: "string" - x-nullable: true - example: "-1" + type: integer + nullable: true + example: -1 catchup_commit: - x-nullable: true - type: "string" + nullable: true + type: string example: "____________________________________________________________________________________________________" - type: "object" + type: object stats: required: - "votes" - "block_parts" properties: votes: - type: "string" + type: string example: "1159558" block_parts: - type: "string" + type: string example: "4786" - type: "object" - type: "object" - type: "object" + type: object + type: object + type: object + ConsensusStateResponse: type: object required: @@ -2390,10 +2084,10 @@ components: - "result" properties: jsonrpc: - type: "string" + type: string example: "2.0" id: - type: "number" + type: integer example: 0 result: required: @@ -2410,59 +2104,60 @@ components: - "proposer" properties: height/round/step: - type: "string" + type: string example: "1262197/0/8" start_time: - type: "string" + type: string example: "2019-08-01T11:52:38.962730289Z" proposal_block_hash: - type: "string" + type: string example: "634ADAF1F402663BEC2ABC340ECE8B4B45AA906FA603272ACC5F5EED3097E009" locked_block_hash: - type: "string" + type: string example: "634ADAF1F402663BEC2ABC340ECE8B4B45AA906FA603272ACC5F5EED3097E009" valid_block_hash: - type: "string" + type: string example: "634ADAF1F402663BEC2ABC340ECE8B4B45AA906FA603272ACC5F5EED3097E009" height_vote_set: - type: "array" + type: array items: - type: "object" + type: object properties: round: - type: "string" - example: "0" + type: integer + example: 0 prevotes: - type: "array" + type: array items: - type: "string" + type: 
string example: - "Vote{0:000001E443FD 1262197/00/1(Prevote) 634ADAF1F402 7BB974E1BA40 @ 2019-08-01T11:52:35.513572509Z}" - "nil-Vote" prevotes_bit_array: - type: "string" + type: string example: "BA{100:xxxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx} 169753436/170151262 = 1.00" precommits: - type: "array" + type: array items: - type: "string" + type: string example: - "Vote{5:18C78D135C9D 1262197/00/2(Precommit) 634ADAF1F402 8B5EFFFEABCD @ 2019-08-01T11:52:36.25600005Z}" - "nil-Vote" precommits_bit_array: - type: "string" + type: string example: "BA{100:xxxxxx_xxxxx_xxxx_x_xxx_xx_xx_xx__x_x_x__xxxxxxxxxxxxxx_xxxx_xx_xxxxxx_xxxxxxxx_xxxx_xxx_x_xxxx__xxx} 118726247/170151262 = 0.70" proposer: - type: "object" + type: object properties: address: - type: "string" + type: string example: "D540AB022088612AC74B287D076DBFBC4A377A2E" index: - type: "string" - example: "0" - type: "object" - type: "object" + type: integer + example: 0 + type: object + type: object + ConsensusParamsResponse: type: object required: @@ -2471,62 +2166,22 @@ components: - "result" properties: jsonrpc: - type: "string" + type: string example: "2.0" id: - type: "number" + type: integer example: 0 result: - type: "object" + type: object required: - "block_height" - "consensus_params" properties: block_height: - type: "string" - example: "1313448" + type: string + example: "1" consensus_params: - type: "object" - required: - - "block" - - "evidence" - - "validator" - properties: - block: - type: "object" - required: - - "max_bytes" - - "max_gas" - - "time_iota_ms" - properties: - max_bytes: - type: "string" - example: "22020096" - max_gas: - type: "string" - example: "1000" - time_iota_ms: - type: "string" - example: "1000" - evidence: - type: "object" - required: - - "max_age" - properties: - max_age: - type: "string" - example: "100000" - validator: - type: "object" - required: - - "pub_key_types" - properties: - pub_key_types: - type: "array" - items: - type: "string" - example: - - "ed25519" + $ref: "#/components/schemas/ConsensusParams" NumUnconfirmedTransactionsResponse: type: object @@ -2536,10 +2191,10 @@ components: - "result" properties: jsonrpc: - type: "string" + type: string example: "2.0" id: - type: "number" + type: integer example: 0 result: required: @@ -2548,23 +2203,24 @@ components: - "total_bytes" properties: n_txs: - type: "string" + type: string example: "31" total: - type: "string" + type: string example: "82" total_bytes: - type: "string" + type: string example: "19974" # txs: - # type: "array" - # x-nullable: true + # type: array + # nullable: true # items: - # type: "string" - # x-nullable: true + # type: string + # nullable: true # example: # - "gAPwYl3uCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUA75/FmYq9WymsOBJ0XSJ8yV8zmQKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhQbrvwbvlNiT+Yjr86G+YQNx7kRVgowjE1xDQoUjJyJG+WaWBwSiGannBRFdrbma+8SFK2m+1oxgILuQLO55n8mWfnbIzyPCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUQNGfkmhTNMis4j+dyMDIWXdIPiYKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhS8sL0D0wwgGCItQwVowak5YB38KRIUCg4KBXVhdG9tEgUxMDA1NBDoxRgaagom61rphyECn8x7emhhKdRCB2io7aS/6Cpuq5NbVqbODmqOT3jWw6kSQKUresk+d+Gw0BhjiggTsu8+1voW+VlDCQ1GRYnMaFOHXhyFv7BCLhFWxLxHSAYT8a5XqoMayosZf9mANKdXArA=" - type: "object" + type: object + UnconfirmedTransactionsResponse: type: object required: @@ -2573,10 +2229,10 @@ components: - "result" properties: jsonrpc: - type: "string" + type: string example: "2.0" id: - type: "number" + type: integer example: 0 result: required: @@ -2586,23 +2242,24 @@ 
components: - "txs" properties: n_txs: - type: "string" + type: string example: "82" total: - type: "string" + type: string example: "82" total_bytes: - type: "string" + type: string example: "19974" txs: type: array - x-nullable: true + nullable: true items: type: string - x-nullable: true + nullable: true example: - "gAPwYl3uCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUA75/FmYq9WymsOBJ0XSJ8yV8zmQKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhQbrvwbvlNiT+Yjr86G+YQNx7kRVgowjE1xDQoUjJyJG+WaWBwSiGannBRFdrbma+8SFK2m+1oxgILuQLO55n8mWfnbIzyPCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUQNGfkmhTNMis4j+dyMDIWXdIPiYKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhS8sL0D0wwgGCItQwVowak5YB38KRIUCg4KBXVhdG9tEgUxMDA1NBDoxRgaagom61rphyECn8x7emhhKdRCB2io7aS/6Cpuq5NbVqbODmqOT3jWw6kSQKUresk+d+Gw0BhjiggTsu8+1voW+VlDCQ1GRYnMaFOHXhyFv7BCLhFWxLxHSAYT8a5XqoMayosZf9mANKdXArA=" - type: "object" + type: object + TxSearchResponse: type: object required: @@ -2611,10 +2268,10 @@ components: - "result" properties: jsonrpc: - type: "string" + type: string example: "2.0" id: - type: "number" + type: integer example: 0 result: required: @@ -2622,49 +2279,40 @@ components: - "total_count" properties: txs: - type: "array" + type: array items: - type: "object" + type: object properties: hash: - type: "string" + type: string example: "D70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" height: - type: "string" + type: string example: "1000" index: - type: "number" + type: integer example: 0 tx_result: required: - "log" - - "gasWanted" - - "gasUsed" + - "gas_wanted" + - "gas_used" - "tags" properties: log: - type: "string" + type: string example: '[{"msg_index":"0","success":true,"log":""}]' - gasWanted: - type: "string" + gas_wanted: + type: string example: "200000" - gasUsed: - type: "string" + gas_used: + type: string example: "28596" tags: - type: "array" - items: - type: "object" - properties: - key: - type: "string" - example: "YWN0aW9u" - value: - type: "string" - example: "c2VuZA==" - type: "object" + $ref: "#/components/schemas/Event" + type: object tx: - type: "string" + type: string example: "5wHwYl3uCkaoo2GaChQmSIu8hxpJxLcCuIi8fiHN4TMwrRIU/Af1cEG7Rcs/6LjTl7YjRSymJfYaFAoFdWF0b20SCzE0OTk5OTk1MDAwEhMKDQoFdWF0b20SBDUwMDAQwJoMGmoKJuta6YchAwswBShaB1wkZBctLIhYqBC3JrAI28XGzxP+rVEticGEEkAc+khTkKL9CDE47aDvjEHvUNt+izJfT4KVF2v2JkC+bmlH9K08q3PqHeMI9Z5up+XMusnTqlP985KF+SI5J3ZOIhhNYWRlIGJ5IENpcmNsZSB3aXRoIGxvdmU=" proof: required: @@ -2673,10 +2321,10 @@ components: - "Proof" properties: RootHash: - type: "string" + type: string example: "72FE6BF6D4109105357AECE0A82E99D0F6288854D16D8767C5E72C57F876A14D" Data: - type: "string" + type: string example: "5wHwYl3uCkaoo2GaChQmSIu8hxpJxLcCuIi8fiHN4TMwrRIU/Af1cEG7Rcs/6LjTl7YjRSymJfYaFAoFdWF0b20SCzE0OTk5OTk1MDAwEhMKDQoFdWF0b20SBDUwMDAQwJoMGmoKJuta6YchAwswBShaB1wkZBctLIhYqBC3JrAI28XGzxP+rVEticGEEkAc+khTkKL9CDE47aDvjEHvUNt+izJfT4KVF2v2JkC+bmlH9K08q3PqHeMI9Z5up+XMusnTqlP985KF+SI5J3ZOIhhNYWRlIGJ5IENpcmNsZSB3aXRoIGxvdmU=" Proof: required: @@ -2686,26 +2334,27 @@ components: - "aunts" properties: total: - type: "string" + type: string example: "2" index: - type: "string" + type: string example: "0" leaf_hash: - type: "string" + type: string example: "eoJxKCzF3m72Xiwb/Q43vJ37/2Sx8sfNS9JKJohlsYI=" aunts: - type: "array" + type: array items: - type: "string" + type: string example: - "eWb+HG/eMmukrQj4vNGyFYb3nKQncAWacq4HF5eFzDY=" - type: "object" - type: "object" + type: object + type: object total_count: - type: "string" + type: string example: "2" - type: "object" + type: object + TxResponse: type: object 
required: @@ -2714,10 +2363,10 @@ components: - "result" properties: jsonrpc: - type: "string" + type: string example: "2.0" id: - type: "number" + type: integer example: 0 result: required: @@ -2728,46 +2377,40 @@ components: - "tx" properties: hash: - type: "string" + type: string example: "D70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" height: - type: "string" + type: string example: "1000" index: - type: "number" + type: integer example: 0 tx_result: required: - "log" - - "gasWanted" - - "gasUsed" + - "gas_wanted" + - "gas_used" - "tags" properties: log: - type: "string" + type: string example: '[{"msg_index":"0","success":true,"log":""}]' - gasWanted: - type: "string" + gas_wanted: + type: string example: "200000" - gasUsed: - type: "string" + gas_used: + type: string example: "28596" tags: - type: "array" + type: array items: - type: "object" - properties: - key: - type: "string" - example: "YWN0aW9u" - value: - type: "string" - example: "c2VuZA==" - type: "object" + $ref: "#/components/schemas/Event" + type: object tx: - type: "string" + type: string example: "5wHwYl3uCkaoo2GaChQmSIu8hxpJxLcCuIi8fiHN4TMwrRIU/Af1cEG7Rcs/6LjTl7YjRSymJfYaFAoFdWF0b20SCzE0OTk5OTk1MDAwEhMKDQoFdWF0b20SBDUwMDAQwJoMGmoKJuta6YchAwswBShaB1wkZBctLIhYqBC3JrAI28XGzxP+rVEticGEEkAc+khTkKL9CDE47aDvjEHvUNt+izJfT4KVF2v2JkC+bmlH9K08q3PqHeMI9Z5up+XMusnTqlP985KF+SI5J3ZOIhhNYWRlIGJ5IENpcmNsZSB3aXRoIGxvdmU=" - type: "object" + type: object + ABCIInfoResponse: type: object required: @@ -2775,10 +2418,10 @@ components: - "id" properties: jsonrpc: - type: "string" + type: string example: "2.0" id: - type: "number" + type: integer example: 0 result: required: @@ -2791,16 +2434,17 @@ components: - "version" properties: data: - type: "string" + type: string example: '{"size":0}' version: type: string example: "0.16.1" app_version: - type: "string" + type: string example: "1314126" - type: "object" - type: "object" + type: object + type: object + ABCIQueryResponse: type: object required: @@ -2810,7 +2454,7 @@ components: - "jsonrpc" properties: error: - type: "string" + type: string example: "" result: required: @@ -2827,34 +2471,35 @@ components: - "code" properties: log: - type: "string" + type: string example: "exists" height: - type: "string" + type: string example: "0" proof: - type: "string" + type: string example: "010114FED0DAD959F36091AD761C922ABA3CBF1D8349990101020103011406AA2262E2F448242DF2C2607C3CDC705313EE3B0001149D16177BC71E445476174622EA559715C293740C" value: - type: "string" + type: string example: "61626364" key: - type: "string" + type: string example: "61626364" index: - type: "string" + type: string example: "-1" code: - type: "string" + type: string example: "0" - type: "object" - type: "object" + type: object + type: object id: - type: "number" + type: integer example: 0 jsonrpc: - type: "string" + type: string example: "2.0" + BroadcastEvidenceResponse: type: object required: @@ -2862,17 +2507,18 @@ components: - "jsonrpc" properties: error: - type: "string" + type: string example: "" result: - type: "string" + type: string example: "" id: - type: "number" + type: integer example: 0 jsonrpc: - type: "string" + type: string example: "2.0" + BroadcastTxCommitResponse: type: object required: @@ -2882,7 +2528,7 @@ components: - "jsonrpc" properties: error: - type: "string" + type: string example: "" result: required: @@ -2892,10 +2538,10 @@ components: - "check_tx" properties: height: - type: "string" + type: string example: "26682" hash: - type: "string" + type: string example: 
"75CA0F856A4DA078FC4911580360E70CEFB2EBEE" deliver_tx: required: @@ -2904,15 +2550,15 @@ components: - "code" properties: log: - type: "string" + type: string example: "" data: - type: "string" + type: string example: "" code: - type: "string" + type: string example: "0" - type: "object" + type: object check_tx: required: - "log" @@ -2920,22 +2566,83 @@ components: - "code" properties: log: - type: "string" + type: string example: "" data: - type: "string" + type: string example: "" code: - type: "string" + type: string example: "0" - type: "object" - type: "object" + type: object + type: object + id: + type: integer + example: 0 + jsonrpc: + type: string + example: "2.0" + + CheckTxResponse: + type: object + required: + - "error" + - "result" + - "id" + - "jsonrpc" + properties: + error: + type: string + example: "" + result: + required: + - "log" + - "data" + - "code" + properties: + code: + type: string + example: "0" + data: + type: string + example: "" + log: + type: string + example: "" + info: + type: string + example: "" + gas_wanted: + type: string + example: "1" + gas_used: + type: string + example: "0" + events: + type: array + nullable: true + items: + type: object + properties: + type: + type: string + example: "app" + attributes: + type: array + nullable: false + items: + $ref: "#/components/schemas/Event" + codespace: + type: string + example: "bank" + type: object id: - type: "number" + type: integer example: 0 jsonrpc: - type: "string" + type: string example: "2.0" + BroadcastTxResponse: type: object required: @@ -2945,10 +2652,10 @@ components: - "error" properties: jsonrpc: - type: "string" + type: string example: "2.0" id: - type: "number" + type: integer example: 0 result: required: @@ -2958,46 +2665,217 @@ components: - "hash" properties: code: - type: "string" + type: string example: "0" data: - type: "string" + type: string example: "" log: - type: "string" + type: string example: "" codespace: - type: "string" + type: string example: "ibc" hash: - type: "string" + type: string example: "0D33F2F03A5234F38706E43004489E061AC40A2E" - type: "object" + type: object error: - type: "string" + type: string example: "" - dialPeersPost: + + dialResp: type: object properties: - Persistent: + Log: + type: string + example: "Dialing seeds in progress. 
See /net_info for details" + + ###### Reusable types ###### + + # Validator type with proposer priority + ValidatorPriority: + type: object + properties: + address: + type: string + example: "000001E443FD237E4B616E2FA69DF4EE3D49A94F" + pub_key: + required: + - "type" + - "value" + properties: + type: + type: string + example: "tendermint/PubKeyEd25519" + value: + type: string + example: "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM=" + type: object + voting_power: + type: string + example: "239727" + proposer_priority: + type: string + example: "-11896414" + + # Stripped down validator + Validator: + type: object + properties: + pub_key: + $ref: "#/components/schemas/PubKey" + voting_power: + type: integer + address: + type: string + + # Consensus Params + ConsensusParams: + type: object + nullable: true + required: + - "block" + - "evidence" + - "validator" + properties: + block: + type: object + required: + - "max_bytes" + - "max_gas" + - "time_iota_ms" + properties: + max_bytes: + type: string + example: "22020096" + max_gas: + type: string + example: "1000" + time_iota_ms: + type: string + example: "1000" + evidence: + type: object + required: + - "max_age" + properties: + max_age: + type: string + example: "100000" + validator: + type: object + required: + - "pub_key_types" + properties: + pub_key_types: + type: array + items: + type: string + example: + - "ed25519" + + # Events in tendermint + Event: + type: object + properties: + key: + type: string + example: "YWN0aW9u" + value: + type: string + example: "c2VuZA==" + index: type: boolean example: false - Peers: - type: array - items: - type: "string" - example: ["6f172048b821e3b1ab98ffb0973ba737966eecf8@192.168.1.2:26656"] - dialSeedsPost: - type: object + + # Block Header + BlockHeader: + required: + - "version" + - "chain_id" + - "height" + - "time" + - "last_block_id" + - "last_commit_hash" + - "data_hash" + - "validators_hash" + - "next_validators_hash" + - "consensus_hash" + - "app_hash" + - "last_results_hash" + - "evidence_hash" + - "proposer_address" properties: - Peers: - type: array - items: - type: "string" - example: ["6f172048b821e3b1ab98ffb0973ba737966eecf8@192.168.1.2:26656"] - dialResp: + version: + required: + - "block" + - "app" + properties: + block: + type: string + example: "10" + app: + type: string + example: "0" + type: object + chain_id: + type: string + example: "cosmoshub-2" + height: + type: string + example: "12" + time: + type: string + example: "2019-04-22T17:01:51.701356223Z" + last_block_id: + $ref: "#/components/schemas/BlockID" + last_commit_hash: + type: string + example: "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812" + data_hash: + type: string + example: "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73" + validators_hash: + type: string + example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" + next_validators_hash: + type: string + example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" + consensus_hash: + type: string + example: "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8" + app_hash: + type: string + example: "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C" + last_results_hash: + type: string + example: "" + evidence_hash: + type: string + example: "" + proposer_address: + type: string + example: "D540AB022088612AC74B287D076DBFBC4A377A2E" type: object + + BlockID: + required: + - "hash" + - "parts" properties: - Log: + hash: type: string - example: "Dialing seeds in progress. 
See /net_info for details" + example: "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7" + parts: + required: + - "total" + - "hash" + properties: + total: + type: integer + example: 1 + hash: + type: string + example: "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + type: object diff --git a/rpc/swagger/index.html b/rpc/swagger/index.html deleted file mode 100644 index d6b0fc5a9..000000000 --- a/rpc/swagger/index.html +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - Tendermint RPC - - - - - - -
- - - diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 46aea59e1..6c55fc34b 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -19,7 +19,7 @@ import ( "github.com/tendermint/tendermint/proxy" ctypes "github.com/tendermint/tendermint/rpc/core/types" core_grpc "github.com/tendermint/tendermint/rpc/grpc" - rpcclient "github.com/tendermint/tendermint/rpc/lib/client" + rpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" ) // Options helps with specifying some parameters for our RPC testing for greater @@ -37,14 +37,13 @@ var defaultOptions = Options{ func waitForRPC() { laddr := GetConfig().RPC.ListenAddress - client, err := rpcclient.NewJSONRPCClient(laddr) + client, err := rpcclient.New(laddr) if err != nil { panic(err) } - ctypes.RegisterAmino(client.Codec()) result := new(ctypes.ResultStatus) for { - _, err := client.Call("status", map[string]interface{}{}, result) + _, err := client.Call(context.Background(), "status", map[string]interface{}{}, result) if err == nil { return } @@ -73,7 +72,7 @@ func makePathname() string { } // fmt.Println(p) sep := string(filepath.Separator) - return strings.Replace(p, sep, "_", -1) + return strings.ReplaceAll(p, sep, "_") } func randPort() int { @@ -100,7 +99,6 @@ func createConfig() *cfg.Config { c.RPC.ListenAddress = rpc c.RPC.CORSAllowedOrigins = []string{"https://tendermint.com/"} c.RPC.GRPCListenAddress = grpc - c.TxIndex.IndexKeys = "app.creator,tx.height" // see kvstore application return c } @@ -143,7 +141,9 @@ func StartTendermint(app abci.Application, opts ...func(*Options)) *nm.Node { // StopTendermint stops a test tendermint server, waits until it's stopped and // cleans up test/config files. func StopTendermint(node *nm.Node) { - node.Stop() + if err := node.Stop(); err != nil { + node.Logger.Error("Error when trying to stop node", "err", err) + } node.Wait() os.RemoveAll(node.Config().RootDir) } @@ -161,7 +161,10 @@ func NewTendermint(app abci.Application, opts *Options) *nm.Node { } pvKeyFile := config.PrivValidatorKeyFile() pvKeyStateFile := config.PrivValidatorStateFile() - pv := privval.LoadOrGenFilePV(pvKeyFile, pvKeyStateFile) + pv, err := privval.LoadOrGenFilePV(pvKeyFile, pvKeyStateFile) + if err != nil { + panic(err) + } papp := proxy.NewLocalClientCreator(app) nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) if err != nil { diff --git a/scripts/build.sh b/scripts/build.sh new file mode 100755 index 000000000..52348b635 --- /dev/null +++ b/scripts/build.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +set -ue + +# Expect the following envvars to be set: +# - APP +# - VERSION +# - COMMIT +# - TARGET_OS +# - LEDGER_ENABLED +# - DEBUG + +# Source builder's functions library +. /usr/local/share/cosmos-sdk/buildlib.sh + +# These variables are now available +# - BASEDIR +# - OUTDIR + +# Build for each os-architecture pair +for platform in ${TARGET_PLATFORMS} ; do + # This function sets GOOS, GOARCH, and OS_FILE_EXT environment variables + # according to the build target platform. OS_FILE_EXT is empty in all + # cases except when the target platform is 'windows'. + setup_build_env_for_platform "${platform}" + + make clean + echo Building for $(go env GOOS)/$(go env GOARCH) >&2 + GOROOT_FINAL="$(go env GOROOT)" \ + make build LDFLAGS=-buildid=${VERSION} COMMIT=${COMMIT} + mv ./build/${APP}${OS_FILE_EXT} ${OUTDIR}/${APP}-${VERSION}-$(go env GOOS)-$(go env GOARCH)${OS_FILE_EXT} + + # This function restores the build environment variables to their + # original state.
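+ # (A sketch under stated assumptions, not buildlib.sh's actual code: for a + # platform such as "linux/amd64", setup_build_env_for_platform is assumed to + # do roughly 'export GOOS=linux GOARCH=amd64 OS_FILE_EXT=""', and + # restore_build_env to undo it via 'unset GOOS GOARCH OS_FILE_EXT'.)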
+ restore_build_env +done + +# Generate and display build report. +generate_build_report +cat ${OUTDIR}/build_report diff --git a/scripts/dist.sh b/scripts/dist.sh index 81fdf9813..234380403 100755 --- a/scripts/dist.sh +++ b/scripts/dist.sh @@ -20,7 +20,7 @@ rm -rf build/pkg mkdir -p build/pkg # Get the git commit -GIT_COMMIT="$(git rev-parse --short=8 HEAD)" +VERSION="$(git describe --always)" GIT_IMPORT="github.com/tendermint/tendermint/version" # Determine the arch/os combos we're building for @@ -41,7 +41,7 @@ for arch in "${arch_list[@]}"; do for os in "${os_list[@]}"; do if [[ "$XC_EXCLUDE" != *" $os/$arch "* ]]; then echo "--> $os/$arch" - GOOS=${os} GOARCH=${arch} go build -ldflags "-s -w -X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}" -tags="${BUILD_TAGS}" -o "build/pkg/${os}_${arch}/tendermint" ./cmd/tendermint + GOOS=${os} GOARCH=${arch} go build -ldflags "-s -w -X ${GIT_IMPORT}.TMCoreSemVer=${VERSION}" -tags="${BUILD_TAGS}" -o "build/pkg/${os}_${arch}/tendermint" ./cmd/tendermint fi done done diff --git a/scripts/gitian-build.sh b/scripts/gitian-build.sh deleted file mode 100755 index 7471b472f..000000000 --- a/scripts/gitian-build.sh +++ /dev/null @@ -1,201 +0,0 @@ -#!/bin/bash - -# symbol prefixes: -# g_ -> global -# l_ - local variable -# f_ -> function - -set -euo pipefail - -GITIAN_CACHE_DIRNAME='.gitian-builder-cache' -GO_RELEASE='1.13.3' -GO_TARBALL="go${GO_RELEASE}.linux-amd64.tar.gz" -GO_TARBALL_URL="https://dl.google.com/go/${GO_TARBALL}" - -# Defaults - -DEFAULT_SIGN_COMMAND='gpg --detach-sign' -DEFAULT_TENDERMINT_SIGS=${TENDERMINT_SIGS:-'tendermint.sigs'} -DEFAULT_GITIAN_REPO='https://github.com/tendermint/gitian-builder' -DEFAULT_GBUILD_FLAGS='' -DEFAULT_SIGS_REPO='https://github.com/tendermint/tendermint.sigs' - -# Overrides - -SIGN_COMMAND=${SIGN_COMMAND:-${DEFAULT_SIGN_COMMAND}} -GITIAN_REPO=${GITIAN_REPO:-${DEFAULT_GITIAN_REPO}} -GBUILD_FLAGS=${GBUILD_FLAGS:-${DEFAULT_GBUILD_FLAGS}} - -# Globals - -g_workdir='' -g_gitian_cache='' -g_cached_gitian='' -g_cached_go_tarball='' -g_sign_identity='' -g_sigs_dir='' -g_flag_commit='' - - -f_help() { - cat >&2 <&2 - mkdir "${l_builddir}/inputs/" - cp -v "${g_cached_go_tarball}" "${l_builddir}/inputs/" - done -} - -f_build() { - local l_descriptor - - l_descriptor=$1 - - bin/gbuild --commit tendermint="$g_commit" ${GBUILD_FLAGS} "$l_descriptor" - libexec/stop-target || f_echo_stderr "warning: couldn't stop target" -} - -f_sign_verify() { - local l_descriptor - - l_descriptor=$1 - - bin/gsign -p "${SIGN_COMMAND}" -s "${g_sign_identity}" --destination="${g_sigs_dir}" --release=${g_release} ${l_descriptor} - bin/gverify --destination="${g_sigs_dir}" --release="${g_release}" ${l_descriptor} -} - -f_commit_sig() { - local l_release_name - - l_release_name=$1 - - pushd "${g_sigs_dir}" - git add . || echo "git add failed" >&2 - git commit -m "Add ${l_release_name} reproducible build" || echo "git commit failed" >&2 - popd -} - -f_prep_docker_image() { - pushd $1 - bin/make-base-vm --docker --suite bionic --arch amd64 - popd -} - -f_ensure_cache() { - g_gitian_cache="${g_workdir}/${GITIAN_CACHE_DIRNAME}" - [ -d "${g_gitian_cache}" ] || mkdir "${g_gitian_cache}" - - g_cached_go_tarball="${g_gitian_cache}/${GO_TARBALL}" - if [ ! -f "${g_cached_go_tarball}" ]; then - f_echo_stderr "${g_cached_go_tarball}: cache miss, caching..." - curl -L "${GO_TARBALL_URL}" --output "${g_cached_go_tarball}" - fi - - g_cached_gitian="${g_gitian_cache}/gitian-builder" - if [ !
-d "${g_cached_gitian}" ]; then - f_echo_stderr "${g_cached_gitian}: cache miss, caching..." - git clone ${GITIAN_REPO} "${g_cached_gitian}" - fi -} - -f_demangle_platforms() { - case "${1}" in - all) - printf '%s' 'darwin linux windows' ;; - linux|darwin|windows) - printf '%s' "${1}" ;; - *) - echo "invalid platform -- ${1}" - exit 1 - esac -} - -f_echo_stderr() { - echo $@ >&2 -} - - -while getopts ":cs:h" opt; do - case "${opt}" in - h) f_help ; exit 0 ;; - c) g_flag_commit=y ;; - s) g_sign_identity="${OPTARG}" ;; - esac -done - -shift "$((OPTIND-1))" - -g_platforms=$(f_demangle_platforms "${1}") -g_workdir="$(pwd)" -g_commit="$(git rev-parse HEAD)" -g_sigs_dir=${TENDERMINT_SIGS:-"${g_workdir}/${DEFAULT_TENDERMINT_SIGS}"} - -f_ensure_cache - -f_prep_docker_image "${g_cached_gitian}" - -f_prep_build "${g_platforms}" - -export USE_DOCKER=1 -for g_os in ${g_platforms}; do - g_release="$(git describe --tags --abbrev=9 | sed 's/^v//')-${g_os}" - g_descriptor="${g_workdir}/scripts/gitian-descriptors/gitian-${g_os}.yml" - [ -f ${g_descriptor} ] - g_builddir="$(f_builddir ${g_os})" - - pushd "${g_builddir}" - f_build "${g_descriptor}" - if [ -n "${g_sign_identity}" ]; then - f_sign_verify "${g_descriptor}" - fi - popd - - if [ -n "${g_sign_identity}" -a -n "${g_flag_commit}" ]; then - [ -d "${g_sigs_dir}/.git/" ] && f_commit_sig ${g_release} || f_echo_stderr "couldn't commit, ${g_sigs_dir} is not a git clone" - fi -done - -exit 0 diff --git a/scripts/gitian-descriptors/gitian-darwin.yml b/scripts/gitian-descriptors/gitian-darwin.yml deleted file mode 100644 index 90a9fb9d4..000000000 --- a/scripts/gitian-descriptors/gitian-darwin.yml +++ /dev/null @@ -1,107 +0,0 @@ ---- -name: "tendermint-darwin" -enable_cache: true -distro: "ubuntu" -suites: -- "bionic" -architectures: -- "amd64" -packages: -- "bsdmainutils" -- "build-essential" -- "ca-certificates" -- "curl" -- "debhelper" -- "dpkg-dev" -- "devscripts" -- "fakeroot" -- "git" -- "golang-any" -- "xxd" -- "quilt" -remotes: -- "url": "https://github.com/tendermint/tendermint.git" - "dir": "tendermint" -files: -- "go1.13.3.linux-amd64.tar.gz" -script: | - set -e -o pipefail - - GO_SRC_RELEASE=go1.13.3.linux-amd64 - GO_SRC_TARBALL="${GO_SRC_RELEASE}.tar.gz" - # Compile go and configure the environment - export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME"" - export BUILD_DIR=`pwd` - tar xf "${GO_SRC_TARBALL}" - rm -f "${GO_SRC_TARBALL}" - [ -d go/ ] - - export GOOS=darwin - export GOROOT=${BUILD_DIR}/go - export GOPATH=${BUILD_DIR}/gopath - mkdir -p ${GOPATH}/bin - - export PATH_orig=${PATH} - export PATH=$GOPATH/bin:$GOROOT/bin:$PATH - - export ARCHS='386 amd64' - export GO111MODULE=on - - # Make release tarball - pushd tendermint - VERSION=$(git describe --tags | sed 's/^v//') - COMMIT=$(git rev-parse --short=8 HEAD) - DISTNAME=tendermint-${VERSION} - git archive --format tar.gz --prefix ${DISTNAME}/ -o ${DISTNAME}.tar.gz HEAD - SOURCEDIST=`pwd`/`echo tendermint-*.tar.gz` - popd - - # Correct tar file order - mkdir -p temp - pushd temp - tar xf $SOURCEDIST - rm $SOURCEDIST - find tendermint-* | sort | tar --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > $SOURCEDIST - popd - - # Prepare GOPATH and install deps - distsrc=${GOPATH}/src/github.com/tendermint/tendermint - mkdir -p ${distsrc} - pushd ${distsrc} - tar --strip-components=1 -xf $SOURCEDIST - go mod download - popd - - # Configure LDFLAGS for reproducible builds - LDFLAGS="-extldflags=-static -buildid=${VERSION} -s -w \ - -X 
github.com/tendermint/tendermint/version.GitCommit=${COMMIT}" - - # Extract release tarball and build - for arch in ${ARCHS}; do - INSTALLPATH=`pwd`/installed/${DISTNAME}-${arch} - mkdir -p ${INSTALLPATH} - - # Build tendermint binary - pushd ${distsrc} - GOARCH=${arch} GOROOT_FINAL=${GOROOT} go build -a \ - -trimpath \ - -gcflags=all=-trimpath=${GOPATH} \ - -asmflags=all=-trimpath=${GOPATH} \ - -mod=readonly -tags "tendermint" \ - -ldflags="${LDFLAGS}" \ - -o ${INSTALLPATH}/tendermint ./cmd/tendermint/ - - popd # ${distsrc} - - pushd ${INSTALLPATH} - find -type f | sort | tar \ - --no-recursion --mode='u+rw,go+r-w,a+X' \ - --numeric-owner --sort=name \ - --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-darwin-${arch}.tar.gz - popd # installed - done - - rm -rf ${distsrc} - - mkdir -p $OUTDIR/src - mv $SOURCEDIST $OUTDIR/src diff --git a/scripts/gitian-descriptors/gitian-linux.yml b/scripts/gitian-descriptors/gitian-linux.yml deleted file mode 100644 index 8aab869ee..000000000 --- a/scripts/gitian-descriptors/gitian-linux.yml +++ /dev/null @@ -1,106 +0,0 @@ ---- -name: "tendermint-linux" -enable_cache: true -distro: "ubuntu" -suites: -- "bionic" -architectures: -- "amd64" -packages: -- "bsdmainutils" -- "build-essential" -- "ca-certificates" -- "curl" -- "debhelper" -- "dpkg-dev" -- "devscripts" -- "fakeroot" -- "git" -- "golang-any" -- "xxd" -- "quilt" -remotes: -- "url": "https://github.com/tendermint/tendermint.git" - "dir": "tendermint" -files: -- "go1.13.3.linux-amd64.tar.gz" -script: | - set -e -o pipefail - - GO_SRC_RELEASE=go1.13.3.linux-amd64 - GO_SRC_TARBALL="${GO_SRC_RELEASE}.tar.gz" - # Compile go and configure the environment - export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME"" - export BUILD_DIR=`pwd` - tar xf "${GO_SRC_TARBALL}" - rm -f "${GO_SRC_TARBALL}" - [ -d go/ ] - - export GOROOT=${BUILD_DIR}/go - export GOPATH=${BUILD_DIR}/gopath - mkdir -p ${GOPATH}/bin - - export PATH_orig=${PATH} - export PATH=$GOPATH/bin:$GOROOT/bin:$PATH - - export ARCHS='386 amd64 arm arm64' - export GO111MODULE=on - - # Make release tarball - pushd tendermint - VERSION=$(git describe --tags | sed 's/^v//') - COMMIT=$(git rev-parse --short=8 HEAD) - DISTNAME=tendermint-${VERSION} - git archive --format tar.gz --prefix ${DISTNAME}/ -o ${DISTNAME}.tar.gz HEAD - SOURCEDIST=`pwd`/`echo tendermint-*.tar.gz` - popd - - # Correct tar file order - mkdir -p temp - pushd temp - tar xf $SOURCEDIST - rm $SOURCEDIST - find tendermint-* | sort | tar --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > $SOURCEDIST - popd - - # Prepare GOPATH and install deps - distsrc=${GOPATH}/src/github.com/tendermint/tendermint - mkdir -p ${distsrc} - pushd ${distsrc} - tar --strip-components=1 -xf $SOURCEDIST - go mod download - popd - - # Configure LDFLAGS for reproducible builds - LDFLAGS="-extldflags=-static -buildid=${VERSION} -s -w \ - -X github.com/tendermint/tendermint/version.GitCommit=${COMMIT}" - - # Extract release tarball and build - for arch in ${ARCHS}; do - INSTALLPATH=`pwd`/installed/${DISTNAME}-${arch} - mkdir -p ${INSTALLPATH} - - # Build tendermint binary - pushd ${distsrc} - GOARCH=${arch} GOROOT_FINAL=${GOROOT} go build -a \ - -trimpath \ - -gcflags=all=-trimpath=${GOPATH} \ - -asmflags=all=-trimpath=${GOPATH} \ - -mod=readonly -tags "tendermint" \ - -ldflags="${LDFLAGS}" \ - -o ${INSTALLPATH}/tendermint ./cmd/tendermint/ - - popd # ${distsrc} - - pushd ${INSTALLPATH} - find -type f | sort | tar \ - --no-recursion --mode='u+rw,go+r-w,a+X' \ - 
--numeric-owner --sort=name \ - --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-linux-${arch}.tar.gz - popd # installed - done - - rm -rf ${distsrc} - - mkdir -p $OUTDIR/src - mv $SOURCEDIST $OUTDIR/src diff --git a/scripts/gitian-descriptors/gitian-windows.yml b/scripts/gitian-descriptors/gitian-windows.yml deleted file mode 100644 index 23dbdab2f..000000000 --- a/scripts/gitian-descriptors/gitian-windows.yml +++ /dev/null @@ -1,107 +0,0 @@ ---- -name: "tendermint-windows" -enable_cache: true -distro: "ubuntu" -suites: -- "bionic" -architectures: -- "amd64" -packages: -- "bsdmainutils" -- "build-essential" -- "ca-certificates" -- "curl" -- "debhelper" -- "dpkg-dev" -- "devscripts" -- "fakeroot" -- "git" -- "golang-any" -- "xxd" -- "quilt" -remotes: -- "url": "https://github.com/tendermint/tendermint.git" - "dir": "tendermint" -files: -- "go1.13.3.linux-amd64.tar.gz" -script: | - set -e -o pipefail - - GO_SRC_RELEASE=go1.13.3.linux-amd64 - GO_SRC_TARBALL="${GO_SRC_RELEASE}.tar.gz" - # Compile go and configure the environment - export TAR_OPTIONS="--mtime="$REFERENCE_DATE\\\ $REFERENCE_TIME"" - export BUILD_DIR=`pwd` - tar xf "${GO_SRC_TARBALL}" - rm -f "${GO_SRC_TARBALL}" - [ -d go/ ] - - export GOOS=windows - export GOROOT=${BUILD_DIR}/go - export GOPATH=${BUILD_DIR}/gopath - mkdir -p ${GOPATH}/bin - - export PATH_orig=${PATH} - export PATH=$GOPATH/bin:$GOROOT/bin:$PATH - - export ARCHS='386 amd64' - export GO111MODULE=on - - # Make release tarball - pushd tendermint - VERSION=$(git describe --tags | sed 's/^v//') - COMMIT=$(git rev-parse --short=8 HEAD) - DISTNAME=tendermint-${VERSION} - git archive --format tar.gz --prefix ${DISTNAME}/ -o ${DISTNAME}.tar.gz HEAD - SOURCEDIST=`pwd`/`echo tendermint-*.tar.gz` - popd - - # Correct tar file order - mkdir -p temp - pushd temp - tar xf $SOURCEDIST - rm $SOURCEDIST - find tendermint-* | sort | tar --no-recursion --mode='u+rw,go+r-w,a+X' --owner=0 --group=0 -c -T - | gzip -9n > $SOURCEDIST - popd - - # Prepare GOPATH and install deps - distsrc=${GOPATH}/src/github.com/tendermint/tendermint - mkdir -p ${distsrc} - pushd ${distsrc} - tar --strip-components=1 -xf $SOURCEDIST - go mod download - popd - - # Configure LDFLAGS for reproducible builds - LDFLAGS="-extldflags=-static -buildid=${VERSION} -s -w \ - -X github.com/tendermint/tendermint/version.GitCommit=${COMMIT}" - - # Extract release tarball and build - for arch in ${ARCHS}; do - INSTALLPATH=`pwd`/installed/${DISTNAME}-${arch} - mkdir -p ${INSTALLPATH} - - # Build tendermint binary - pushd ${distsrc} - GOARCH=${arch} GOROOT_FINAL=${GOROOT} go build -a \ - -trimpath \ - -gcflags=all=-trimpath=${GOPATH} \ - -asmflags=all=-trimpath=${GOPATH} \ - -mod=readonly -tags "tendermint" \ - -ldflags="${LDFLAGS}" \ - -o ${INSTALLPATH}/tendermint.exe ./cmd/tendermint/ - - popd # ${distsrc} - - pushd ${INSTALLPATH} - find -type f | sort | tar \ - --no-recursion --mode='u+rw,go+r-w,a+X' \ - --numeric-owner --sort=name \ - --owner=0 --group=0 -c -T - | gzip -9n > ${OUTDIR}/${DISTNAME}-windows-${arch}.tar.gz - popd # installed - done - - rm -rf ${distsrc} - - mkdir -p $OUTDIR/src - mv $SOURCEDIST $OUTDIR/src diff --git a/scripts/gitian-keys/README.md b/scripts/gitian-keys/README.md deleted file mode 100644 index f4ad711a9..000000000 --- a/scripts/gitian-keys/README.md +++ /dev/null @@ -1,29 +0,0 @@ -## PGP keys of Gitian builders and Tendermint Developers - -The file `keys.txt` contains fingerprints of the public keys of Gitian builders -and active developers. 
- -The associated keys are mainly used to sign git commits or the build results -of Gitian builds. - -The most recent version of each pgp key can be found on most PGP key servers. - -Fetch the latest version from the key server to see if any key was revoked in -the meantime. -To fetch the latest version of all pgp keys in your gpg homedir, - -```sh -gpg --refresh-keys -``` - -To fetch keys of Gitian builders and active core developers, feed the list of -fingerprints of the primary keys into gpg: - -```sh -while read fingerprint keyholder_name; \ -do gpg --keyserver hkp://subset.pool.sks-keyservers.net \ ---recv-keys ${fingerprint}; done < ./keys.txt -``` - -Add your key to the list if you are a Tendermint core developer or you have -provided Gitian signatures for two major or minor releases of Tendermint. diff --git a/scripts/gitian-keys/keys.txt b/scripts/gitian-keys/keys.txt deleted file mode 100644 index 91330ae0b..000000000 --- a/scripts/gitian-keys/keys.txt +++ /dev/null @@ -1 +0,0 @@ -04160004A8276E40BB9890FBE8A48AE5311D765A Alessio Treglia diff --git a/scripts/install-golangci-lint.sh b/scripts/install-golangci-lint.sh deleted file mode 100644 index b95713828..000000000 --- a/scripts/install-golangci-lint.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -f_sha256() { - local l_file - l_file=$1 - python -sBc "import hashlib;print(hashlib.sha256(open('$l_file','rb').read()).hexdigest())" -} - -installer="$(mktemp)" -trap "rm -f ${installer}" EXIT - -GOBIN="${1}" -VERSION="${2}" -HASHSUM="${3}" -CURL="$(which curl)" - -echo "Downloading golangci-lint ${VERSION} installer ..." >&2 -"${CURL}" -sfL "https://raw.githubusercontent.com/golangci/golangci-lint/${VERSION}/install.sh" > "${installer}" - -echo "Checking hashsum ..." >&2 -[ "${HASHSUM}" = "$(f_sha256 ${installer})" ] -chmod +x "${installer}" - -echo "Launching installer ..." >&2 -exec "${installer}" -d -b "${GOBIN}" "${VERSION}" diff --git a/scripts/json2wal/main.go b/scripts/json2wal/main.go index 9f6cdb2b6..ef98977d9 100644 --- a/scripts/json2wal/main.go +++ b/scripts/json2wal/main.go @@ -14,20 +14,11 @@ import ( "os" "strings" - amino "github.com/tendermint/go-amino" - cs "github.com/tendermint/tendermint/consensus" + tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/types" ) -var cdc = amino.NewCodec() - -func init() { - cs.RegisterMessages(cdc) - cs.RegisterWALMessages(cdc) - types.RegisterBlockAmino(cdc) -} - func main() { if len(os.Args) < 3 { fmt.Fprintln(os.Stderr, "missing arguments: Usage:json2wal ") @@ -49,7 +40,7 @@ func main() { // the length of tendermint/wal/MsgInfo in the wal.json may exceed the defaultBufSize(4096) of bufio // because of the byte array in BlockPart // leading to unmarshal error: unexpected end of JSON input - br := bufio.NewReaderSize(f, 2*types.BlockPartSizeBytes) + br := bufio.NewReaderSize(f, int(2*types.BlockPartSizeBytes)) dec := cs.NewWALEncoder(walFile) for { @@ -65,7 +56,7 @@ func main() { } var msg cs.TimedWALMessage - err = cdc.UnmarshalJSON(msgJSON, &msg) + err = tmjson.Unmarshal(msgJSON, &msg) if err != nil { panic(fmt.Errorf("failed to unmarshal json: %v", err)) } diff --git a/scripts/linkify_changelog.py b/scripts/linkify_changelog.py index 16647c05f..bc446c769 100644 --- a/scripts/linkify_changelog.py +++ b/scripts/linkify_changelog.py @@ -3,11 +3,11 @@ # This script goes through the provided file, and replaces any " \#", # with the valid mark down formatted link to it. e.g. 
-# " [\#number](https://github.com/tendermint/tendermint/issues/) -# Note that if the number is for a PR, github will auto-redirect you when you click the link. +# " [\#number](https://github.com/tendermint/tendermint/pull/) +# Note that if the number is for a an issue, github will auto-redirect you when you click the link. # It is safe to run the script multiple times in succession. # # Example usage $ python3 linkify_changelog.py ../CHANGELOG_PENDING.md for line in fileinput.input(inplace=1): - line = re.sub(r"\s\\#([0-9]*)", r" [\\#\1](https://github.com/tendermint/tendermint/issues/\1)", line.rstrip()) + line = re.sub(r"\s\\#([0-9]*)", r" [\\#\1](https://github.com/tendermint/tendermint/pull/\1)", line.rstrip()) print(line) \ No newline at end of file diff --git a/scripts/localnet-blocks-test.sh b/scripts/localnet-blocks-test.sh deleted file mode 100755 index a33ab00f3..000000000 --- a/scripts/localnet-blocks-test.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -ITERATIONS=$1 -SLEEP=$2 -NUMBLOCKS=$3 -NODEADDR=$4 - -if [ -z "$1" ]; then - echo "Need to input number of iterations to run..." - exit 1 -fi - -if [ -z "$2" ]; then - echo "Need to input number of seconds to sleep between iterations" - exit 1 -fi - -if [ -z "$3" ]; then - echo "Need to input block height to declare completion..." - exit 1 -fi - -if [ -z "$4" ]; then - echo "Need to input node address to poll..." - exit 1 -fi - -I=0 -while [ ${I} -lt "$ITERATIONS" ]; do - var=$(curl -s "$NODEADDR:26657/status" | jq -r ".result.sync_info.latest_block_height") - echo "Number of Blocks: ${var}" - if [ ! -z "${var}" ] && [ "${var}" -gt "${NUMBLOCKS}" ]; then - echo "Number of blocks reached, exiting success..." - exit 0 - fi - I=$((I+1)) - sleep "$SLEEP" -done - -echo "Timeout reached, exiting failure..." -exit 1 diff --git a/scripts/privValUpgrade.go b/scripts/privValUpgrade.go deleted file mode 100644 index 1882a3663..000000000 --- a/scripts/privValUpgrade.go +++ /dev/null @@ -1,45 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/privval" -) - -var ( - logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) -) - -func main() { - args := os.Args[1:] - if len(args) != 3 { - fmt.Println("Expected three args: ") - fmt.Println( - "Eg. 
~/.tendermint/config/priv_validator.json" + - " ~/.tendermint/config/priv_validator_key.json" + - " ~/.tendermint/data/priv_validator_state.json", - ) - os.Exit(1) - } - err := loadAndUpgrade(args[0], args[1], args[2]) - if err != nil { - fmt.Println(err) - os.Exit(1) - } -} - -func loadAndUpgrade(oldPVPath, newPVKeyPath, newPVStatePath string) error { - oldPV, err := privval.LoadOldFilePV(oldPVPath) - if err != nil { - return fmt.Errorf("error reading OldPrivValidator from %v: %v", oldPVPath, err) - } - logger.Info("Upgrading PrivValidator file", - "old", oldPVPath, - "newKey", newPVKeyPath, - "newState", newPVStatePath, - ) - oldPV.Upgrade(newPVKeyPath, newPVStatePath) - return nil -} diff --git a/scripts/privValUpgrade_test.go b/scripts/privValUpgrade_test.go deleted file mode 100644 index 287c4fc50..000000000 --- a/scripts/privValUpgrade_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/privval" -) - -const lastSignBytes = "750802110500000000000000220B08B398F3E00510F48DA6402A480A20FC25" + - "8973076512999C3E6839A22E9FBDB1B77CF993E8A9955412A41A59D4CAD312240A20C971B286ACB8AA" + - "A6FCA0365EB0A660B189EDC08B46B5AF2995DEFA51A28D215B10013211746573742D636861696E2D533245415533" - -const oldPrivvalContent = `{ - "address": "1D8089FAFDFAE4A637F3D616E17B92905FA2D91D", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "r3Yg2AhDZ745CNTpavsGU+mRZ8WpRXqoJuyqjN8mJq0=" - }, - "last_height": "5", - "last_round": "0", - "last_step": 3, - "last_signature": "CTr7b9ZQlrJJf+12rPl5t/YSCUc/KqV7jQogCfFJA24e7hof69X6OMT7eFLVQHyodPjD/QTA298XHV5ejxInDQ==", - "last_signbytes": "` + lastSignBytes + `", - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "7MwvTGEWWjsYwjn2IpRb+GYsWi9nnFsw8jPLLY1UtP6vdiDYCENnvjkI1Olq+wZT6ZFnxalFeqgm7KqM3yYmrQ==" - } -}` - -func TestLoadAndUpgrade(t *testing.T) { - - oldFilePath := initTmpOldFile(t) - defer os.Remove(oldFilePath) - newStateFile, err := ioutil.TempFile("", "priv_validator_state*.json") - defer os.Remove(newStateFile.Name()) - require.NoError(t, err) - newKeyFile, err := ioutil.TempFile("", "priv_validator_key*.json") - defer os.Remove(newKeyFile.Name()) - require.NoError(t, err) - emptyOldFile, err := ioutil.TempFile("", "priv_validator_empty*.json") - require.NoError(t, err) - defer os.Remove(emptyOldFile.Name()) - - type args struct { - oldPVPath string - newPVKeyPath string - newPVStatePath string - } - tests := []struct { - name string - args args - wantErr bool - wantPanic bool - }{ - {"successful upgrade", - args{oldPVPath: oldFilePath, newPVKeyPath: newKeyFile.Name(), newPVStatePath: newStateFile.Name()}, - false, false, - }, - {"unsuccessful upgrade: empty old privval file", - args{oldPVPath: emptyOldFile.Name(), newPVKeyPath: newKeyFile.Name(), newPVStatePath: newStateFile.Name()}, - true, false, - }, - {"unsuccessful upgrade: invalid new paths (1/3)", - args{oldPVPath: oldFilePath, newPVKeyPath: "", newPVStatePath: newStateFile.Name()}, - false, true, - }, - {"unsuccessful upgrade: invalid new paths (2/3)", - args{oldPVPath: oldFilePath, newPVKeyPath: newKeyFile.Name(), newPVStatePath: ""}, - false, true, - }, - {"unsuccessful upgrade: invalid new paths (3/3)", - args{oldPVPath: oldFilePath, newPVKeyPath: "", newPVStatePath: ""}, - false, true, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - // need to re-write the file 
every time because upgrading renames it
-			err := ioutil.WriteFile(oldFilePath, []byte(oldPrivvalContent), 0600)
-			require.NoError(t, err)
-			if tt.wantPanic {
-				require.Panics(t, func() { loadAndUpgrade(tt.args.oldPVPath, tt.args.newPVKeyPath, tt.args.newPVStatePath) })
-			} else {
-				err = loadAndUpgrade(tt.args.oldPVPath, tt.args.newPVKeyPath, tt.args.newPVStatePath)
-				if tt.wantErr {
-					assert.Error(t, err)
-					fmt.Println("ERR", err)
-				} else {
-					assert.NoError(t, err)
-					upgradedPV := privval.LoadFilePV(tt.args.newPVKeyPath, tt.args.newPVStatePath)
-					oldPV, err := privval.LoadOldFilePV(tt.args.oldPVPath + ".bak")
-					require.NoError(t, err)
-
-					assert.Equal(t, oldPV.Address, upgradedPV.Key.Address)
-					assert.Equal(t, oldPV.Address, upgradedPV.GetAddress())
-					assert.Equal(t, oldPV.PubKey, upgradedPV.Key.PubKey)
-					upv, err := upgradedPV.GetPubKey()
-					require.NoError(t, err)
-					assert.Equal(t, oldPV.PubKey, upv)
-					assert.Equal(t, oldPV.PrivKey, upgradedPV.Key.PrivKey)
-
-					assert.Equal(t, oldPV.LastHeight, upgradedPV.LastSignState.Height)
-					assert.Equal(t, oldPV.LastRound, upgradedPV.LastSignState.Round)
-					assert.Equal(t, oldPV.LastSignature, upgradedPV.LastSignState.Signature)
-					assert.Equal(t, oldPV.LastSignBytes, upgradedPV.LastSignState.SignBytes)
-					assert.Equal(t, oldPV.LastStep, upgradedPV.LastSignState.Step)
-
-				}
-			}
-		})
-	}
-}
-
-func initTmpOldFile(t *testing.T) string {
-	tmpfile, err := ioutil.TempFile("", "priv_validator_*.json")
-	require.NoError(t, err)
-	t.Logf("created test file %s", tmpfile.Name())
-	_, err = tmpfile.WriteString(oldPrivvalContent)
-	require.NoError(t, err)
-
-	return tmpfile.Name()
-}
diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh
old mode 100644
new mode 100755
index e6226dc1a..51b1cc6d3
--- a/scripts/protocgen.sh
+++ b/scripts/protocgen.sh
@@ -2,10 +2,8 @@
 
 set -eo pipefail
 
-proto_dirs=$(find . -path ./third_party -prune -o -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq)
-for dir in $proto_dirs; do
-  protoc \
-    -I. \
-    --gogo_out=Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative:. \
-    $(find "${dir}" -name '*.proto')
-done
+buf generate --path proto/tendermint
+
+mv ./proto/tendermint/abci/types.pb.go ./abci/types
+
+mv ./proto/tendermint/rpc/grpc/types.pb.go ./rpc/grpc
diff --git a/scripts/publish.sh b/scripts/publish.sh
deleted file mode 100755
index 7da299aaf..000000000
--- a/scripts/publish.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-VERSION=$1
-DIST_DIR=./build/dist
-
-# Get the version from the environment, or try to figure it out.
-if [ -z $VERSION ]; then
-  VERSION=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < version/version.go)
-fi
-if [ -z "$VERSION" ]; then
-  echo "Please specify a version."
-  exit 1
-fi
-echo "==> Copying ${DIST_DIR} to S3..."
-
-# copy to s3
-aws s3 cp --recursive ${DIST_DIR} s3://tendermint/binaries/tendermint/v${VERSION} --acl public-read
-
-exit 0
diff --git a/scripts/release.sh b/scripts/release.sh
deleted file mode 100755
index 8c40d36b6..000000000
--- a/scripts/release.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# Get the version from the environment, or try to figure it out.
-if [ -z $VERSION ]; then
-  VERSION=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < version/version.go)
-fi
-if [ -z "$VERSION" ]; then
-  echo "Please specify a version."
-  exit 1
-fi
-echo "==> Releasing version $VERSION..."
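# A sketch of the version lookup used above, runnable standalone from the repo
# root (the printed value "0.33.0" is a made-up example): awk splits each line
# of version/version.go on double quotes and emits the quoted string from the
# first line matching TMCoreSemVer.
#
#   awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < version/version.go
#   # -> 0.33.0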
-
-# Get the parent directory of where this script is.
-SOURCE="${BASH_SOURCE[0]}"
-while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
-DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
-
-# Change into that dir because we expect that.
-cd "$DIR"
-
-# Building binaries
-sh -c "'$DIR/scripts/dist.sh'"
-
-# Pushing binaries to S3
-sh -c "'$DIR/scripts/publish.sh'"
-
-# echo "==> Crafting a Github release"
-# today=$(date +"%B-%d-%Y")
-# ghr -b "https://github.com/tendermint/tendermint/blob/master/CHANGELOG.md#${VERSION//.}-${today,}" "v$VERSION" "$DIR/build/dist"
-
-# Build and push Docker image
-
-## Get SHA256SUM of the linux archive
-SHA256SUM=$(shasum -a256 "${DIR}/build/dist/tendermint_${VERSION}_linux_amd64.zip" | awk '{print $1;}')
-
-## Replace TM_VERSION and TM_SHA256SUM with the new values
-sed -i -e "s/TM_VERSION .*/TM_VERSION $VERSION/g" "$DIR/DOCKER/Dockerfile"
-sed -i -e "s/TM_SHA256SUM .*/TM_SHA256SUM $SHA256SUM/g" "$DIR/DOCKER/Dockerfile"
-git commit -m "update Dockerfile" -a "$DIR/DOCKER/Dockerfile"
-echo "==> TODO: update DOCKER/README.md (latest Dockerfile's hash is $(git rev-parse HEAD)) and copy its content to https://store.docker.com/community/images/tendermint/tendermint"
-
-pushd "$DIR/DOCKER"
-
-## Build Docker image
-TAG=$VERSION sh -c "'./build.sh'"
-
-## Push Docker image
-TAG=$VERSION sh -c "'./push.sh'"
-
-popd
-
-exit 0
diff --git a/scripts/release_management/README.md b/scripts/release_management/README.md
deleted file mode 100644
index e92f1ccf6..000000000
--- a/scripts/release_management/README.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# Release management scripts
-
-## Overview
-The scripts in this folder are used for release management in CircleCI. Although the scripts are fully configurable using input parameters,
-the default settings were modified to accommodate CircleCI execution.
-
-# Build scripts
-These scripts help during the build process. They prepare the release files.
-
-## bump-semver.py
-Bumps the semantic version of the input `--version`. Versions are expected in vMAJOR.MINOR.PATCH format or vMAJOR.MINOR format.
-
-In vMAJOR.MINOR format, the result will be patch version 0 of that version, for example `v1.2 -> v1.2.0`.
-
-In vMAJOR.MINOR.PATCH format, the result will be a bumped PATCH version, for example `v1.2.3 -> v1.2.4`.
-
-If the PATCH number contains letters, it is considered a development version, in which case the result is the non-development version of that number.
-The patch number will not be bumped; only the "-dev" or similar additional text will be removed. For example: `v1.2.6-rc1 -> v1.2.6`.
-
-## zip-file.py
-Specialized ZIP command for release management. Special features:
-1. Uses Python ZIP libraries, so the `zip` command does not need to be installed.
-1. Can only zip one file.
-1. Optionally gets file version, Go OS and architecture.
-1. By default, all inputs and outputs are formatted exactly how CircleCI needs them.
-
-By default, the command will try to ZIP the file at `build/tendermint_${GOOS}_${GOARCH}`.
-This can be changed with the `--file` input parameter.
-
-By default, the command will output the ZIP file to `build/tendermint_${CIRCLE_TAG}_${GOOS}_${GOARCH}.zip`.
-This can be changed with the `--destination` (folder), `--version`, `--goos` and `--goarch` input parameters respectively.
-
-## sha-files.py
-Specialized `shasum` command for release management (a Go sketch of the same logic follows below). Special features:
-1. Reads all ZIP files in the given folder.
-1. By default, all inputs and outputs are formatted exactly how CircleCI needs them.
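For illustration only, the core of `sha-files.py` (hash every ZIP in a folder and append `<hex> <name>` lines) can be sketched in Go. Nothing below ships with the repository; the `/tmp/workspace` and `SHA256SUMS` defaults simply mirror the Python script:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	folder := "/tmp/workspace" // assumed default, mirroring sha-files.py

	// Open the summary file in append mode, like the script's 'a+' mode,
	// so repeated runs accumulate entries instead of truncating.
	out, err := os.OpenFile(filepath.Join(folder, "SHA256SUMS"),
		os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		panic(err)
	}
	defer out.Close()

	entries, err := os.ReadDir(folder)
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		// Only regular *.zip files are hashed, as in the Python version.
		if e.IsDir() || !strings.HasSuffix(e.Name(), ".zip") {
			continue
		}
		f, err := os.Open(filepath.Join(folder, e.Name()))
		if err != nil {
			panic(err)
		}
		h := sha256.New()
		if _, err := io.Copy(h, f); err != nil {
			panic(err)
		}
		f.Close()
		// Same "<hex> <name>" line layout the Python script wrote.
		fmt.Fprintf(out, "%x %s\n", h.Sum(nil), e.Name())
	}
}
```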
- -By default, the command will look up all ZIP files in the `build/` folder. - -By default, the command will output results into the `build/SHA256SUMS` file. - -# GitHub management -Uploading build results to GitHub requires at least these steps: -1. Create a new release on GitHub with content -2. Upload all binaries to the release -3. Publish the release -The below scripts help with these steps. - -## github-draft.py -Creates a GitHub release and fills the content with the CHANGELOG.md link. The version number can be changed by the `--version` parameter. - -By default, the command will use the tendermint/tendermint organization/repo, which can be changed using the `--org` and `--repo` parameters. - -By default, the command will get the version number from the `${CIRCLE_TAG}` variable. - -Returns the GitHub release ID. - -## github-upload.py -Upload a file to a GitHub release. The release is defined by the mandatory `--id` (release ID) input parameter. - -By default, the command will upload the file `/tmp/workspace/tendermint_${CIRCLE_TAG}_${GOOS}_${GOARCH}.zip`. This can be changed by the `--file` input parameter. - -## github-publish.py -Publish a GitHub release. The release is defined by the mandatory `--id` (release ID) input parameter. - diff --git a/scripts/release_management/bump-semver.py b/scripts/release_management/bump-semver.py deleted file mode 100755 index ce56d8d7c..000000000 --- a/scripts/release_management/bump-semver.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python - -# Bump the release number of a semantic version number and print it. --version is required. -# Version is -# - vA.B.C, in which case vA.B.C+1 will be returned -# - vA.B.C-devorwhatnot in which case vA.B.C will be returned -# - vA.B in which case vA.B.0 will be returned - -import re -import argparse -import sys - - -def semver(ver): - if re.match('v[0-9]+\.[0-9]+',ver) is None: - ver="v0.0" - #raise argparse.ArgumentTypeError('--version must be a semantic version number with major, minor and patch numbers') - return ver - - -def get_tendermint_version(): - """Extracts the current Tendermint version from version/version.go""" - pattern = re.compile(r"TMCoreSemVer = \"(?P([0-9.]+)+)\"") - with open("version/version.go", "rt") as version_file: - for line in version_file: - m = pattern.search(line) - if m: - return m.group('version') - - return None - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--version", help="Version number to bump, e.g.: v1.0.0", required=True, type=semver) - args = parser.parse_args() - - found = re.match('(v[0-9]+\.[0-9]+)(\.(.+))?', args.version) - majorminorprefix = found.group(1) - patch = found.group(3) - if patch is None: - patch = "0-new" - - if re.match('[0-9]+$',patch) is None: - patchfound = re.match('([0-9]+)',patch) - patch = int(patchfound.group(1)) - else: - patch = int(patch) + 1 - - expected_version = "{0}.{1}".format(majorminorprefix, patch) - # if we're doing a release - if expected_version != "v0.0.0": - cur_version = get_tendermint_version() - if not cur_version: - print("Failed to obtain Tendermint version from version/version.go") - sys.exit(1) - expected_version_noprefix = expected_version.lstrip("v") - if expected_version_noprefix != "0.0.0" and expected_version_noprefix != cur_version: - print("Expected version/version.go#TMCoreSemVer to be {0}, but was {1}".format(expected_version_noprefix, cur_version)) - sys.exit(1) - - print(expected_version) diff --git a/scripts/release_management/github-draft.py 
b/scripts/release_management/github-draft.py deleted file mode 100755 index 8a189d53e..000000000 --- a/scripts/release_management/github-draft.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python - -# Create a draft release on GitHub. By default in the tendermint/tendermint repo. -# Optimized for CircleCI - -import argparse -import httplib -import json -import os -from base64 import b64encode - -def request(org, repo, data): - user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") - headers = { - 'User-Agent': 'tenderbot', - 'Accept': 'application/vnd.github.v3+json', - 'Authorization': 'Basic %s' % user_and_pass - } - - conn = httplib.HTTPSConnection('api.github.com', timeout=5) - conn.request('POST', '/repos/{0}/{1}/releases'.format(org,repo), data, headers) - response = conn.getresponse() - if response.status < 200 or response.status > 299: - print("{0}: {1}".format(response.status, response.reason)) - conn.close() - raise IOError(response.reason) - responsedata = response.read() - conn.close() - return json.loads(responsedata) - - -def create_draft(org,repo,branch,version): - draft = { - 'tag_name': version, - 'target_commitish': '{0}'.format(branch), - 'name': '{0} (WARNING: ALPHA SOFTWARE)'.format(version), - 'body': 'https://github.com/{0}/{1}/blob/{2}/CHANGELOG.md#{3}'.format(org,repo,branch,version.replace('.','')), - 'draft': True, - 'prerelease': False - } - data=json.dumps(draft) - return request(org, repo, data) - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--org", default="tendermint", help="GitHub organization") - parser.add_argument("--repo", default="tendermint", help="GitHub repository") - parser.add_argument("--branch", default=os.environ.get('CIRCLE_BRANCH'), help="Branch to build from, e.g.: v1.0") - parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for binary, e.g.: v1.0.0") - args = parser.parse_args() - - if not os.environ.has_key('GITHUB_USERNAME'): - raise parser.error('environment variable GITHUB_USERNAME is required') - - if not os.environ.has_key('GITHUB_TOKEN'): - raise parser.error('environment variable GITHUB_TOKEN is required') - - release = create_draft(args.org,args.repo,args.branch,args.version) - - print(release["id"]) - diff --git a/scripts/release_management/github-openpr.py b/scripts/release_management/github-openpr.py deleted file mode 100755 index af0434f02..000000000 --- a/scripts/release_management/github-openpr.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python - -# Open a PR against the develop branch. --branch required. 
-# Optimized for CircleCI - -import json -import os -import argparse -import httplib -from base64 import b64encode - - -def request(org, repo, data): - user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") - headers = { - 'User-Agent': 'tenderbot', - 'Accept': 'application/vnd.github.v3+json', - 'Authorization': 'Basic %s' % user_and_pass - } - - conn = httplib.HTTPSConnection('api.github.com', timeout=5) - conn.request('POST', '/repos/{0}/{1}/pulls'.format(org,repo), data, headers) - response = conn.getresponse() - if response.status < 200 or response.status > 299: - print(response) - conn.close() - raise IOError(response.reason) - responsedata = response.read() - conn.close() - return json.loads(responsedata) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--org", default="tendermint", help="GitHub organization. Defaults to tendermint.") - parser.add_argument("--repo", default="tendermint", help="GitHub repository. Defaults to tendermint.") - parser.add_argument("--head", help="The name of the branch where your changes are implemented.", required=True) - parser.add_argument("--base", help="The name of the branch you want the changes pulled into.", required=True) - parser.add_argument("--title", default="Security release {0}".format(os.environ.get('CIRCLE_TAG')), help="The title of the pull request.") - args = parser.parse_args() - - if not os.environ.has_key('GITHUB_USERNAME'): - raise parser.error('GITHUB_USERNAME not set.') - - if not os.environ.has_key('GITHUB_TOKEN'): - raise parser.error('GITHUB_TOKEN not set.') - - if os.environ.get('CIRCLE_TAG') is None: - raise parser.error('CIRCLE_TAG not set.') - - result = request(args.org, args.repo, data=json.dumps({'title':"{0}".format(args.title),'head':"{0}".format(args.head),'base':"{0}".format(args.base),'body':""})) - print(result['html_url']) diff --git a/scripts/release_management/github-public-newbranch.bash b/scripts/release_management/github-public-newbranch.bash deleted file mode 100644 index ca2fa1314..000000000 --- a/scripts/release_management/github-public-newbranch.bash +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/sh - -# github-public-newbranch.bash - create public branch from the security repository - -set -euo pipefail - -# Create new branch -BRANCH="${CIRCLE_TAG:-v0.0.0}-security-`date -u +%Y%m%d%H%M%S`" -# Check if the patch release exist already as a branch -if [ -n "`git branch | grep '${BRANCH}'`" ]; then - echo "WARNING: Branch ${BRANCH} already exists." -else - echo "Creating branch ${BRANCH}." - git branch "${BRANCH}" -fi - -# ... and check it out -git checkout "${BRANCH}" - -# Add entry to public repository -git remote add tendermint-origin git@github.com:tendermint/tendermint.git - -# Push branch and tag to public repository -git push tendermint-origin -git push tendermint-origin --tags - -# Create a PR from the public branch to the assumed release branch in public (release branch has to exist) -python -u scripts/release_management/github-openpr.py --head "${BRANCH}" --base "${BRANCH:%.*}" diff --git a/scripts/release_management/github-publish.py b/scripts/release_management/github-publish.py deleted file mode 100755 index 31071aecd..000000000 --- a/scripts/release_management/github-publish.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python - -# Publish an existing GitHub draft release. --id required. 
-# Optimized for CircleCI - -import json -import os -import argparse -import httplib -from base64 import b64encode - - -def request(org, repo, id, data): - user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") - headers = { - 'User-Agent': 'tenderbot', - 'Accept': 'application/vnd.github.v3+json', - 'Authorization': 'Basic %s' % user_and_pass - } - - conn = httplib.HTTPSConnection('api.github.com', timeout=5) - conn.request('POST', '/repos/{0}/{1}/releases/{2}'.format(org,repo,id), data, headers) - response = conn.getresponse() - if response.status < 200 or response.status > 299: - print(response) - conn.close() - raise IOError(response.reason) - responsedata = response.read() - conn.close() - return json.loads(responsedata) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--org", default="tendermint", help="GitHub organization") - parser.add_argument("--repo", default="tendermint", help="GitHub repository") - parser.add_argument("--id", help="GitHub release ID", required=True, type=int) - parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for the release, e.g.: v1.0.0") - args = parser.parse_args() - - if not os.environ.has_key('GITHUB_USERNAME'): - raise parser.error('GITHUB_USERNAME not set.') - - if not os.environ.has_key('GITHUB_TOKEN'): - raise parser.error('GITHUB_TOKEN not set.') - - try: - result = request(args.org, args.repo, args.id, data=json.dumps({'draft':False,'tag_name':"{0}".format(args.version)})) - except IOError as e: - print(e) - result = request(args.org, args.repo, args.id, data=json.dumps({'draft':False,'tag_name':"{0}-autorelease".format(args.version)})) - - print(result['name']) diff --git a/scripts/release_management/github-upload.py b/scripts/release_management/github-upload.py deleted file mode 100755 index 77c76a755..000000000 --- a/scripts/release_management/github-upload.py +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env python - -# Upload a file to a GitHub draft release. --id and --file are required. 
-# Optimized for CircleCI - -import json -import os -import re -import argparse -import mimetypes -import httplib -from base64 import b64encode - - -def request(baseurl, path, mimetype, mimeencoding, data): - user_and_pass = b64encode(b"{0}:{1}".format(os.environ['GITHUB_USERNAME'], os.environ['GITHUB_TOKEN'])).decode("ascii") - - headers = { - 'User-Agent': 'tenderbot', - 'Accept': 'application/vnd.github.v3.raw+json', - 'Authorization': 'Basic %s' % user_and_pass, - 'Content-Type': mimetype, - 'Content-Encoding': mimeencoding - } - - conn = httplib.HTTPSConnection(baseurl, timeout=5) - conn.request('POST', path, data, headers) - response = conn.getresponse() - if response.status < 200 or response.status > 299: - print(response) - conn.close() - raise IOError(response.reason) - responsedata = response.read() - conn.close() - return json.loads(responsedata) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--id", help="GitHub release ID", required=True, type=int) - parser.add_argument("--file", default="/tmp/workspace/tendermint_{0}_{1}_{2}.zip".format(os.environ.get('CIRCLE_TAG'),os.environ.get('GOOS'),os.environ.get('GOARCH')), help="File to upload") - parser.add_argument("--return-id-only", help="Return only the release ID after upload to GitHub.", action='store_true') - args = parser.parse_args() - - if not os.environ.has_key('GITHUB_USERNAME'): - raise parser.error('GITHUB_USERNAME not set.') - - if not os.environ.has_key('GITHUB_TOKEN'): - raise parser.error('GITHUB_TOKEN not set.') - - mimetypes.init() - filename = os.path.basename(args.file) - mimetype,mimeencoding = mimetypes.guess_type(filename, strict=False) - if mimetype is None: - mimetype = 'application/zip' - if mimeencoding is None: - mimeencoding = 'utf8' - - with open(args.file,'rb') as f: - asset = f.read() - - result = request('uploads.github.com', '/repos/tendermint/tendermint/releases/{0}/assets?name={1}'.format(args.id, filename), mimetype, mimeencoding, asset) - - if args.return_id_only: - print(result['id']) - else: - print(result['browser_download_url']) - diff --git a/scripts/release_management/sha-files.py b/scripts/release_management/sha-files.py deleted file mode 100755 index 2a9ee0d59..000000000 --- a/scripts/release_management/sha-files.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python - -# Create SHA256 summaries from all ZIP files in a folder -# Optimized for CircleCI - -import re -import os -import argparse -import zipfile -import hashlib - - -BLOCKSIZE = 65536 - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--folder", default="/tmp/workspace", help="Folder to look for, for ZIP files") - parser.add_argument("--shafile", default="/tmp/workspace/SHA256SUMS", help="SHA256 summaries File") - args = parser.parse_args() - - for filename in os.listdir(args.folder): - if re.search('\.zip$',filename) is None: - continue - if not os.path.isfile(os.path.join(args.folder, filename)): - continue - with open(args.shafile,'a+') as shafile: - hasher = hashlib.sha256() - with open(os.path.join(args.folder, filename),'r') as f: - buf = f.read(BLOCKSIZE) - while len(buf) > 0: - hasher.update(buf) - buf = f.read(BLOCKSIZE) - shafile.write("{0} {1}\n".format(hasher.hexdigest(),filename)) - diff --git a/scripts/release_management/zip-file.py b/scripts/release_management/zip-file.py deleted file mode 100755 index 5d2f5b2c8..000000000 --- a/scripts/release_management/zip-file.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python - -# 
ZIP one file as "tendermint" into a ZIP like tendermint_VERSION_OS_ARCH.zip -# Use environment variables CIRCLE_TAG, GOOS and GOARCH for easy input parameters. -# Optimized for CircleCI - -import os -import argparse -import zipfile -import hashlib - - -BLOCKSIZE = 65536 - - -def zip_asset(file,destination,arcname,version,goos,goarch): - filename = os.path.basename(file) - output = "{0}/{1}_{2}_{3}_{4}.zip".format(destination,arcname,version,goos,goarch) - - with zipfile.ZipFile(output,'w') as f: - f.write(filename=file,arcname=arcname) - f.comment=filename - return output - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--file", default="build/tendermint_{0}_{1}".format(os.environ.get('GOOS'),os.environ.get('GOARCH')), help="File to zip") - parser.add_argument("--destination", default="build", help="Destination folder for files") - parser.add_argument("--version", default=os.environ.get('CIRCLE_TAG'), help="Version number for binary, e.g.: v1.0.0") - parser.add_argument("--goos", default=os.environ.get('GOOS'), help="GOOS parameter") - parser.add_argument("--goarch", default=os.environ.get('GOARCH'), help="GOARCH parameter") - args = parser.parse_args() - - if args.version is None: - raise parser.error("argument --version is required") - if args.goos is None: - raise parser.error("argument --goos is required") - if args.goarch is None: - raise parser.error("argument --goarch is required") - - file = zip_asset(args.file,args.destination,"tendermint",args.version,args.goos,args.goarch) - print(file) - diff --git a/scripts/wal2json/main.go b/scripts/wal2json/main.go index 181f40c75..6fa890522 100644 --- a/scripts/wal2json/main.go +++ b/scripts/wal2json/main.go @@ -12,20 +12,10 @@ import ( "io" "os" - amino "github.com/tendermint/go-amino" - cs "github.com/tendermint/tendermint/consensus" - "github.com/tendermint/tendermint/types" + tmjson "github.com/tendermint/tendermint/libs/json" ) -var cdc = amino.NewCodec() - -func init() { - cs.RegisterMessages(cdc) - cs.RegisterWALMessages(cdc) - types.RegisterBlockAmino(cdc) -} - func main() { if len(os.Args) < 2 { fmt.Println("missing one argument: ") @@ -47,7 +37,7 @@ func main() { panic(fmt.Errorf("failed to decode msg: %v", err)) } - json, err := cdc.MarshalJSON(msg) + json, err := tmjson.Marshal(msg) if err != nil { panic(fmt.Errorf("failed to marshal msg: %v", err)) } @@ -56,14 +46,17 @@ func main() { if err == nil { _, err = os.Stdout.Write([]byte("\n")) } + if err == nil { if endMsg, ok := msg.Msg.(cs.EndHeightMessage); ok { - _, err = os.Stdout.Write([]byte(fmt.Sprintf("ENDHEIGHT %d\n", endMsg.Height))) // nolint: errcheck, gas + _, err = os.Stdout.Write([]byte(fmt.Sprintf("ENDHEIGHT %d\n", endMsg.Height))) } } + if err != nil { fmt.Println("Failed to write message", err) - os.Exit(1) + os.Exit(1) //nolint:gocritic } + } } diff --git a/state/codec.go b/state/codec.go deleted file mode 100644 index df2c15545..000000000 --- a/state/codec.go +++ /dev/null @@ -1,13 +0,0 @@ -package state - -import ( - amino "github.com/tendermint/go-amino" - - cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" -) - -var cdc = amino.NewCodec() - -func init() { - cryptoamino.RegisterAmino(cdc) -} diff --git a/state/execution.go b/state/execution.go index cd45484ce..241c15e93 100644 --- a/state/execution.go +++ b/state/execution.go @@ -1,15 +1,17 @@ package state import ( + "errors" "fmt" "time" - dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" + cryptoenc 
"github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/fail" "github.com/tendermint/tendermint/libs/log" mempl "github.com/tendermint/tendermint/mempool" + tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -22,7 +24,7 @@ import ( // BlockExecutor provides the context and accessories for properly executing a block. type BlockExecutor struct { // save state, validators, consensus params, abci responses here - db dbm.DB + store Store // execute the app against this proxyApp proxy.AppConnConsensus @@ -51,7 +53,7 @@ func BlockExecutorWithMetrics(metrics *Metrics) BlockExecutorOption { // NewBlockExecutor returns a new BlockExecutor with a NopEventBus. // Call SetEventBus to provide one. func NewBlockExecutor( - db dbm.DB, + stateStore Store, logger log.Logger, proxyApp proxy.AppConnConsensus, mempool mempl.Mempool, @@ -59,7 +61,7 @@ func NewBlockExecutor( options ...BlockExecutorOption, ) *BlockExecutor { res := &BlockExecutor{ - db: db, + store: stateStore, proxyApp: proxyApp, eventBus: types.NopEventBus{}, mempool: mempool, @@ -75,8 +77,8 @@ func NewBlockExecutor( return res } -func (blockExec *BlockExecutor) DB() dbm.DB { - return blockExec.db +func (blockExec *BlockExecutor) Store() Store { + return blockExec.store } // SetEventBus - sets the event bus for publishing block related events. @@ -98,12 +100,11 @@ func (blockExec *BlockExecutor) CreateProposalBlock( maxBytes := state.ConsensusParams.Block.MaxBytes maxGas := state.ConsensusParams.Block.MaxGas - // Fetch a limited amount of valid evidence - maxNumEvidence, _ := types.MaxEvidencePerBlock(maxBytes) - evidence := blockExec.evpool.PendingEvidence(maxNumEvidence) + evidence, evSize := blockExec.evpool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) // Fetch a limited amount of valid txs - maxDataBytes := types.MaxDataBytes(maxBytes, state.Validators.Size(), len(evidence)) + maxDataBytes := types.MaxDataBytes(maxBytes, evSize, state.Validators.Size()) + txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) return state.MakeBlock(height, txs, commit, evidence, proposerAddr) @@ -114,7 +115,11 @@ func (blockExec *BlockExecutor) CreateProposalBlock( // Validation does not mutate state, but does require historical information from the stateDB, // ie. to verify evidence from a validator at an old height. 
func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) error { - return validateBlock(blockExec.evpool, blockExec.db, state, block) + err := validateBlock(state, block) + if err != nil { + return err + } + return blockExec.evpool.CheckEvidence(block.Evidence.Evidence) } // ApplyBlock validates the block against the state, executes it against the app, @@ -127,12 +132,13 @@ func (blockExec *BlockExecutor) ApplyBlock( state State, blockID types.BlockID, block *types.Block, ) (State, int64, error) { - if err := blockExec.ValidateBlock(state, block); err != nil { + if err := validateBlock(state, block); err != nil { return state, 0, ErrInvalidBlock(err) } startTime := time.Now().UnixNano() - abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, blockExec.db) + abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, + blockExec.store, state.InitialHeight) endTime := time.Now().UnixNano() blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) if err != nil { @@ -142,7 +148,9 @@ func (blockExec *BlockExecutor) ApplyBlock( fail.Fail() // XXX // Save the results before we commit. - SaveABCIResponses(blockExec.db, block.Height, abciResponses) + if err := blockExec.store.SaveABCIResponses(block.Height, abciResponses); err != nil { + return state, 0, err + } fail.Fail() // XXX @@ -172,14 +180,16 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, 0, fmt.Errorf("commit failed for application: %v", err) } - // Update evpool with the block and state. - blockExec.evpool.Update(block, state) + // Update evpool with the latest state. + blockExec.evpool.Update(state, block.Evidence.Evidence) fail.Fail() // XXX // Update the app hash and save the state. state.AppHash = appHash - SaveState(blockExec.db, state) + if err := blockExec.store.Save(state); err != nil { + return state, 0, err + } fail.Fail() // XXX @@ -251,12 +261,15 @@ func execBlockOnProxyApp( logger log.Logger, proxyAppConn proxy.AppConnConsensus, block *types.Block, - stateDB dbm.DB, -) (*ABCIResponses, error) { + store Store, + initialHeight int64, +) (*tmstate.ABCIResponses, error) { var validTxs, invalidTxs = 0, 0 txIndex := 0 - abciResponses := NewABCIResponses(block) + abciResponses := new(tmstate.ABCIResponses) + dtxs := make([]*abci.ResponseDeliverTx, len(block.Txs)) + abciResponses.DeliverTxs = dtxs // Execute transactions and get hash. proxyCb := func(req *abci.Request, res *abci.Response) { @@ -277,13 +290,22 @@ func execBlockOnProxyApp( } proxyAppConn.SetResponseCallback(proxyCb) - commitInfo, byzVals := getBeginBlockValidatorInfo(block, stateDB) + commitInfo := getBeginBlockValidatorInfo(block, store, initialHeight) + + byzVals := make([]abci.Evidence, 0) + for _, evidence := range block.Evidence.Evidence { + byzVals = append(byzVals, evidence.ABCI()...) 
+ } // Begin block var err error + pbh := block.Header.ToProto() + if pbh == nil { + return nil, errors.New("nil header") + } abciResponses.BeginBlock, err = proxyAppConn.BeginBlockSync(abci.RequestBeginBlock{ Hash: block.Hash(), - Header: types.TM2PB.Header(&block.Header), + Header: *pbh, LastCommitInfo: commitInfo, ByzantineValidators: byzVals, }) @@ -312,13 +334,14 @@ func execBlockOnProxyApp( return abciResponses, nil } -func getBeginBlockValidatorInfo(block *types.Block, stateDB dbm.DB) (abci.LastCommitInfo, []abci.Evidence) { +func getBeginBlockValidatorInfo(block *types.Block, store Store, + initialHeight int64) abci.LastCommitInfo { voteInfos := make([]abci.VoteInfo, block.LastCommit.Size()) - // block.Height=1 -> LastCommitInfo.Votes are empty. + // Initial block -> LastCommitInfo.Votes are empty. // Remember that the first LastCommit is intentionally empty, so it makes // sense for LastCommitInfo.Votes to also be empty. - if block.Height > 1 { - lastValSet, err := LoadValidators(stateDB, block.Height-1) + if block.Height > initialHeight { + lastValSet, err := store.LoadValidators(block.Height - 1) if err != nil { panic(err) } @@ -343,26 +366,14 @@ func getBeginBlockValidatorInfo(block *types.Block, stateDB dbm.DB) (abci.LastCo } } - byzVals := make([]abci.Evidence, len(block.Evidence.Evidence)) - for i, ev := range block.Evidence.Evidence { - // We need the validator set. We already did this in validateBlock. - // TODO: Should we instead cache the valset in the evidence itself and add - // `SetValidatorSet()` and `ToABCI` methods ? - valset, err := LoadValidators(stateDB, ev.Height()) - if err != nil { - panic(err) - } - byzVals[i] = types.TM2PB.Evidence(ev, valset, block.Time) - } - return abci.LastCommitInfo{ - Round: int32(block.LastCommit.Round), + Round: block.LastCommit.Round, Votes: voteInfos, - }, byzVals + } } func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, - params types.ValidatorParams) error { + params tmproto.ValidatorParams) error { for _, valUpdate := range abciUpdates { if valUpdate.GetPower() < 0 { return fmt.Errorf("voting power can't be negative %v", valUpdate) @@ -373,10 +384,14 @@ func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, } // Check if validator's pubkey matches an ABCI type in the consensus params - thisKeyType := valUpdate.PubKey.Type - if !params.IsValidPubkeyType(thisKeyType) { + pk, err := cryptoenc.PubKeyFromProto(valUpdate.PubKey) + if err != nil { + return err + } + + if !types.IsValidPubkeyType(params, pk.Type()) { return fmt.Errorf("validator %v is using pubkey %s, which is unsupported for consensus", - valUpdate, thisKeyType) + valUpdate, pk.Type()) } } return nil @@ -387,7 +402,7 @@ func updateState( state State, blockID types.BlockID, header *types.Header, - abciResponses *ABCIResponses, + abciResponses *tmstate.ABCIResponses, validatorUpdates []*types.Validator, ) (State, error) { @@ -414,16 +429,18 @@ func updateState( lastHeightParamsChanged := state.LastHeightConsensusParamsChanged if abciResponses.EndBlock.ConsensusParamUpdates != nil { // NOTE: must not mutate s.ConsensusParams - nextParams = state.ConsensusParams.Update(abciResponses.EndBlock.ConsensusParamUpdates) - err := nextParams.Validate() + nextParams = types.UpdateConsensusParams(state.ConsensusParams, abciResponses.EndBlock.ConsensusParamUpdates) + err := types.ValidateConsensusParams(nextParams) if err != nil { return state, fmt.Errorf("error updating consensus params: %v", err) } + + state.Version.Consensus.App = 
nextParams.Version.AppVersion + // Change results from this height but only applies to the next height. lastHeightParamsChanged = header.Height + 1 } - // TODO: allow app to upgrade version nextVersion := state.Version // NOTE: the AppHash has not been populated. @@ -431,6 +448,7 @@ func updateState( return State{ Version: nextVersion, ChainID: state.ChainID, + InitialHeight: state.InitialHeight, LastBlockHeight: header.Height, LastBlockID: blockID, LastBlockTime: header.Time, @@ -440,7 +458,7 @@ func updateState( LastHeightValidatorsChanged: lastHeightValsChanged, ConsensusParams: nextParams, LastHeightConsensusParamsChanged: lastHeightParamsChanged, - LastResultsHash: abciResponses.ResultsHash(), + LastResultsHash: ABCIResponsesResultsHash(abciResponses), AppHash: nil, }, nil } @@ -452,33 +470,52 @@ func fireEvents( logger log.Logger, eventBus types.BlockEventPublisher, block *types.Block, - abciResponses *ABCIResponses, + abciResponses *tmstate.ABCIResponses, validatorUpdates []*types.Validator, ) { - eventBus.PublishEventNewBlock(types.EventDataNewBlock{ + if err := eventBus.PublishEventNewBlock(types.EventDataNewBlock{ Block: block, ResultBeginBlock: *abciResponses.BeginBlock, ResultEndBlock: *abciResponses.EndBlock, - }) - eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ + }); err != nil { + logger.Error("Error publishing new block", "err", err) + } + if err := eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ Header: block.Header, NumTxs: int64(len(block.Txs)), ResultBeginBlock: *abciResponses.BeginBlock, ResultEndBlock: *abciResponses.EndBlock, - }) + }); err != nil { + logger.Error("Error publishing new block header", "err", err) + } + + if len(block.Evidence.Evidence) != 0 { + for _, ev := range block.Evidence.Evidence { + if err := eventBus.PublishEventNewEvidence(types.EventDataNewEvidence{ + Evidence: ev, + Height: block.Height, + }); err != nil { + logger.Error("Error publishing new evidence", "err", err) + } + } + } for i, tx := range block.Data.Txs { - eventBus.PublishEventTx(types.EventDataTx{TxResult: types.TxResult{ + if err := eventBus.PublishEventTx(types.EventDataTx{TxResult: abci.TxResult{ Height: block.Height, Index: uint32(i), Tx: tx, Result: *(abciResponses.DeliverTxs[i]), - }}) + }}); err != nil { + logger.Error("Error publishing event TX", "err", err) + } } if len(validatorUpdates) > 0 { - eventBus.PublishEventValidatorSetUpdates( - types.EventDataValidatorSetUpdates{ValidatorUpdates: validatorUpdates}) + if err := eventBus.PublishEventValidatorSetUpdates( + types.EventDataValidatorSetUpdates{ValidatorUpdates: validatorUpdates}); err != nil { + logger.Error("Error publishing event", "err", err) + } } } @@ -491,9 +528,10 @@ func ExecCommitBlock( appConnConsensus proxy.AppConnConsensus, block *types.Block, logger log.Logger, - stateDB dbm.DB, + store Store, + initialHeight int64, ) ([]byte, error) { - _, err := execBlockOnProxyApp(logger, appConnConsensus, block, stateDB) + _, err := execBlockOnProxyApp(logger, appConnConsensus, block, store, initialHeight) if err != nil { logger.Error("Error executing block on proxy app", "height", block.Height, "err", err) return nil, err diff --git a/state/execution_test.go b/state/execution_test.go index 041f232bd..3d7fa93ab 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -6,48 +6,55 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/abci/example/kvstore" 
abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/secp256k1" + cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/mock" + mmock "github.com/tendermint/tendermint/mempool/mock" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmversion "github.com/tendermint/tendermint/proto/tendermint/version" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/state/mocks" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" + "github.com/tendermint/tendermint/version" ) var ( - chainID = "execution_chain" - testPartSize = 65536 - nTxsPerBlock = 10 + chainID = "execution_chain" + testPartSize uint32 = 65536 + nTxsPerBlock = 10 ) func TestApplyBlock(t *testing.T) { - app := kvstore.NewApplication() - app.RetainBlocks = 1 + app := &testApp{} cc := proxy.NewLocalClientCreator(app) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) - defer proxyApp.Stop() + defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) + stateStore := sm.NewStore(stateDB) - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), - mock.Mempool{}, sm.MockEvidencePool{}) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), + mmock.Mempool{}, sm.EmptyEvidencePool{}) block := makeBlock(state, 1) - blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} - _, retainHeight, err := blockExec.ApplyBlock(state, blockID, block) + state, retainHeight, err := blockExec.ApplyBlock(state, blockID, block) require.Nil(t, err) assert.EqualValues(t, retainHeight, 1) // TODO check state and mempool + assert.EqualValues(t, 1, state.Version.Consensus.App, "App version wasn't updated") } // TestBeginBlockValidators ensures we send absent validators list. 
@@ -57,13 +64,14 @@ func TestBeginBlockValidators(t *testing.T) { proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) - defer proxyApp.Stop() + defer proxyApp.Stop() //nolint:errcheck // no need to check error again state, stateDB, _ := makeState(2, 2) + stateStore := sm.NewStore(stateDB) prevHash := state.LastBlockID.Hash prevParts := types.PartSetHeader{} - prevBlockID := types.BlockID{Hash: prevHash, PartsHeader: prevParts} + prevBlockID := types.BlockID{Hash: prevHash, PartSetHeader: prevParts} var ( now = tmtime.Now() @@ -94,7 +102,7 @@ func TestBeginBlockValidators(t *testing.T) { // block for height 2 block, _ := state.MakeBlock(2, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) - _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB) + _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateStore, 1) require.Nil(t, err, tc.desc) // -> app receives a list of validators with a bool indicating if they signed @@ -119,112 +127,133 @@ func TestBeginBlockByzantineValidators(t *testing.T) { proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) - defer proxyApp.Stop() - - state, stateDB, _ := makeState(2, 12) + defer proxyApp.Stop() //nolint:errcheck // ignore for tests + + state, stateDB, privVals := makeState(1, 1) + stateStore := sm.NewStore(stateDB) + + defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) + privVal := privVals[state.Validators.Validators[0].Address.String()] + blockID := makeBlockID([]byte("headerhash"), 1000, []byte("partshash")) + header := &types.Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol, App: 1}, + ChainID: state.ChainID, + Height: 10, + Time: defaultEvidenceTime, + LastBlockID: blockID, + LastCommitHash: crypto.CRandBytes(tmhash.Size), + DataHash: crypto.CRandBytes(tmhash.Size), + ValidatorsHash: state.Validators.Hash(), + NextValidatorsHash: state.Validators.Hash(), + ConsensusHash: crypto.CRandBytes(tmhash.Size), + AppHash: crypto.CRandBytes(tmhash.Size), + LastResultsHash: crypto.CRandBytes(tmhash.Size), + EvidenceHash: crypto.CRandBytes(tmhash.Size), + ProposerAddress: crypto.CRandBytes(crypto.AddressSize), + } - prevHash := state.LastBlockID.Hash - prevParts := types.PartSetHeader{} - prevBlockID := types.BlockID{Hash: prevHash, PartsHeader: prevParts} + // we don't need to worry about validating the evidence as long as they pass validate basic + dve := types.NewMockDuplicateVoteEvidenceWithValidator(3, defaultEvidenceTime, privVal, state.ChainID) + dve.ValidatorPower = 1000 + lcae := &types.LightClientAttackEvidence{ + ConflictingBlock: &types.LightBlock{ + SignedHeader: &types.SignedHeader{ + Header: header, + Commit: types.NewCommit(10, 0, makeBlockID(header.Hash(), 100, []byte("partshash")), []types.CommitSig{{ + BlockIDFlag: types.BlockIDFlagNil, + ValidatorAddress: crypto.AddressHash([]byte("validator_address")), + Timestamp: defaultEvidenceTime, + Signature: crypto.CRandBytes(types.MaxSignatureSize), + }}), + }, + ValidatorSet: state.Validators, + }, + CommonHeight: 8, + ByzantineValidators: []*types.Validator{state.Validators.Validators[0]}, + TotalVotingPower: 12, + Timestamp: defaultEvidenceTime, + } - height1, idx1, val1 := int64(8), 0, state.Validators.Validators[0].Address - height2, idx2, val2 := int64(3), 1, state.Validators.Validators[1].Address - ev1 := types.NewMockEvidence(height1, time.Now(), idx1, val1) - ev2 := types.NewMockEvidence(height2, time.Now(), idx2, val2) + ev 
:= []types.Evidence{dve, lcae} - now := tmtime.Now() - valSet := state.Validators - testCases := []struct { - desc string - evidence []types.Evidence - expectedByzantineValidators []abci.Evidence - }{ - {"none byzantine", []types.Evidence{}, []abci.Evidence{}}, - {"one byzantine", []types.Evidence{ev1}, []abci.Evidence{types.TM2PB.Evidence(ev1, valSet, now)}}, - {"multiple byzantine", []types.Evidence{ev1, ev2}, []abci.Evidence{ - types.TM2PB.Evidence(ev1, valSet, now), - types.TM2PB.Evidence(ev2, valSet, now)}}, + abciEv := []abci.Evidence{ + { + Type: abci.EvidenceType_DUPLICATE_VOTE, + Height: 3, + Time: defaultEvidenceTime, + Validator: types.TM2PB.Validator(state.Validators.Validators[0]), + TotalVotingPower: 10, + }, + { + Type: abci.EvidenceType_LIGHT_CLIENT_ATTACK, + Height: 8, + Time: defaultEvidenceTime, + Validator: types.TM2PB.Validator(state.Validators.Validators[0]), + TotalVotingPower: 12, + }, } - var ( - commitSig0 = types.NewCommitSigForBlock( - []byte("Signature1"), - state.Validators.Validators[0].Address, - now) - commitSig1 = types.NewCommitSigForBlock( - []byte("Signature2"), - state.Validators.Validators[1].Address, - now) - ) - commitSigs := []types.CommitSig{commitSig0, commitSig1} - lastCommit := types.NewCommit(9, 0, prevBlockID, commitSigs) - for _, tc := range testCases { + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return(ev, int64(100)) + evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return() + evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil) - block, _ := state.MakeBlock(10, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) - block.Time = now - block.Evidence.Evidence = tc.evidence - _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB) - require.Nil(t, err, tc.desc) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), + mmock.Mempool{}, evpool) - // -> app must receive an index of the byzantine validator - assert.Equal(t, tc.expectedByzantineValidators, app.ByzantineValidators, tc.desc) - } + block := makeBlock(state, 1) + block.Evidence = types.EvidenceData{Evidence: ev} + block.Header.EvidenceHash = block.Evidence.Hash() + blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + + state, retainHeight, err := blockExec.ApplyBlock(state, blockID, block) + require.Nil(t, err) + assert.EqualValues(t, retainHeight, 1) + + // TODO check state and mempool + assert.Equal(t, abciEv, app.ByzantineValidators) } func TestValidateValidatorUpdates(t *testing.T) { pubkey1 := ed25519.GenPrivKey().PubKey() pubkey2 := ed25519.GenPrivKey().PubKey() + pk1, err := cryptoenc.PubKeyToProto(pubkey1) + assert.NoError(t, err) + pk2, err := cryptoenc.PubKeyToProto(pubkey2) + assert.NoError(t, err) - secpKey := secp256k1.GenPrivKey().PubKey() - - defaultValidatorParams := types.ValidatorParams{PubKeyTypes: []string{types.ABCIPubKeyTypeEd25519}} + defaultValidatorParams := tmproto.ValidatorParams{PubKeyTypes: []string{types.ABCIPubKeyTypeEd25519}} testCases := []struct { name string abciUpdates []abci.ValidatorUpdate - validatorParams types.ValidatorParams + validatorParams tmproto.ValidatorParams shouldErr bool }{ { "adding a validator is OK", - - []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: 20}}, + []abci.ValidatorUpdate{{PubKey: pk2, Power: 20}}, defaultValidatorParams, - false, }, { 
"updating a validator is OK", - - []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey1), Power: 20}}, + []abci.ValidatorUpdate{{PubKey: pk1, Power: 20}}, defaultValidatorParams, - false, }, { "removing a validator is OK", - - []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: 0}}, + []abci.ValidatorUpdate{{PubKey: pk2, Power: 0}}, defaultValidatorParams, - false, }, { "adding a validator with negative power results in error", - - []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: -100}}, + []abci.ValidatorUpdate{{PubKey: pk2, Power: -100}}, defaultValidatorParams, - - true, - }, - { - "adding a validator with pubkey thats not in validator params results in error", - - []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(secpKey), Power: -100}}, - defaultValidatorParams, - true, }, } @@ -248,6 +277,11 @@ func TestUpdateValidators(t *testing.T) { pubkey2 := ed25519.GenPrivKey().PubKey() val2 := types.NewValidator(pubkey2, 20) + pk, err := cryptoenc.PubKeyToProto(pubkey1) + require.NoError(t, err) + pk2, err := cryptoenc.PubKeyToProto(pubkey2) + require.NoError(t, err) + testCases := []struct { name string @@ -259,37 +293,29 @@ func TestUpdateValidators(t *testing.T) { }{ { "adding a validator is OK", - types.NewValidatorSet([]*types.Validator{val1}), - []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: 20}}, - + []abci.ValidatorUpdate{{PubKey: pk2, Power: 20}}, types.NewValidatorSet([]*types.Validator{val1, val2}), false, }, { "updating a validator is OK", - types.NewValidatorSet([]*types.Validator{val1}), - []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey1), Power: 20}}, - + []abci.ValidatorUpdate{{PubKey: pk, Power: 20}}, types.NewValidatorSet([]*types.Validator{types.NewValidator(pubkey1, 20)}), false, }, { "removing a validator is OK", - types.NewValidatorSet([]*types.Validator{val1, val2}), - []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: 0}}, - + []abci.ValidatorUpdate{{PubKey: pk2, Power: 0}}, types.NewValidatorSet([]*types.Validator{val1}), false, }, { "removing a non-existing validator results in error", - types.NewValidatorSet([]*types.Validator{val1}), - []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: 0}}, - + []abci.ValidatorUpdate{{PubKey: pk2, Power: 0}}, types.NewValidatorSet([]*types.Validator{val1}), true, }, @@ -325,22 +351,24 @@ func TestEndBlockValidatorUpdates(t *testing.T) { proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) - defer proxyApp.Stop() + defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) + stateStore := sm.NewStore(stateDB) blockExec := sm.NewBlockExecutor( - stateDB, + stateStore, log.TestingLogger(), proxyApp.Consensus(), - mock.Mempool{}, - sm.MockEvidencePool{}, + mmock.Mempool{}, + sm.EmptyEvidencePool{}, ) eventBus := types.NewEventBus() err = eventBus.Start() require.NoError(t, err) - defer eventBus.Stop() + defer eventBus.Stop() //nolint:errcheck // ignore for tests + blockExec.SetEventBus(eventBus) updatesSub, err := eventBus.Subscribe( @@ -351,16 +379,17 @@ func TestEndBlockValidatorUpdates(t *testing.T) { require.NoError(t, err) block := makeBlock(state, 1) - blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} pubkey := ed25519.GenPrivKey().PubKey() + pk, err := cryptoenc.PubKeyToProto(pubkey) + require.NoError(t, err) 
app.ValidatorUpdates = []abci.ValidatorUpdate{ - {PubKey: types.TM2PB.PubKey(pubkey), Power: 10}, + {PubKey: pk, Power: 10}, } state, _, err = blockExec.ApplyBlock(state, blockID, block) require.Nil(t, err) - // test new validator was added to NextValidators if assert.Equal(t, state.Validators.Size()+1, state.NextValidators.Size()) { idx, _ := state.NextValidators.GetByAddress(pubkey.Address()) @@ -393,27 +422,45 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) - defer proxyApp.Stop() + defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) + stateStore := sm.NewStore(stateDB) blockExec := sm.NewBlockExecutor( - stateDB, + stateStore, log.TestingLogger(), proxyApp.Consensus(), - mock.Mempool{}, - sm.MockEvidencePool{}, + mmock.Mempool{}, + sm.EmptyEvidencePool{}, ) block := makeBlock(state, 1) - blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + vp, err := cryptoenc.PubKeyToProto(state.Validators.Validators[0].PubKey) + require.NoError(t, err) // Remove the only validator app.ValidatorUpdates = []abci.ValidatorUpdate{ - {PubKey: types.TM2PB.PubKey(state.Validators.Validators[0].PubKey), Power: 0}, + {PubKey: vp, Power: 0}, } assert.NotPanics(t, func() { state, _, err = blockExec.ApplyBlock(state, blockID, block) }) assert.NotNil(t, err) assert.NotEmpty(t, state.NextValidators.Validators) +} +func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.BlockID { + var ( + h = make([]byte, tmhash.Size) + psH = make([]byte, tmhash.Size) + ) + copy(h, hash) + copy(psH, partSetHash) + return types.BlockID{ + Hash: h, + PartSetHeader: types.PartSetHeader{ + Total: partSetSize, + Hash: psH, + }, + } } diff --git a/state/export_test.go b/state/export_test.go index cba07eca3..56c3d764c 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -4,6 +4,8 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -26,7 +28,7 @@ func UpdateState( state State, blockID types.BlockID, header *types.Header, - abciResponses *ABCIResponses, + abciResponses *tmstate.ABCIResponses, validatorUpdates []*types.Validator, ) (State, error) { return updateState(state, blockID, header, abciResponses, validatorUpdates) @@ -34,18 +36,13 @@ func UpdateState( // ValidateValidatorUpdates is an alias for validateValidatorUpdates exported // from execution.go, exclusively and explicitly for testing. -func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params types.ValidatorParams) error { +func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params tmproto.ValidatorParams) error { return validateValidatorUpdates(abciUpdates, params) } -// SaveConsensusParamsInfo is an alias for the private saveConsensusParamsInfo -// method in store.go, exported exclusively and explicitly for testing. 
-func SaveConsensusParamsInfo(db dbm.DB, nextHeight, changeHeight int64, params types.ConsensusParams) { - saveConsensusParamsInfo(db, nextHeight, changeHeight, params) -} - // SaveValidatorsInfo is an alias for the private saveValidatorsInfo method in // store.go, exported exclusively and explicitly for testing. -func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) { - saveValidatorsInfo(db, height, lastHeightChanged, valSet) +func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) error { + stateStore := dbStore{db} + return stateStore.saveValidatorsInfo(height, lastHeightChanged, valSet) } diff --git a/state/helpers_test.go b/state/helpers_test.go index a85e35748..19549f160 100644 --- a/state/helpers_test.go +++ b/state/helpers_test.go @@ -11,6 +11,8 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" tmrand "github.com/tendermint/tendermint/libs/rand" + tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" @@ -19,17 +21,9 @@ import ( type paramsChangeTestCase struct { height int64 - params types.ConsensusParams + params tmproto.ConsensusParams } -// always returns true if asked if any evidence was already committed. -type mockEvPoolAlwaysCommitted struct{} - -func (m mockEvPoolAlwaysCommitted) PendingEvidence(int64) []types.Evidence { return nil } -func (m mockEvPoolAlwaysCommitted) AddEvidence(types.Evidence) error { return nil } -func (m mockEvPoolAlwaysCommitted) Update(*types.Block, sm.State) {} -func (m mockEvPoolAlwaysCommitted) IsCommitted(types.Evidence) bool { return true } - func newTestApp() proxy.AppConns { app := &testApp{} cc := proxy.NewLocalClientCreator(app) @@ -65,7 +59,7 @@ func makeAndApplyGoodBlock(state sm.State, height int64, lastCommit *types.Commi return state, types.BlockID{}, err } blockID := types.BlockID{Hash: block.Hash(), - PartsHeader: types.PartSetHeader{Total: 3, Hash: tmrand.Bytes(32)}} + PartSetHeader: types.PartSetHeader{Total: 3, Hash: tmrand.Bytes(32)}} state, _, err := blockExec.ApplyBlock(state, blockID, block) if err != nil { return state, types.BlockID{}, err @@ -81,7 +75,7 @@ func makeValidCommit( ) (*types.Commit, error) { sigs := make([]types.CommitSig, 0) for i := 0; i < vals.Size(); i++ { - _, val := vals.GetByIndex(i) + _, val := vals.GetByIndex(int32(i)) vote, err := types.MakeVote(height, blockID, vals, privVals[val.Address.String()], chainID, time.Now()) if err != nil { return nil, err @@ -121,13 +115,19 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida }) stateDB := dbm.NewMemDB() - sm.SaveState(stateDB, s) + stateStore := sm.NewStore(stateDB) + if err := stateStore.Save(s); err != nil { + panic(err) + } for i := 1; i < height; i++ { s.LastBlockHeight++ s.LastValidators = s.Validators.Copy() - sm.SaveState(stateDB, s) + if err := stateStore.Save(s); err != nil { + panic(err) + } } + return s, stateDB, privVals } @@ -150,34 +150,16 @@ func genValSet(size int) *types.ValidatorSet { return types.NewValidatorSet(vals) } -func makeConsensusParams( - blockBytes, blockGas int64, - blockTimeIotaMs int64, - evidenceAge int64, -) types.ConsensusParams { - return types.ConsensusParams{ - Block: types.BlockParams{ - MaxBytes: blockBytes, - MaxGas: blockGas, - TimeIotaMs: 
blockTimeIotaMs, - }, - Evidence: types.EvidenceParams{ - MaxAgeNumBlocks: evidenceAge, - MaxAgeDuration: time.Duration(evidenceAge), - }, - } -} - func makeHeaderPartsResponsesValPubKeyChange( state sm.State, pubkey crypto.PubKey, -) (types.Header, types.BlockID, *sm.ABCIResponses) { +) (types.Header, types.BlockID, *tmstate.ABCIResponses) { block := makeBlock(state, state.LastBlockHeight+1) - abciResponses := &sm.ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + abciResponses := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } - // If the pubkey is new, remove the old and add the new. _, val := state.NextValidators.GetByIndex(0) if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) { @@ -189,17 +171,18 @@ func makeHeaderPartsResponsesValPubKeyChange( } } - return block.Header, types.BlockID{Hash: block.Hash(), PartsHeader: types.PartSetHeader{}}, abciResponses + return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses } func makeHeaderPartsResponsesValPowerChange( state sm.State, power int64, -) (types.Header, types.BlockID, *sm.ABCIResponses) { +) (types.Header, types.BlockID, *tmstate.ABCIResponses) { block := makeBlock(state, state.LastBlockHeight+1) - abciResponses := &sm.ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + abciResponses := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } // If the pubkey is new, remove the old and add the new. @@ -212,19 +195,20 @@ func makeHeaderPartsResponsesValPowerChange( } } - return block.Header, types.BlockID{Hash: block.Hash(), PartsHeader: types.PartSetHeader{}}, abciResponses + return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses } func makeHeaderPartsResponsesParams( state sm.State, - params types.ConsensusParams, -) (types.Header, types.BlockID, *sm.ABCIResponses) { + params tmproto.ConsensusParams, +) (types.Header, types.BlockID, *tmstate.ABCIResponses) { block := makeBlock(state, state.LastBlockHeight+1) - abciResponses := &sm.ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ConsensusParamUpdates: types.TM2PB.ConsensusParams(¶ms)}, + abciResponses := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + EndBlock: &abci.ResponseEndBlock{ConsensusParamUpdates: types.TM2PB.ConsensusParams(¶ms)}, } - return block.Header, types.BlockID{Hash: block.Hash(), PartsHeader: types.PartSetHeader{}}, abciResponses + return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses } func randomGenesisDoc() *types.GenesisDoc { @@ -267,7 +251,11 @@ func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlo } func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { - return abci.ResponseEndBlock{ValidatorUpdates: app.ValidatorUpdates} + return abci.ResponseEndBlock{ + ValidatorUpdates: app.ValidatorUpdates, + ConsensusParamUpdates: &abci.ConsensusParams{ + Version: &tmproto.VersionParams{ + AppVersion: 1}}} } func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { @@ -279,7 +267,7 @@ func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { } func (app *testApp) Commit() abci.ResponseCommit { - return abci.ResponseCommit{} + return abci.ResponseCommit{RetainHeight: 1} } func (app *testApp) Query(reqQuery 
abci.RequestQuery) (resQuery abci.ResponseQuery) { diff --git a/state/main_test.go b/state/main_test.go deleted file mode 100644 index 00ecf2686..000000000 --- a/state/main_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package state_test - -import ( - "os" - "testing" - - "github.com/tendermint/tendermint/types" -) - -func TestMain(m *testing.M) { - types.RegisterMockEvidencesGlobal() - os.Exit(m.Run()) -} diff --git a/state/mocks/evidence_pool.go b/state/mocks/evidence_pool.go new file mode 100644 index 000000000..7292991ca --- /dev/null +++ b/state/mocks/evidence_pool.go @@ -0,0 +1,71 @@ +// Code generated by mockery v2.1.0. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + state "github.com/tendermint/tendermint/state" + + types "github.com/tendermint/tendermint/types" +) + +// EvidencePool is an autogenerated mock type for the EvidencePool type +type EvidencePool struct { + mock.Mock +} + +// AddEvidence provides a mock function with given fields: _a0 +func (_m *EvidencePool) AddEvidence(_a0 types.Evidence) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(types.Evidence) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CheckEvidence provides a mock function with given fields: _a0 +func (_m *EvidencePool) CheckEvidence(_a0 types.EvidenceList) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(types.EvidenceList) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// PendingEvidence provides a mock function with given fields: maxBytes +func (_m *EvidencePool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) { + ret := _m.Called(maxBytes) + + var r0 []types.Evidence + if rf, ok := ret.Get(0).(func(int64) []types.Evidence); ok { + r0 = rf(maxBytes) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]types.Evidence) + } + } + + var r1 int64 + if rf, ok := ret.Get(1).(func(int64) int64); ok { + r1 = rf(maxBytes) + } else { + r1 = ret.Get(1).(int64) + } + + return r0, r1 +} + +// Update provides a mock function with given fields: _a0, _a1 +func (_m *EvidencePool) Update(_a0 state.State, _a1 types.EvidenceList) { + _m.Called(_a0, _a1) +} diff --git a/state/mocks/store.go b/state/mocks/store.go new file mode 100644 index 000000000..17e1ef7b9 --- /dev/null +++ b/state/mocks/store.go @@ -0,0 +1,205 @@ +// Code generated by mockery v2.1.0. DO NOT EDIT. 
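The hand-written mockEvPoolAlwaysCommitted helper above is dropped in favor of this mockery-generated EvidencePool mock; before the Store mock continues below, here is a hedged sketch of how such a generated mock is typically driven from a test (the expectations are illustrative, not taken from this patch):

```go
package state_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/state/mocks"
)

func TestEvidencePoolMockSketch(t *testing.T) {
	evpool := &mocks.EvidencePool{}
	// Stub PendingEvidence: report an empty pool for a 1000-byte limit.
	evpool.On("PendingEvidence", int64(1000)).Return(nil, int64(0))

	ev, size := evpool.PendingEvidence(1000)
	require.Nil(t, ev)
	require.Zero(t, size)
	evpool.AssertExpectations(t)
}
```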
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + state "github.com/tendermint/tendermint/state" + + tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state" + + tenderminttypes "github.com/tendermint/tendermint/types" + + types "github.com/tendermint/tendermint/proto/tendermint/types" +) + +// Store is an autogenerated mock type for the Store type +type Store struct { + mock.Mock +} + +// Bootstrap provides a mock function with given fields: _a0 +func (_m *Store) Bootstrap(_a0 state.State) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(state.State) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Load provides a mock function with given fields: +func (_m *Store) Load() (state.State, error) { + ret := _m.Called() + + var r0 state.State + if rf, ok := ret.Get(0).(func() state.State); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(state.State) + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LoadABCIResponses provides a mock function with given fields: _a0 +func (_m *Store) LoadABCIResponses(_a0 int64) (*tendermintstate.ABCIResponses, error) { + ret := _m.Called(_a0) + + var r0 *tendermintstate.ABCIResponses + if rf, ok := ret.Get(0).(func(int64) *tendermintstate.ABCIResponses); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*tendermintstate.ABCIResponses) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LoadConsensusParams provides a mock function with given fields: _a0 +func (_m *Store) LoadConsensusParams(_a0 int64) (types.ConsensusParams, error) { + ret := _m.Called(_a0) + + var r0 types.ConsensusParams + if rf, ok := ret.Get(0).(func(int64) types.ConsensusParams); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(types.ConsensusParams) + } + + var r1 error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LoadFromDBOrGenesisDoc provides a mock function with given fields: _a0 +func (_m *Store) LoadFromDBOrGenesisDoc(_a0 *tenderminttypes.GenesisDoc) (state.State, error) { + ret := _m.Called(_a0) + + var r0 state.State + if rf, ok := ret.Get(0).(func(*tenderminttypes.GenesisDoc) state.State); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(state.State) + } + + var r1 error + if rf, ok := ret.Get(1).(func(*tenderminttypes.GenesisDoc) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LoadFromDBOrGenesisFile provides a mock function with given fields: _a0 +func (_m *Store) LoadFromDBOrGenesisFile(_a0 string) (state.State, error) { + ret := _m.Called(_a0) + + var r0 state.State + if rf, ok := ret.Get(0).(func(string) state.State); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(state.State) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LoadValidators provides a mock function with given fields: _a0 +func (_m *Store) LoadValidators(_a0 int64) (*tenderminttypes.ValidatorSet, error) { + ret := _m.Called(_a0) + + var r0 *tenderminttypes.ValidatorSet + if rf, ok := ret.Get(0).(func(int64) *tenderminttypes.ValidatorSet); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*tenderminttypes.ValidatorSet) + } + } + + var r1 
error + if rf, ok := ret.Get(1).(func(int64) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PruneStates provides a mock function with given fields: _a0, _a1 +func (_m *Store) PruneStates(_a0 int64, _a1 int64) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(int64, int64) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Save provides a mock function with given fields: _a0 +func (_m *Store) Save(_a0 state.State) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(state.State) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SaveABCIResponses provides a mock function with given fields: _a0, _a1 +func (_m *Store) SaveABCIResponses(_a0 int64, _a1 *tendermintstate.ABCIResponses) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(int64, *tendermintstate.ABCIResponses) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/state/services.go b/state/services.go index a30956bdc..5213f8fdd 100644 --- a/state/services.go +++ b/state/services.go @@ -18,6 +18,7 @@ type BlockStore interface { Height() int64 Size() int64 + LoadBaseMeta() *types.BlockMeta LoadBlockMeta(height int64) *types.BlockMeta LoadBlock(height int64) *types.Block @@ -35,20 +36,26 @@ type BlockStore interface { //----------------------------------------------------------------------------- // evidence pool -// EvidencePool defines the EvidencePool interface used by the ConsensusState. -// Get/Set/Commit +//go:generate mockery --case underscore --name EvidencePool + +// EvidencePool defines the EvidencePool interface used by State. type EvidencePool interface { - PendingEvidence(int64) []types.Evidence + PendingEvidence(maxBytes int64) (ev []types.Evidence, size int64) AddEvidence(types.Evidence) error - Update(*types.Block, State) - // IsCommitted indicates if this evidence was already marked committed in another block. - IsCommitted(types.Evidence) bool + Update(State, types.EvidenceList) + CheckEvidence(types.EvidenceList) error } -// MockEvidencePool is an empty implementation of EvidencePool, useful for testing. -type MockEvidencePool struct{} +// EmptyEvidencePool is an empty implementation of EvidencePool, useful for testing. 
It also complies +with the consensus evidence pool interface. +type EmptyEvidencePool struct{} -func (m MockEvidencePool) PendingEvidence(int64) []types.Evidence { return nil } -func (m MockEvidencePool) AddEvidence(types.Evidence) error { return nil } -func (m MockEvidencePool) Update(*types.Block, State) {} -func (m MockEvidencePool) IsCommitted(types.Evidence) bool { return false } +func (EmptyEvidencePool) PendingEvidence(maxBytes int64) (ev []types.Evidence, size int64) { + return nil, 0 +} +func (EmptyEvidencePool) AddEvidence(types.Evidence) error { return nil } +func (EmptyEvidencePool) Update(State, types.EvidenceList) {} +func (EmptyEvidencePool) CheckEvidence(evList types.EvidenceList) error { return nil } +func (EmptyEvidencePool) AddEvidenceFromConsensus(evidence types.Evidence) error { + return nil +} diff --git a/state/state.go b/state/state.go index e0612576a..d9da840ca 100644 --- a/state/state.go +++ b/state/state.go @@ -2,10 +2,16 @@ package state import ( "bytes" + "errors" "fmt" "io/ioutil" "time" + "github.com/gogo/protobuf/proto" + + tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmversion "github.com/tendermint/tendermint/proto/tendermint/version" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/version" @@ -18,21 +24,12 @@ var ( //----------------------------------------------------------------------------- -// Version is for versioning the State. -// It holds the Block and App version needed for making blocks, -// and the software version to support upgrades to the format of -// the State as stored on disk. -type Version struct { - Consensus version.Consensus - Software string -} - -// initStateVersion sets the Consensus.Block and Software versions, +// InitStateVersion sets the Consensus.Block and Software versions, // but leaves the Consensus.App version blank. // The Consensus.App version will be set during the Handshake, once // we hear from the app what protocol version it is running. -var initStateVersion = Version{ - Consensus: version.Consensus{ +var InitStateVersion = tmstate.Version{ + Consensus: tmversion.Consensus{ Block: version.BlockProtocol, App: 0, }, @@ -49,10 +46,11 @@ var initStateVersion = Version{ // Instead, use state.Copy() or state.NextState(...). // NOTE: not goroutine-safe. type State struct { - Version Version + Version tmstate.Version // immutable - ChainID string + ChainID string + InitialHeight int64 // should be 1, not 0, when starting from height 1 // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) LastBlockHeight int64 @@ -72,7 +70,7 @@ type State struct { // Consensus parameters used for validating blocks. // Changes returned by EndBlock and updated after Commit. - ConsensusParams types.ConsensusParams + ConsensusParams tmproto.ConsensusParams LastHeightConsensusParamsChanged int64 // Merkle root of the results from executing prev block @@ -84,9 +82,11 @@ type State struct { // Copy makes a copy of the State for mutating. func (state State) Copy() State { + return State{ - Version: state.Version, - ChainID: state.ChainID, + Version: state.Version, + ChainID: state.ChainID, + InitialHeight: state.InitialHeight, LastBlockHeight: state.LastBlockHeight, LastBlockID: state.LastBlockID, @@ -112,9 +112,18 @@ func (state State) Equals(state2 State) bool { return bytes.Equal(sbz, s2bz) } -// Bytes serializes the State using go-amino.

+// Bytes serializes the State using protobuf. +// It panics if either casting to protobuf or serialization fails. func (state State) Bytes() []byte { - return cdc.MustMarshalBinaryBare(state) + sm, err := state.ToProto() + if err != nil { + panic(err) + } + bz, err := proto.Marshal(sm) + if err != nil { + panic(err) + } + return bz } // IsEmpty returns true if the State is equal to the empty State. @@ -122,6 +131,101 @@ func (state State) IsEmpty() bool { return state.Validators == nil // XXX can't compare to Empty } +// ToProto takes the local state type and returns the equivalent proto type +func (state *State) ToProto() (*tmstate.State, error) { + if state == nil { + return nil, errors.New("state is nil") + } + + sm := new(tmstate.State) + + sm.Version = state.Version + sm.ChainID = state.ChainID + sm.InitialHeight = state.InitialHeight + sm.LastBlockHeight = state.LastBlockHeight + + sm.LastBlockID = state.LastBlockID.ToProto() + sm.LastBlockTime = state.LastBlockTime + vals, err := state.Validators.ToProto() + if err != nil { + return nil, err + } + sm.Validators = vals + + nVals, err := state.NextValidators.ToProto() + if err != nil { + return nil, err + } + sm.NextValidators = nVals + + if state.LastBlockHeight >= 1 { // At Block 1 LastValidators is nil + lVals, err := state.LastValidators.ToProto() + if err != nil { + return nil, err + } + sm.LastValidators = lVals + } + + sm.LastHeightValidatorsChanged = state.LastHeightValidatorsChanged + sm.ConsensusParams = state.ConsensusParams + sm.LastHeightConsensusParamsChanged = state.LastHeightConsensusParamsChanged + sm.LastResultsHash = state.LastResultsHash + sm.AppHash = state.AppHash + + return sm, nil +} + +// StateFromProto takes a state proto message & returns the local state type +func StateFromProto(pb *tmstate.State) (*State, error) { //nolint:golint + if pb == nil { + return nil, errors.New("nil State") + } + + state := new(State) + + state.Version = pb.Version + state.ChainID = pb.ChainID + state.InitialHeight = pb.InitialHeight + + bi, err := types.BlockIDFromProto(&pb.LastBlockID) + if err != nil { + return nil, err + } + state.LastBlockID = *bi + state.LastBlockHeight = pb.LastBlockHeight + state.LastBlockTime = pb.LastBlockTime + + vals, err := types.ValidatorSetFromProto(pb.Validators) + if err != nil { + return nil, err + } + state.Validators = vals + + nVals, err := types.ValidatorSetFromProto(pb.NextValidators) + if err != nil { + return nil, err + } + state.NextValidators = nVals + + if state.LastBlockHeight >= 1 { // At Block 1 LastValidators is nil + lVals, err := types.ValidatorSetFromProto(pb.LastValidators) + if err != nil { + return nil, err + } + state.LastValidators = lVals + } else { + state.LastValidators = types.NewValidatorSet(nil) + } + + state.LastHeightValidatorsChanged = pb.LastHeightValidatorsChanged + state.ConsensusParams = pb.ConsensusParams + state.LastHeightConsensusParamsChanged = pb.LastHeightConsensusParamsChanged + state.LastResultsHash = pb.LastResultsHash + state.AppHash = pb.AppHash + + return state, nil +} + //------------------------------------------------------------------------ // Create a block from the latest state @@ -141,7 +245,7 @@ func (state State) MakeBlock( // Set time. 
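State serialization moves from amino to protobuf via the ToProto/StateFromProto pair above. A sketch of the round trip they enable, assuming only symbols introduced in this diff; unlike Bytes, which panics, this explicit path surfaces errors (TestStateProto below exercises both directions):

```go
package example

import (
	"github.com/gogo/protobuf/proto"

	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
	sm "github.com/tendermint/tendermint/state"
)

// roundTrip mirrors what state.Bytes plus StateFromProto do together:
// marshal the proto form, then decode it back into the native State.
func roundTrip(state sm.State) (*sm.State, error) {
	sp, err := state.ToProto()
	if err != nil {
		return nil, err
	}
	bz, err := proto.Marshal(sp)
	if err != nil {
		return nil, err
	}
	pb := new(tmstate.State)
	if err := proto.Unmarshal(bz, pb); err != nil {
		return nil, err
	}
	return sm.StateFromProto(pb)
}
```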
var timestamp time.Time - if height == 1 { + if height == state.InitialHeight { timestamp = state.LastBlockTime // genesis time } else { timestamp = MedianTime(commit, state.LastValidators) @@ -152,7 +256,7 @@ func (state State) MakeBlock( state.Version.Consensus, state.ChainID, timestamp, state.LastBlockID, state.Validators.Hash(), state.NextValidators.Hash(), - state.ConsensusParams.Hash(), state.AppHash, state.LastResultsHash, + types.HashConsensusParams(state.ConsensusParams), state.AppHash, state.LastResultsHash, proposerAddress, ) @@ -231,8 +335,9 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { } return State{ - Version: initStateVersion, - ChainID: genDoc.ChainID, + Version: InitStateVersion, + ChainID: genDoc.ChainID, + InitialHeight: genDoc.InitialHeight, LastBlockHeight: 0, LastBlockID: types.BlockID{}, @@ -241,10 +346,10 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { NextValidators: nextValidatorSet, Validators: validatorSet, LastValidators: types.NewValidatorSet(nil), - LastHeightValidatorsChanged: 1, + LastHeightValidatorsChanged: genDoc.InitialHeight, ConsensusParams: *genDoc.ConsensusParams, - LastHeightConsensusParamsChanged: 1, + LastHeightConsensusParamsChanged: genDoc.InitialHeight, AppHash: genDoc.AppHash, }, nil diff --git a/state/state_test.go b/state/state_test.go index 746f7837c..1632f4304 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -7,7 +7,6 @@ import ( "math/big" "os" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,9 +16,10 @@ import ( abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/libs/kv" - "github.com/tendermint/tendermint/libs/rand" + cryptoenc "github.com/tendermint/tendermint/crypto/encoding" tmrand "github.com/tendermint/tendermint/libs/rand" + tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -28,9 +28,13 @@ import ( func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { config := cfg.ResetTestRoot("state_") dbType := dbm.BackendType(config.DBBackend) - stateDB := dbm.NewDB("state", dbType, config.DBDir()) - state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) + stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + stateStore := sm.NewStore(stateDB) + require.NoError(t, err) + state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile") + err = stateStore.Save(state) + require.NoError(t, err) tearDown := func(t *testing.T) { os.RemoveAll(config.RootDir) } @@ -50,11 +54,12 @@ func TestStateCopy(t *testing.T) { stateCopy, state)) stateCopy.LastBlockHeight++ + stateCopy.LastValidators = state.Validators assert.False(state.Equals(stateCopy), fmt.Sprintf(`expected states to be different. got same %v`, state)) } -//TestMakeGenesisStateNilValidators tests state's consistency when genesis file's validators field is nil. +// TestMakeGenesisStateNilValidators tests state's consistency when genesis file's validators field is nil. 
func TestMakeGenesisStateNilValidators(t *testing.T) { doc := types.GenesisDoc{ ChainID: "dummy", @@ -71,12 +76,16 @@ func TestMakeGenesisStateNilValidators(t *testing.T) { func TestStateSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) + stateStore := sm.NewStore(stateDB) assert := assert.New(t) state.LastBlockHeight++ - sm.SaveState(stateDB, state) + state.LastValidators = state.Validators + err := stateStore.Save(state) + require.NoError(t, err) - loadedState := sm.LoadState(stateDB) + loadedState, err := stateStore.Load() + require.NoError(t, err) assert.True(state.Equals(loadedState), fmt.Sprintf("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", loadedState, state)) @@ -86,21 +95,27 @@ func TestStateSaveLoad(t *testing.T) { func TestABCIResponsesSaveLoad1(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) + stateStore := sm.NewStore(stateDB) assert := assert.New(t) state.LastBlockHeight++ // Build mock responses. block := makeBlock(state, 2) - abciResponses := sm.NewABCIResponses(block) + + abciResponses := new(tmstate.ABCIResponses) + dtxs := make([]*abci.ResponseDeliverTx, 2) + abciResponses.DeliverTxs = dtxs + abciResponses.DeliverTxs[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil} abciResponses.DeliverTxs[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil} abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{ types.TM2PB.NewValidatorUpdate(ed25519.GenPrivKey().PubKey(), 10), }} - sm.SaveABCIResponses(stateDB, block.Height, abciResponses) - loadedABCIResponses, err := sm.LoadABCIResponses(stateDB, block.Height) + err := stateStore.SaveABCIResponses(block.Height, abciResponses) + require.NoError(t, err) + loadedABCIResponses, err := stateStore.LoadABCIResponses(block.Height) assert.Nil(err) assert.Equal(abciResponses, loadedABCIResponses, fmt.Sprintf("ABCIResponses don't match:\ngot: %v\nexpected: %v\n", @@ -113,11 +128,13 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { defer tearDown(t) assert := assert.New(t) + stateStore := sm.NewStore(stateDB) + cases := [...]struct { // Height is implied to equal index+2, // as block 1 is created from genesis. 
added []*abci.ResponseDeliverTx - expected types.ABCIResults + expected []*abci.ResponseDeliverTx }{ 0: { nil, @@ -127,7 +144,7 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { []*abci.ResponseDeliverTx{ {Code: 32, Data: []byte("Hello"), Log: "Huh?"}, }, - types.ABCIResults{ + []*abci.ResponseDeliverTx{ {Code: 32, Data: []byte("Hello")}, }}, 2: { @@ -136,44 +153,60 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { { Data: []byte("Gotcha!"), Events: []abci.Event{ - {Type: "type1", Attributes: []kv.Pair{{Key: []byte("a"), Value: []byte("1")}}}, - {Type: "type2", Attributes: []kv.Pair{{Key: []byte("build"), Value: []byte("stuff")}}}, + {Type: "type1", Attributes: []abci.EventAttribute{{Key: []byte("a"), Value: []byte("1")}}}, + {Type: "type2", Attributes: []abci.EventAttribute{{Key: []byte("build"), Value: []byte("stuff")}}}, }, }, }, - types.ABCIResults{ + []*abci.ResponseDeliverTx{ {Code: 383, Data: nil}, - {Code: 0, Data: []byte("Gotcha!")}, + {Code: 0, Data: []byte("Gotcha!"), Events: []abci.Event{ + {Type: "type1", Attributes: []abci.EventAttribute{{Key: []byte("a"), Value: []byte("1")}}}, + {Type: "type2", Attributes: []abci.EventAttribute{{Key: []byte("build"), Value: []byte("stuff")}}}, + }}, }}, 3: { nil, nil, }, + 4: { + []*abci.ResponseDeliverTx{nil}, + nil, + }, } // Query all before, this should return error. for i := range cases { h := int64(i + 1) - res, err := sm.LoadABCIResponses(stateDB, h) + res, err := stateStore.LoadABCIResponses(h) assert.Error(err, "%d: %#v", i, res) } // Add all cases. for i, tc := range cases { h := int64(i + 1) // last block height, one below what we save - responses := &sm.ABCIResponses{ + responses := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, DeliverTxs: tc.added, EndBlock: &abci.ResponseEndBlock{}, } - sm.SaveABCIResponses(stateDB, h, responses) + err := stateStore.SaveABCIResponses(h, responses) + require.NoError(t, err) } // Query all before, should return expected value. for i, tc := range cases { h := int64(i + 1) - res, err := sm.LoadABCIResponses(stateDB, h) - assert.NoError(err, "%d", i) - assert.Equal(tc.expected.Hash(), res.ResultsHash(), "%d", i) + res, err := stateStore.LoadABCIResponses(h) + if assert.NoError(err, "%d", i) { + t.Log(res) + responses := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + DeliverTxs: tc.expected, + EndBlock: &abci.ResponseEndBlock{}, + } + assert.Equal(sm.ABCIResponsesResultsHash(responses), sm.ABCIResponsesResultsHash(res), "%d", i) + } } } @@ -183,27 +216,30 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { defer tearDown(t) assert := assert.New(t) + statestore := sm.NewStore(stateDB) + // Can't load anything for height 0. - _, err := sm.LoadValidators(stateDB, 0) + _, err := statestore.LoadValidators(0) assert.IsType(sm.ErrNoValSetForHeight{}, err, "expected err at height 0") // Should be able to load for height 1. - v, err := sm.LoadValidators(stateDB, 1) + v, err := statestore.LoadValidators(1) assert.Nil(err, "expected no err at height 1") assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") // Should be able to load for height 2. - v, err = sm.LoadValidators(stateDB, 2) + v, err = statestore.LoadValidators(2) assert.Nil(err, "expected no err at height 2") assert.Equal(v.Hash(), state.NextValidators.Hash(), "expected validator hashes to match") // Increment height, save; should be able to load for next & next next height. 
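Before the validator save/load assertions continue below, here is the store round trip that TestABCIResponsesSaveLoad2 above drives, sketched with a single response; the height and payload are illustrative:

```go
package example

import (
	dbm "github.com/tendermint/tm-db"

	abci "github.com/tendermint/tendermint/abci/types"
	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
	sm "github.com/tendermint/tendermint/state"
)

// saveAndHash stores responses for a height, loads them back, and
// returns the Merkle root the tests compare via ABCIResponsesResultsHash.
func saveAndHash(height int64) ([]byte, error) {
	store := sm.NewStore(dbm.NewMemDB())
	responses := &tmstate.ABCIResponses{
		BeginBlock: &abci.ResponseBeginBlock{},
		DeliverTxs: []*abci.ResponseDeliverTx{{Code: 0, Data: []byte("ok")}},
		EndBlock:   &abci.ResponseEndBlock{},
	}
	if err := store.SaveABCIResponses(height, responses); err != nil {
		return nil, err
	}
	loaded, err := store.LoadABCIResponses(height)
	if err != nil {
		return nil, err
	}
	return sm.ABCIResponsesResultsHash(loaded), nil
}
```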
state.LastBlockHeight++ nextHeight := state.LastBlockHeight + 1 - sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) - vp0, err := sm.LoadValidators(stateDB, nextHeight+0) + err = statestore.Save(state) + require.NoError(t, err) + vp0, err := statestore.LoadValidators(nextHeight + 0) assert.Nil(err, "expected no err") - vp1, err := sm.LoadValidators(stateDB, nextHeight+1) + vp1, err := statestore.LoadValidators(nextHeight + 1) assert.Nil(err, "expected no err") assert.Equal(vp0.Hash(), state.Validators.Hash(), "expected validator hashes to match") assert.Equal(vp1.Hash(), state.NextValidators.Hash(), "expected next validator hashes to match") @@ -213,6 +249,7 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { func TestOneValidatorChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) + stateStore := sm.NewStore(stateDB) // Change vals at these heights. changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} @@ -237,8 +274,8 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.NoError(t, err) - nextHeight := state.LastBlockHeight + 1 - sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + err := stateStore.Save(state) + require.NoError(t, err) } // On each height change, increment the power by one. @@ -256,7 +293,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } for i, power := range testCases { - v, err := sm.LoadValidators(stateDB, int64(i+1+1)) // +1 because vset changes delayed by 1 block. + v, err := stateStore.LoadValidators(int64(i + 1 + 1)) // +1 because vset changes delayed by 1 block. 
assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i)) assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size()) _, val := v.GetByIndex(0) @@ -347,7 +384,7 @@ func genValSetWithPowers(powers []int64) *types.ValidatorSet { for i := 0; i < size; i++ { totalVotePower += powers[i] val := types.NewValidator(ed25519.GenPrivKey().PubKey(), powers[i]) - val.ProposerPriority = rand.Int64() + val.ProposerPriority = tmrand.Int64() vals[i] = val } valSet := types.NewValidatorSet(vals) @@ -373,7 +410,7 @@ func testProposerFreq(t *testing.T, caseNum int, valSet *types.ValidatorSet) { // assert frequencies match expected (max off by 1) for i, freq := range freqs { - _, val := valSet.GetByIndex(i) + _, val := valSet.GetByIndex(int32(i)) expectFreq := int(val.VotingPower) * runMult gotFreq := freq abs := int(math.Abs(float64(expectFreq - gotFreq))) @@ -407,9 +444,10 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { assert.EqualValues(t, 0, val1.ProposerPriority) block := makeBlock(state, state.LastBlockHeight+1) - blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} - abciResponses := &sm.ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + abciResponses := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) @@ -422,7 +460,10 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // add a validator val2PubKey := ed25519.GenPrivKey().PubKey() val2VotingPower := int64(100) - updateAddVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(val2PubKey), Power: val2VotingPower} + fvp, err := cryptoenc.PubKeyToProto(val2PubKey) + require.NoError(t, err) + + updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val2VotingPower} validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) assert.NoError(t, err) updatedState2, err := sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -458,7 +499,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // Updating a validator does not reset the ProposerPriority to zero: // 1. 
Add - Val2 VotingPower change to 1 => updatedVotingPowVal2 := int64(1) - updateVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(val2PubKey), Power: updatedVotingPowVal2} + updateVal := abci.ValidatorUpdate{PubKey: fvp, Power: updatedVotingPowVal2} validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateVal}) assert.NoError(t, err) @@ -517,10 +558,11 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { assert.Equal(t, val1PubKey.Address(), state.Validators.Proposer.Address) block := makeBlock(state, state.LastBlockHeight+1) - blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} // no updates: - abciResponses := &sm.ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + abciResponses := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) @@ -536,7 +578,9 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // add a validator with the same voting power as the first val2PubKey := ed25519.GenPrivKey().PubKey() - updateAddVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(val2PubKey), Power: val1VotingPower} + fvp, err := cryptoenc.PubKeyToProto(val2PubKey) + require.NoError(t, err) + updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val1VotingPower} validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) assert.NoError(t, err) @@ -619,8 +663,9 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // no changes in voting power and both validators have same voting power // -> proposers should alternate: oldState := updatedState3 - abciResponses = &sm.ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + abciResponses = &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) @@ -634,8 +679,9 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { for i := 0; i < 1000; i++ { // no validator updates: - abciResponses := &sm.ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + abciResponses := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) @@ -691,14 +737,15 @@ func TestLargeGenesisValidator(t *testing.T) { oldState := state for i := 0; i < 10; i++ { // no updates: - abciResponses := &sm.ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + abciResponses := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) block := makeBlock(oldState, oldState.LastBlockHeight+1) - blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err := 
sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -717,28 +764,32 @@ func TestLargeGenesisValidator(t *testing.T) { // see: https://github.com/tendermint/tendermint/issues/2960 firstAddedValPubKey := ed25519.GenPrivKey().PubKey() firstAddedValVotingPower := int64(10) - firstAddedVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(firstAddedValPubKey), Power: firstAddedValVotingPower} + fvp, err := cryptoenc.PubKeyToProto(firstAddedValPubKey) + require.NoError(t, err) + firstAddedVal := abci.ValidatorUpdate{PubKey: fvp, Power: firstAddedValVotingPower} validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal}) assert.NoError(t, err) - abciResponses := &sm.ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}}, + abciResponses := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}}, } block := makeBlock(oldState, oldState.LastBlockHeight+1) - blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) lastState := updatedState for i := 0; i < 200; i++ { // no updates: - abciResponses := &sm.ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + abciResponses := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) block := makeBlock(lastState, lastState.LastBlockHeight+1) - blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedStateInner, err := sm.UpdateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -760,28 +811,33 @@ func TestLargeGenesisValidator(t *testing.T) { // add 10 validators with the same voting power as the one added directly after genesis: for i := 0; i < 10; i++ { addedPubKey := ed25519.GenPrivKey().PubKey() - - addedVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(addedPubKey), Power: firstAddedValVotingPower} + ap, err := cryptoenc.PubKeyToProto(addedPubKey) + require.NoError(t, err) + addedVal := abci.ValidatorUpdate{PubKey: ap, Power: firstAddedValVotingPower} validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{addedVal}) assert.NoError(t, err) - abciResponses := &sm.ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{addedVal}}, + abciResponses := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{addedVal}}, } block := makeBlock(oldState, oldState.LastBlockHeight+1) - blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} state, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) } 
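Each iteration of these priority tests repeats one shape: a tmstate.ABCIResponses whose BeginBlock is now explicitly non-nil, converted with PB2TM and fed through UpdateState. Sketched once; note UpdateState is the test-only alias from export_test.go, so this only compiles inside the state package's test build, and block construction is elided into parameters:

```go
package state_test

import (
	abci "github.com/tendermint/tendermint/abci/types"
	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

// advance applies one block with no validator updates, the way the
// loops above do.
func advance(state sm.State, blockID types.BlockID, header *types.Header) (sm.State, error) {
	abciResponses := &tmstate.ABCIResponses{
		BeginBlock: &abci.ResponseBeginBlock{},
		EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: nil},
	}
	validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
	if err != nil {
		return state, err
	}
	return sm.UpdateState(state, blockID, header, abciResponses, validatorUpdates)
}
```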
require.Equal(t, 10+2, len(state.NextValidators.Validators)) // remove genesis validator: - removeGenesisVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(genesisPubKey), Power: 0} - abciResponses = &sm.ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}}, + gp, err := cryptoenc.PubKeyToProto(genesisPubKey) + require.NoError(t, err) + removeGenesisVal := abci.ValidatorUpdate{PubKey: gp, Power: 0} + abciResponses = &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}}, } block = makeBlock(oldState, oldState.LastBlockHeight+1) - blockID = types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} + blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) updatedState, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) @@ -795,13 +851,14 @@ func TestLargeGenesisValidator(t *testing.T) { count := 0 isProposerUnchanged := true for isProposerUnchanged { - abciResponses := &sm.ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + abciResponses := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) block = makeBlock(curState, curState.LastBlockHeight+1) - blockID = types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} + blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} curState, err = sm.UpdateState(curState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) if !bytes.Equal(curState.Validators.Proposer.Address, curState.NextValidators.Proposer.Address) { @@ -818,14 +875,15 @@ func TestLargeGenesisValidator(t *testing.T) { proposers := make([]*types.Validator, numVals) for i := 0; i < 100; i++ { // no updates: - abciResponses := &sm.ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, + abciResponses := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) block := makeBlock(updatedState, updatedState.LastBlockHeight+1) - blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err = sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -842,18 +900,20 @@ func TestLargeGenesisValidator(t *testing.T) { func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) { const valSetSize = 2 tearDown, stateDB, state := setupTestCase(t) - defer tearDown(t) + t.Cleanup(func() { tearDown(t) }) + stateStore := sm.NewStore(stateDB) state.Validators = genValSet(valSetSize) state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) - sm.SaveState(stateDB, state) + err := stateStore.Save(state) + require.NoError(t, err) nextHeight := state.LastBlockHeight 
+ 1 - v0, err := sm.LoadValidators(stateDB, nextHeight) + v0, err := stateStore.LoadValidators(nextHeight) assert.Nil(t, err) acc0 := v0.Validators[0].ProposerPriority - v1, err := sm.LoadValidators(stateDB, nextHeight+1) + v1, err := stateStore.LoadValidators(nextHeight + 1) assert.Nil(t, err) acc1 := v1.Validators[0].ProposerPriority @@ -866,10 +926,12 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { const valSetSize = 7 tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) + stateStore := sm.NewStore(stateDB) require.Equal(t, int64(0), state.LastBlockHeight) state.Validators = genValSet(valSetSize) state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) - sm.SaveState(stateDB, state) + err := stateStore.Save(state) + require.NoError(t, err) _, valOld := state.Validators.GetByIndex(0) var pubkeyOld = valOld.PubKey @@ -879,17 +941,17 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, pubkey) // Save state etc. - var err error var validatorUpdates []*types.Validator validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.Nil(t, err) nextHeight := state.LastBlockHeight + 1 - sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + err = stateStore.Save(state) + require.NoError(t, err) // Load nextheight, it should be the oldpubkey. - v0, err := sm.LoadValidators(stateDB, nextHeight) + v0, err := stateStore.LoadValidators(nextHeight) assert.Nil(t, err) assert.Equal(t, valSetSize, v0.Size()) index, val := v0.GetByAddress(pubkeyOld.Address()) @@ -899,7 +961,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { } // Load nextheight+1, it should be the new pubkey. - v1, err := sm.LoadValidators(stateDB, nextHeight+1) + v1, err := stateStore.LoadValidators(nextHeight + 1) assert.Nil(t, err) assert.Equal(t, valSetSize, v1.Size()) index, val = v1.GetByAddress(pubkey.Address()) @@ -928,17 +990,20 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) + stateStore := sm.NewStore(stateDB) + // Change vals at these heights. changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} N := len(changeHeights) // Each valset is just one validator. // create list of them. - params := make([]types.ConsensusParams, N+1) + params := make([]tmproto.ConsensusParams, N+1) params[0] = state.ConsensusParams for i := 1; i < N+1; i++ { params[i] = *types.DefaultConsensusParams() params[i].Block.MaxBytes += int64(i) + params[i].Block.TimeIotaMs = 10 } // Build the params history by running updateState @@ -960,8 +1025,8 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.Nil(t, err) - nextHeight := state.LastBlockHeight + 1 - sm.SaveConsensusParamsInfo(stateDB, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) + err := stateStore.Save(state) + require.NoError(t, err) } // Make all the test cases by using the same params until after the change. 
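The params history now uses tmproto.ConsensusParams, and TimeIotaMs must be set by hand since the makeConsensusParams helper was deleted. The construction used in the loop above, condensed into a sketch:

```go
package example

import (
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	"github.com/tendermint/tendermint/types"
)

// testParams builds the i-th params entry the way the loop above does:
// defaults, a tweaked MaxBytes, and an explicit TimeIotaMs.
func testParams(i int64) tmproto.ConsensusParams {
	params := *types.DefaultConsensusParams()
	params.Block.MaxBytes += i
	params.Block.TimeIotaMs = 10
	return params
}
```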
@@ -979,43 +1044,43 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { } for _, testCase := range testCases { - p, err := sm.LoadConsensusParams(stateDB, testCase.height) + p, err := stateStore.LoadConsensusParams(testCase.height) assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", testCase.height)) - assert.Equal(t, testCase.params, p, fmt.Sprintf(`unexpected consensus params at + assert.EqualValues(t, testCase.params, p, fmt.Sprintf(`unexpected consensus params at height %d`, testCase.height)) } } -func TestApplyUpdates(t *testing.T) { - initParams := makeConsensusParams(1, 2, 3, 4) - const maxAge int64 = 66 - cases := [...]struct { - init types.ConsensusParams - updates abci.ConsensusParams - expected types.ConsensusParams +func TestStateProto(t *testing.T) { + tearDown, _, state := setupTestCase(t) + defer tearDown(t) + + tc := []struct { + testName string + state *sm.State + expPass1 bool + expPass2 bool }{ - 0: {initParams, abci.ConsensusParams{}, initParams}, - 1: {initParams, abci.ConsensusParams{}, initParams}, - 2: {initParams, - abci.ConsensusParams{ - Block: &abci.BlockParams{ - MaxBytes: 44, - MaxGas: 55, - }, - }, - makeConsensusParams(44, 55, 3, 4)}, - 3: {initParams, - abci.ConsensusParams{ - Evidence: &abci.EvidenceParams{ - MaxAgeNumBlocks: maxAge, - MaxAgeDuration: time.Duration(maxAge), - }, - }, - makeConsensusParams(1, 2, 3, maxAge)}, + {"empty state", &sm.State{}, true, false}, + {"nil failure state", nil, false, false}, + {"success state", &state, true, true}, } - for i, tc := range cases { - res := tc.init.Update(&(tc.updates)) - assert.Equal(t, tc.expected, res, "case %d", i) + for _, tt := range tc { + tt := tt + pbs, err := tt.state.ToProto() + if !tt.expPass1 { + assert.Error(t, err) + } else { + assert.NoError(t, err, tt.testName) + } + + smt, err := sm.StateFromProto(pbs) + if tt.expPass2 { + require.NoError(t, err, tt.testName) + require.Equal(t, tt.state, smt, tt.testName) + } else { + require.Error(t, err, tt.testName) + } } } diff --git a/state/store.go b/state/store.go index 08b695f8a..5bd2d9fc7 100644 --- a/state/store.go +++ b/state/store.go @@ -1,13 +1,17 @@ package state import ( + "errors" "fmt" + "github.com/gogo/protobuf/proto" dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" tmmath "github.com/tendermint/tendermint/libs/math" tmos "github.com/tendermint/tendermint/libs/os" + tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -33,96 +37,179 @@ func calcABCIResponsesKey(height int64) []byte { return []byte(fmt.Sprintf("abciResponsesKey:%v", height)) } +//---------------------- + +//go:generate mockery --case underscore --name Store + +// Store defines the state store interface +// +// It is used to retrieve current state and save and load ABCI responses, +// validators and consensus parameters +type Store interface { + // LoadFromDBOrGenesisFile loads the most recent state. + // If the chain is new it will use the genesis file from the provided genesis file path as the current state. + LoadFromDBOrGenesisFile(string) (State, error) + // LoadFromDBOrGenesisDoc loads the most recent state. + // If the chain is new it will use the genesis doc as the current state. 
+ LoadFromDBOrGenesisDoc(*types.GenesisDoc) (State, error) + // Load loads the current state of the blockchain. + Load() (State, error) + // LoadValidators loads the validator set at a given height. + LoadValidators(int64) (*types.ValidatorSet, error) + // LoadABCIResponses loads the ABCI responses for a given height. + LoadABCIResponses(int64) (*tmstate.ABCIResponses, error) + // LoadConsensusParams loads the consensus params for a given height. + LoadConsensusParams(int64) (tmproto.ConsensusParams, error) + // Save overwrites the previous state with the updated one. + Save(State) error + // SaveABCIResponses saves ABCIResponses for a given height. + SaveABCIResponses(int64, *tmstate.ABCIResponses) error + // Bootstrap is used for bootstrapping state when not starting from an initial height. + Bootstrap(State) error + // PruneStates takes the height from which to start pruning and the height at which to stop. + PruneStates(int64, int64) error +} + +// dbStore wraps a db (github.com/tendermint/tm-db) +type dbStore struct { + db dbm.DB +} + +var _ Store = (*dbStore)(nil) + +// NewStore creates the dbStore of the state pkg. +func NewStore(db dbm.DB) Store { + return dbStore{db} +} + // LoadStateFromDBOrGenesisFile loads the most recent state from the database, -// or creates a new one from the given genesisFilePath and persists the result -// to the database. -func LoadStateFromDBOrGenesisFile(stateDB dbm.DB, genesisFilePath string) (State, error) { - state := LoadState(stateDB) +// or creates a new one from the given genesisFilePath. +func (store dbStore) LoadFromDBOrGenesisFile(genesisFilePath string) (State, error) { + state, err := store.Load() + if err != nil { + return State{}, err + } if state.IsEmpty() { var err error state, err = MakeGenesisStateFromFile(genesisFilePath) if err != nil { return state, err } - SaveState(stateDB, state) } return state, nil } // LoadStateFromDBOrGenesisDoc loads the most recent state from the database, -// or creates a new one from the given genesisDoc and persists the result -// to the database. -func LoadStateFromDBOrGenesisDoc(stateDB dbm.DB, genesisDoc *types.GenesisDoc) (State, error) { - state := LoadState(stateDB) +// or creates a new one from the given genesisDoc. +func (store dbStore) LoadFromDBOrGenesisDoc(genesisDoc *types.GenesisDoc) (State, error) { + state, err := store.Load() + if err != nil { + return State{}, err + } + if state.IsEmpty() { var err error state, err = MakeGenesisState(genesisDoc) if err != nil { return state, err } - SaveState(stateDB, state) } return state, nil } // LoadState loads the State from the database. -func LoadState(db dbm.DB) State { - return loadState(db, stateKey) +func (store dbStore) Load() (State, error) { + return store.loadState(stateKey) } -func loadState(db dbm.DB, key []byte) (state State) { - buf, err := db.Get(key) +func (store dbStore) loadState(key []byte) (state State, err error) { + buf, err := store.db.Get(key) if err != nil { - panic(err) + return state, err } if len(buf) == 0 { - return state + return state, nil } - err = cdc.UnmarshalBinaryBare(buf, &state) + sp := new(tmstate.State) + + err = proto.Unmarshal(buf, sp) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED tmos.Exit(fmt.Sprintf(`LoadState: Data has been corrupted or its spec has changed: - %v\n`, err)) + %v\n`, err)) } - // TODO: ensure that buf is completely read.
- return state + sm, err := StateFromProto(sp) + if err != nil { + return state, err + } + + return *sm, nil } -// SaveState persists the State, the ValidatorsInfo, and the ConsensusParamsInfo to the database. +// Save persists the State, the ValidatorsInfo, and the ConsensusParamsInfo to the database. // This flushes the writes (e.g. calls SetSync). -func SaveState(db dbm.DB, state State) { - saveState(db, state, stateKey) +func (store dbStore) Save(state State) error { + return store.save(state, stateKey) } -func saveState(db dbm.DB, state State, key []byte) { +func (store dbStore) save(state State, key []byte) error { nextHeight := state.LastBlockHeight + 1 - // If first block, save validators for block 1. + // If first block, save validators for the block. if nextHeight == 1 { + nextHeight = state.InitialHeight // This extra logic due to Tendermint validator set changes being delayed 1 block. // It may get overwritten due to InitChain validator updates. - lastHeightVoteChanged := int64(1) - saveValidatorsInfo(db, nextHeight, lastHeightVoteChanged, state.Validators) + if err := store.saveValidatorsInfo(nextHeight, nextHeight, state.Validators); err != nil { + return err + } } // Save next validators. - saveValidatorsInfo(db, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + if err := store.saveValidatorsInfo(nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators); err != nil { + return err + } + // Save next consensus params. - saveConsensusParamsInfo(db, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) - db.SetSync(key, state.Bytes()) + if err := store.saveConsensusParamsInfo(nextHeight, + state.LastHeightConsensusParamsChanged, state.ConsensusParams); err != nil { + return err + } + err := store.db.SetSync(key, state.Bytes()) + if err != nil { + return err + } + return nil } -//------------------------------------------------------------------------ +// Bootstrap saves a new state, used e.g. by state sync when starting from non-zero height. +func (store dbStore) Bootstrap(state State) error { + height := state.LastBlockHeight + 1 + if height == 1 { + height = state.InitialHeight + } + + if height > 1 && !state.LastValidators.IsNilOrEmpty() { + if err := store.saveValidatorsInfo(height-1, height-1, state.LastValidators); err != nil { + return err + } + } + + if err := store.saveValidatorsInfo(height, height, state.Validators); err != nil { + return err + } + + if err := store.saveValidatorsInfo(height+1, height+1, state.NextValidators); err != nil { + return err + } -// ABCIResponses retains the responses -// of the various ABCI calls during block processing. -// It is persisted to disk for each height before calling Commit. -type ABCIResponses struct { - DeliverTxs []*abci.ResponseDeliverTx `json:"deliver_txs"` - EndBlock *abci.ResponseEndBlock `json:"end_block"` - BeginBlock *abci.ResponseBeginBlock `json:"begin_block"` + if err := store.saveConsensusParamsInfo(height, height, state.ConsensusParams); err != nil { + return err + } + + return store.db.SetSync(stateKey, state.Bytes()) } // PruneStates deletes states between the given heights (including from, excluding to). It is not @@ -133,20 +220,20 @@ type ABCIResponses struct { // encoding not preserving ordering: https://github.com/tendermint/tendermint/issues/4567 // This will cause some old states to be left behind when doing incremental partial prunes, // specifically older checkpoints and LastHeightChanged targets.
-func PruneStates(db dbm.DB, from int64, to int64) error { +func (store dbStore) PruneStates(from int64, to int64) error { if from <= 0 || to <= 0 { return fmt.Errorf("from height %v and to height %v must be greater than 0", from, to) } if from >= to { return fmt.Errorf("from height %v must be lower than to height %v", from, to) } - valInfo := loadValidatorsInfo(db, to) - if valInfo == nil { - return fmt.Errorf("validators at height %v not found", to) + valInfo, err := loadValidatorsInfo(store.db, to) + if err != nil { + return fmt.Errorf("validators at height %v not found: %w", to, err) } - paramsInfo := loadConsensusParamsInfo(db, to) - if paramsInfo == nil { - return fmt.Errorf("consensus params at height %v not found", to) + paramsInfo, err := store.loadConsensusParamsInfo(to) + if err != nil { + return fmt.Errorf("consensus params at height %v not found: %w", to, err) } keepVals := make(map[int64]bool) @@ -155,14 +242,13 @@ func PruneStates(db dbm.DB, from int64, to int64) error { keepVals[lastStoredHeightFor(to, valInfo.LastHeightChanged)] = true // keep last checkpoint too } keepParams := make(map[int64]bool) - if paramsInfo.ConsensusParams.Equals(&types.ConsensusParams{}) { + if paramsInfo.ConsensusParams.Equal(&tmproto.ConsensusParams{}) { keepParams[paramsInfo.LastHeightChanged] = true } - batch := db.NewBatch() + batch := store.db.NewBatch() defer batch.Close() pruned := uint64(0) - var err error // We have to delete in reverse order, to avoid deleting previous heights that have validator // sets and consensus params that we may need to retrieve. @@ -171,34 +257,71 @@ func PruneStates(db dbm.DB, from int64, to int64) error { // params, otherwise they will panic if they're retrieved directly (instead of // indirectly via a LastHeightChanged pointer). 
if keepVals[h] { - v := loadValidatorsInfo(db, h) - if v.ValidatorSet == nil { - v.ValidatorSet, err = LoadValidators(db, h) + v, err := loadValidatorsInfo(store.db, h) + if err != nil || v.ValidatorSet == nil { + vip, err := store.LoadValidators(h) if err != nil { return err } + + pvi, err := vip.ToProto() + if err != nil { + return err + } + + v.ValidatorSet = pvi v.LastHeightChanged = h - batch.Set(calcValidatorsKey(h), v.Bytes()) + + bz, err := v.Marshal() + if err != nil { + return err + } + err = batch.Set(calcValidatorsKey(h), bz) + if err != nil { + return err + } } } else { - batch.Delete(calcValidatorsKey(h)) + err = batch.Delete(calcValidatorsKey(h)) + if err != nil { + return err + } } if keepParams[h] { - p := loadConsensusParamsInfo(db, h) - if p.ConsensusParams.Equals(&types.ConsensusParams{}) { - p.ConsensusParams, err = LoadConsensusParams(db, h) + p, err := store.loadConsensusParamsInfo(h) + if err != nil { + return err + } + + if p.ConsensusParams.Equal(&tmproto.ConsensusParams{}) { + p.ConsensusParams, err = store.LoadConsensusParams(h) if err != nil { return err } + p.LastHeightChanged = h - batch.Set(calcConsensusParamsKey(h), p.Bytes()) + bz, err := p.Marshal() + if err != nil { + return err + } + + err = batch.Set(calcConsensusParamsKey(h), bz) + if err != nil { + return err + } } } else { - batch.Delete(calcConsensusParamsKey(h)) + err = batch.Delete(calcConsensusParamsKey(h)) + if err != nil { + return err + } } - batch.Delete(calcABCIResponsesKey(h)) + err = batch.Delete(calcABCIResponsesKey(h)) + if err != nil { + return err + } pruned++ // avoid batches growing too large by flushing to database regularly @@ -208,7 +331,7 @@ func PruneStates(db dbm.DB, from int64, to int64) error { return err } batch.Close() - batch = db.NewBatch() + batch = store.db.NewBatch() defer batch.Close() } } @@ -221,42 +344,34 @@ func PruneStates(db dbm.DB, from int64, to int64) error { return nil } -// NewABCIResponses returns a new ABCIResponses -func NewABCIResponses(block *types.Block) *ABCIResponses { - resDeliverTxs := make([]*abci.ResponseDeliverTx, len(block.Data.Txs)) - if len(block.Data.Txs) == 0 { - // This makes Amino encoding/decoding consistent. - resDeliverTxs = nil - } - return &ABCIResponses{ - DeliverTxs: resDeliverTxs, - } -} - -// Bytes serializes the ABCIResponse using go-amino. -func (arz *ABCIResponses) Bytes() []byte { - return cdc.MustMarshalBinaryBare(arz) -} +//------------------------------------------------------------------------ -func (arz *ABCIResponses) ResultsHash() []byte { - results := types.NewResults(arz.DeliverTxs) - return results.Hash() +// ABCIResponsesResultsHash returns the root hash of a Merkle tree of +// ResponseDeliverTx responses (see ABCIResults.Hash) +// +// See merkle.SimpleHashFromByteSlices +func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte { + return types.NewResults(ar.DeliverTxs).Hash() } -// LoadABCIResponses loads the ABCIResponses for the given height from the database. -// This is useful for recovering from crashes where we called app.Commit and before we called -// s.Save(). It can also be used to produce Merkle proofs of the result of txs. -func LoadABCIResponses(db dbm.DB, height int64) (*ABCIResponses, error) { - buf, err := db.Get(calcABCIResponsesKey(height)) +// LoadABCIResponses loads the ABCIResponses for the given height from the +// database. If not found, ErrNoABCIResponsesForHeight is returned. 
+// +// This is useful for recovering from crashes where we called app.Commit and +// before we called s.Save(). It can also be used to produce Merkle proofs of +// the result of txs. +func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, error) { + buf, err := store.db.Get(calcABCIResponsesKey(height)) if err != nil { return nil, err } if len(buf) == 0 { + return nil, ErrNoABCIResponsesForHeight{height} } - abciResponses := new(ABCIResponses) - err = cdc.UnmarshalBinaryBare(buf, abciResponses) + abciResponses := new(tmstate.ABCIResponses) + err = abciResponses.Unmarshal(buf) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED tmos.Exit(fmt.Sprintf(`LoadABCIResponses: Data has been corrupted or its spec has @@ -273,46 +388,71 @@ func LoadABCIResponses(db dbm.DB, height int64) (*ABCIResponses, error) { // Merkle proofs. // // Exposed for testing. -func SaveABCIResponses(db dbm.DB, height int64, abciResponses *ABCIResponses) { - db.SetSync(calcABCIResponsesKey(height), abciResponses.Bytes()) -} +func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCIResponses) error { + var dtxs []*abci.ResponseDeliverTx + // strip nil values from DeliverTxs + for _, tx := range abciResponses.DeliverTxs { + if tx != nil { + dtxs = append(dtxs, tx) + } + } + abciResponses.DeliverTxs = dtxs -//----------------------------------------------------------------------------- + bz, err := abciResponses.Marshal() + if err != nil { + return err + } -// ValidatorsInfo represents the latest validator set, or the last height it changed -type ValidatorsInfo struct { - ValidatorSet *types.ValidatorSet - LastHeightChanged int64 -} + err = store.db.SetSync(calcABCIResponsesKey(height), bz) + if err != nil { + return err + } -// Bytes serializes the ValidatorsInfo using go-amino. -func (valInfo *ValidatorsInfo) Bytes() []byte { - return cdc.MustMarshalBinaryBare(valInfo) + return nil } +//----------------------------------------------------------------------------- + // LoadValidators loads the ValidatorSet for a given height. // Returns ErrNoValSetForHeight if the validator set can't be found for this height.
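A hypothetical call-site sketch for the error-returning LoadValidators defined next (stateStore and height are assumed bindings; sm.ErrNoValSetForHeight is the error named above):

    vals, err := stateStore.LoadValidators(height)
    if err != nil {
        // e.g. sm.ErrNoValSetForHeight when the set was pruned or never stored
        return err
    }
    fmt.Println("validators at height:", vals.Size())
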
-func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) { - valInfo := loadValidatorsInfo(db, height) - if valInfo == nil { +func (store dbStore) LoadValidators(height int64) (*types.ValidatorSet, error) { + valInfo, err := loadValidatorsInfo(store.db, height) + if err != nil { return nil, ErrNoValSetForHeight{height} } if valInfo.ValidatorSet == nil { lastStoredHeight := lastStoredHeightFor(height, valInfo.LastHeightChanged) - valInfo2 := loadValidatorsInfo(db, lastStoredHeight) - if valInfo2 == nil || valInfo2.ValidatorSet == nil { - panic( - fmt.Sprintf("Couldn't find validators at height %d (height %d was originally requested)", + valInfo2, err := loadValidatorsInfo(store.db, lastStoredHeight) + if err != nil || valInfo2.ValidatorSet == nil { + return nil, + fmt.Errorf("couldn't find validators at height %d (height %d was originally requested): %w", lastStoredHeight, height, - ), - ) + err, + ) + } + + vs, err := types.ValidatorSetFromProto(valInfo2.ValidatorSet) + if err != nil { + return nil, err } - valInfo2.ValidatorSet.IncrementProposerPriority(int(height - lastStoredHeight)) // mutate + + vs.IncrementProposerPriority(tmmath.SafeConvertInt32(height - lastStoredHeight)) // mutate + vi2, err := vs.ToProto() + if err != nil { + return nil, err + } + + valInfo2.ValidatorSet = vi2 valInfo = valInfo2 } - return valInfo.ValidatorSet, nil + vip, err := types.ValidatorSetFromProto(valInfo.ValidatorSet) + if err != nil { + return nil, err + } + + return vip, nil } func lastStoredHeightFor(height, lastHeightChanged int64) int64 { @@ -321,17 +461,18 @@ func lastStoredHeightFor(height, lastHeightChanged int64) int64 { } // CONTRACT: Returned ValidatorsInfo can be mutated. -func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo { +func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error) { buf, err := db.Get(calcValidatorsKey(height)) if err != nil { - panic(err) + return nil, err } + if len(buf) == 0 { - return nil + return nil, errors.New("value retrieved from db is empty") } - v := new(ValidatorsInfo) - err = cdc.UnmarshalBinaryBare(buf, v) + v := new(tmstate.ValidatorsInfo) + err = v.Unmarshal(buf) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED tmos.Exit(fmt.Sprintf(`LoadValidators: Data has been corrupted or its spec has changed: @@ -339,7 +480,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo { } // TODO: ensure that buf is completely read. - return v + return v, nil } // saveValidatorsInfo persists the validator set. @@ -347,91 +488,107 @@ func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo { // `height` is the effective height for which the validator is responsible for // signing. It should be called from s.Save(), right before the state itself is // persisted. -func saveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) { +func (store dbStore) saveValidatorsInfo(height, lastHeightChanged int64, valSet *types.ValidatorSet) error { if lastHeightChanged > height { - panic("LastHeightChanged cannot be greater than ValidatorsInfo height") + return errors.New("lastHeightChanged cannot be greater than ValidatorsInfo height") } - valInfo := &ValidatorsInfo{ + valInfo := &tmstate.ValidatorsInfo{ LastHeightChanged: lastHeightChanged, } // Only persist validator set if it was updated or checkpoint height (see // valSetCheckpointInterval) is reached. 
if height == lastHeightChanged || height%valSetCheckpointInterval == 0 { - valInfo.ValidatorSet = valSet + pv, err := valSet.ToProto() + if err != nil { + return err + } + valInfo.ValidatorSet = pv } - db.Set(calcValidatorsKey(height), valInfo.Bytes()) + + bz, err := valInfo.Marshal() + if err != nil { + return err + } + + err = store.db.Set(calcValidatorsKey(height), bz) + if err != nil { + return err + } + + return nil } //----------------------------------------------------------------------------- // ConsensusParamsInfo represents the latest consensus params, or the last height it changed -type ConsensusParamsInfo struct { - ConsensusParams types.ConsensusParams - LastHeightChanged int64 -} - -// Bytes serializes the ConsensusParamsInfo using go-amino. -func (params ConsensusParamsInfo) Bytes() []byte { - return cdc.MustMarshalBinaryBare(params) -} // LoadConsensusParams loads the ConsensusParams for a given height. -func LoadConsensusParams(db dbm.DB, height int64) (types.ConsensusParams, error) { - empty := types.ConsensusParams{} +func (store dbStore) LoadConsensusParams(height int64) (tmproto.ConsensusParams, error) { + empty := tmproto.ConsensusParams{} - paramsInfo := loadConsensusParamsInfo(db, height) - if paramsInfo == nil { - return empty, ErrNoConsensusParamsForHeight{height} + paramsInfo, err := store.loadConsensusParamsInfo(height) + if err != nil { + return empty, fmt.Errorf("could not find consensus params for height #%d: %w", height, err) } - if paramsInfo.ConsensusParams.Equals(&empty) { - paramsInfo2 := loadConsensusParamsInfo(db, paramsInfo.LastHeightChanged) - if paramsInfo2 == nil { - panic( - fmt.Sprintf( - "Couldn't find consensus params at height %d as last changed from height %d", - paramsInfo.LastHeightChanged, - height, - ), + if paramsInfo.ConsensusParams.Equal(&empty) { + paramsInfo2, err := store.loadConsensusParamsInfo(paramsInfo.LastHeightChanged) + if err != nil { + return empty, fmt.Errorf( + "couldn't find consensus params at height %d as last changed from height %d: %w", + paramsInfo.LastHeightChanged, + height, + err, ) } + paramsInfo = paramsInfo2 } return paramsInfo.ConsensusParams, nil } -func loadConsensusParamsInfo(db dbm.DB, height int64) *ConsensusParamsInfo { - buf, err := db.Get(calcConsensusParamsKey(height)) +func (store dbStore) loadConsensusParamsInfo(height int64) (*tmstate.ConsensusParamsInfo, error) { + buf, err := store.db.Get(calcConsensusParamsKey(height)) if err != nil { - panic(err) + return nil, err } if len(buf) == 0 { - return nil + return nil, errors.New("value retrieved from db is empty") } - paramsInfo := new(ConsensusParamsInfo) - err = cdc.UnmarshalBinaryBare(buf, paramsInfo) - if err != nil { + paramsInfo := new(tmstate.ConsensusParamsInfo) + if err = paramsInfo.Unmarshal(buf); err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED tmos.Exit(fmt.Sprintf(`LoadConsensusParams: Data has been corrupted or its spec has changed: %v\n`, err)) } // TODO: ensure that buf is completely read. - return paramsInfo + return paramsInfo, nil } // saveConsensusParamsInfo persists the consensus params for the next block to disk. // It should be called from s.Save(), right before the state itself is persisted. // If the consensus params did not change after processing the latest block, // only the last height for which they changed is persisted. 
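For completeness, a hypothetical read-side sketch against LoadConsensusParams above (stateStore and height are assumed bindings; tmproto is aliased as in the test files later in this diff):

    params, err := stateStore.LoadConsensusParams(height)
    if err != nil {
        return err // pruned or never stored at this height
    }
    if !params.Equal(&tmproto.ConsensusParams{}) {
        fmt.Println("max block bytes:", params.Block.MaxBytes)
    }
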
-func saveConsensusParamsInfo(db dbm.DB, nextHeight, changeHeight int64, params types.ConsensusParams) { - paramsInfo := &ConsensusParamsInfo{ +func (store dbStore) saveConsensusParamsInfo(nextHeight, changeHeight int64, params tmproto.ConsensusParams) error { + paramsInfo := &tmstate.ConsensusParamsInfo{ LastHeightChanged: changeHeight, } + if changeHeight == nextHeight { paramsInfo.ConsensusParams = params } - db.Set(calcConsensusParamsKey(nextHeight), paramsInfo.Bytes()) + bz, err := paramsInfo.Marshal() + if err != nil { + return err + } + + err = store.db.Set(calcConsensusParamsKey(nextHeight), bz) + if err != nil { + return err + } + + return nil } diff --git a/state/store_test.go b/state/store_test.go index 46e1a7dd1..e43921519 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -10,28 +10,38 @@ import ( dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + tmrand "github.com/tendermint/tendermint/libs/rand" + tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) func TestStoreLoadValidators(t *testing.T) { stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB) val, _ := types.RandValidator(true, 10) vals := types.NewValidatorSet([]*types.Validator{val}) // 1) LoadValidators loads validators using a height where they were last changed - sm.SaveValidatorsInfo(stateDB, 1, 1, vals) - sm.SaveValidatorsInfo(stateDB, 2, 1, vals) - loadedVals, err := sm.LoadValidators(stateDB, 2) + err := sm.SaveValidatorsInfo(stateDB, 1, 1, vals) + require.NoError(t, err) + err = sm.SaveValidatorsInfo(stateDB, 2, 1, vals) + require.NoError(t, err) + loadedVals, err := stateStore.LoadValidators(2) require.NoError(t, err) assert.NotZero(t, loadedVals.Size()) // 2) LoadValidators loads validators using a checkpoint height - sm.SaveValidatorsInfo(stateDB, sm.ValSetCheckpointInterval, 1, vals) + err = sm.SaveValidatorsInfo(stateDB, sm.ValSetCheckpointInterval, 1, vals) + require.NoError(t, err) - loadedVals, err = sm.LoadValidators(stateDB, sm.ValSetCheckpointInterval) + loadedVals, err = stateStore.LoadValidators(sm.ValSetCheckpointInterval) require.NoError(t, err) assert.NotZero(t, loadedVals.Size()) } @@ -42,22 +52,29 @@ func BenchmarkLoadValidators(b *testing.B) { config := cfg.ResetTestRoot("state_") defer os.RemoveAll(config.RootDir) dbType := dbm.BackendType(config.DBBackend) - stateDB := dbm.NewDB("state", dbType, config.DBDir()) - state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) + stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + require.NoError(b, err) + stateStore := sm.NewStore(stateDB) + state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) if err != nil { b.Fatal(err) } + state.Validators = genValSet(valSetSize) state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) - sm.SaveState(stateDB, state) + err = stateStore.Save(state) + require.NoError(b, err) for i := 10; i < 10000000000; i *= 10 { // 10, 100, 1000, ... 
i := i - sm.SaveValidatorsInfo(stateDB, int64(i), state.LastHeightValidatorsChanged, state.NextValidators) + if err := sm.SaveValidatorsInfo(stateDB, + int64(i), state.LastHeightValidatorsChanged, state.NextValidators); err != nil { + b.Fatal(err) + } b.Run(fmt.Sprintf("height=%d", i), func(b *testing.B) { for n := 0; n < b.N; n++ { - _, err := sm.LoadValidators(stateDB, int64(i)) + _, err := stateStore.LoadValidators(int64(i)) if err != nil { b.Fatal(err) } @@ -90,10 +107,12 @@ func TestPruneStates(t *testing.T) { tc := tc t.Run(name, func(t *testing.T) { db := dbm.NewMemDB() + stateStore := sm.NewStore(db) + pk := ed25519.GenPrivKey().PubKey() // Generate a bunch of state data. Validators change for heights ending with 3, and // parameters when ending with 5. - validator := &types.Validator{Address: []byte{1, 2, 3}, VotingPower: 100} + validator := &types.Validator{Address: tmrand.Bytes(crypto.AddressSize), VotingPower: 100, PubKey: pk} validatorSet := &types.ValidatorSet{ Validators: []*types.Validator{validator}, Proposer: validator, @@ -109,30 +128,37 @@ func TestPruneStates(t *testing.T) { paramsChanged = h } - sm.SaveState(db, sm.State{ + state := sm.State{ + InitialHeight: 1, LastBlockHeight: h - 1, Validators: validatorSet, NextValidators: validatorSet, - ConsensusParams: types.ConsensusParams{ - Block: types.BlockParams{MaxBytes: 10e6}, + ConsensusParams: tmproto.ConsensusParams{ + Block: tmproto.BlockParams{MaxBytes: 10e6}, }, LastHeightValidatorsChanged: valsChanged, LastHeightConsensusParamsChanged: paramsChanged, - }) - sm.SaveABCIResponses(db, h, sm.NewABCIResponses(&types.Block{ - Header: types.Header{Height: h}, - Data: types.Data{ - Txs: types.Txs{ - []byte{1}, - []byte{2}, - []byte{3}, - }, + } + + if state.LastBlockHeight >= 1 { + state.LastValidators = state.Validators + } + + err := stateStore.Save(state) + require.NoError(t, err) + + err = stateStore.SaveABCIResponses(h, &tmstate.ABCIResponses{ + DeliverTxs: []*abci.ResponseDeliverTx{ + {Data: []byte{1}}, + {Data: []byte{2}}, + {Data: []byte{3}}, }, - })) + }) + require.NoError(t, err) } // Test assertions - err := sm.PruneStates(db, tc.pruneFrom, tc.pruneTo) + err := stateStore.PruneStates(tc.pruneFrom, tc.pruneTo) if tc.expectErr { require.Error(t, err) return @@ -144,7 +170,7 @@ func TestPruneStates(t *testing.T) { expectABCI := sliceToMap(tc.expectABCI) for h := int64(1); h <= tc.makeHeights; h++ { - vals, err := sm.LoadValidators(db, h) + vals, err := stateStore.LoadValidators(h) if expectVals[h] { require.NoError(t, err, "validators height %v", h) require.NotNil(t, vals) @@ -153,16 +179,15 @@ func TestPruneStates(t *testing.T) { require.Equal(t, sm.ErrNoValSetForHeight{Height: h}, err) } - params, err := sm.LoadConsensusParams(db, h) + params, err := stateStore.LoadConsensusParams(h) if expectParams[h] { require.NoError(t, err, "params height %v", h) - require.False(t, params.Equals(&types.ConsensusParams{})) + require.False(t, params.Equal(&tmproto.ConsensusParams{})) } else { require.Error(t, err, "params height %v", h) - require.Equal(t, sm.ErrNoConsensusParamsForHeight{Height: h}, err) } - abci, err := sm.LoadABCIResponses(db, h) + abci, err := stateStore.LoadABCIResponses(h) if expectABCI[h] { require.NoError(t, err, "abci height %v", h) require.NotNil(t, abci) @@ -175,6 +200,28 @@ func TestPruneStates(t *testing.T) { } } +func TestABCIResponsesResultsHash(t *testing.T) { + responses := &tmstate.ABCIResponses{ + BeginBlock: &abci.ResponseBeginBlock{}, + DeliverTxs: []*abci.ResponseDeliverTx{ + {Code: 32, 
Data: []byte("Hello"), Log: "Huh?"}, + }, + EndBlock: &abci.ResponseEndBlock{}, + } + + root := sm.ABCIResponsesResultsHash(responses) + + // root should be Merkle tree root of DeliverTxs responses + results := types.NewResults(responses.DeliverTxs) + assert.Equal(t, root, results.Hash()) + + // test we can prove first DeliverTx + proof := results.ProveResult(0) + bz, err := results[0].Marshal() + require.NoError(t, err) + assert.NoError(t, proof.Verify(root, bz)) +} + func sliceToMap(s []int64) map[int64]bool { m := make(map[int64]bool, len(s)) for _, i := range s { diff --git a/state/tx_filter.go b/state/tx_filter.go index a8c0627dc..52d055966 100644 --- a/state/tx_filter.go +++ b/state/tx_filter.go @@ -8,11 +8,11 @@ import ( // TxPreCheck returns a function to filter transactions before processing. // The function limits the size of a transaction to the block's maximum data size. func TxPreCheck(state State) mempl.PreCheckFunc { - maxDataBytes := types.MaxDataBytesUnknownEvidence( + maxDataBytes := types.MaxDataBytesNoEvidence( state.ConsensusParams.Block.MaxBytes, state.Validators.Size(), ) - return mempl.PreCheckAminoMaxBytes(maxDataBytes) + return mempl.PreCheckMaxBytes(maxDataBytes) } // TxPostCheck returns a function to filter transactions after processing. diff --git a/state/tx_filter_test.go b/state/tx_filter_test.go index 2dac856bd..7936d94c7 100644 --- a/state/tx_filter_test.go +++ b/state/tx_filter_test.go @@ -17,6 +17,7 @@ import ( func TestTxFilter(t *testing.T) { genDoc := randomGenesisDoc() genDoc.ConsensusParams.Block.MaxBytes = 3000 + genDoc.ConsensusParams.Evidence.MaxBytes = 1500 // Max size of Txs is much smaller than size of block, // since we need to account for commits and evidence. @@ -24,17 +25,16 @@ func TestTxFilter(t *testing.T) { tx types.Tx isErr bool }{ - {types.Tx(tmrand.Bytes(250)), false}, - {types.Tx(tmrand.Bytes(1811)), false}, - {types.Tx(tmrand.Bytes(1831)), false}, - {types.Tx(tmrand.Bytes(1838)), true}, - {types.Tx(tmrand.Bytes(1839)), true}, + {types.Tx(tmrand.Bytes(2155)), false}, + {types.Tx(tmrand.Bytes(2156)), true}, {types.Tx(tmrand.Bytes(3000)), true}, } for i, tc := range testCases { - stateDB := dbm.NewDB("state", "memdb", os.TempDir()) - state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + stateDB, err := dbm.NewDB("state", "memdb", os.TempDir()) + require.NoError(t, err) + stateStore := sm.NewStore(stateDB) + state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) require.NoError(t, err) f := sm.TxPreCheck(state) diff --git a/state/txindex/indexer.go b/state/txindex/indexer.go index 5f7ee7544..e141f82d8 100644 --- a/state/txindex/indexer.go +++ b/state/txindex/indexer.go @@ -4,8 +4,8 @@ import ( "context" "errors" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/types" ) // TxIndexer interface defines methods to index and search transactions. @@ -15,14 +15,14 @@ type TxIndexer interface { AddBatch(b *Batch) error // Index analyzes, indexes and stores a single transaction. - Index(result *types.TxResult) error + Index(result *abci.TxResult) error // Get returns the transaction specified by hash or nil if the transaction is not indexed // or stored. - Get(hash []byte) (*types.TxResult, error) + Get(hash []byte) (*abci.TxResult, error) // Search allows you to query for transactions. 
- Search(ctx context.Context, q *query.Query) ([]*types.TxResult, error) + Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) } //---------------------------------------------------- @@ -31,18 +31,18 @@ type TxIndexer interface { // Batch groups together multiple Index operations to be performed at the same time. // NOTE: Batch is NOT thread-safe and must not be modified after starting its execution. type Batch struct { - Ops []*types.TxResult + Ops []*abci.TxResult } // NewBatch creates a new Batch. func NewBatch(n int64) *Batch { return &Batch{ - Ops: make([]*types.TxResult, n), + Ops: make([]*abci.TxResult, n), } } // Add or update an entry for the given result.Index. -func (b *Batch) Add(result *types.TxResult) error { +func (b *Batch) Add(result *abci.TxResult) error { b.Ops[result.Index] = result return nil } diff --git a/state/txindex/indexer_service_test.go b/state/txindex/indexer_service_test.go index 23968dbca..5394e3a7e 100644 --- a/state/txindex/indexer_service_test.go +++ b/state/txindex/indexer_service_test.go @@ -22,37 +22,48 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { eventBus.SetLogger(log.TestingLogger()) err := eventBus.Start() require.NoError(t, err) - defer eventBus.Stop() + t.Cleanup(func() { + if err := eventBus.Stop(); err != nil { + t.Error(err) + } + }) // tx indexer store := db.NewMemDB() - txIndexer := kv.NewTxIndex(store, kv.IndexAllEvents()) + txIndexer := kv.NewTxIndex(store) service := txindex.NewIndexerService(txIndexer, eventBus) service.SetLogger(log.TestingLogger()) err = service.Start() require.NoError(t, err) - defer service.Stop() + t.Cleanup(func() { + if err := service.Stop(); err != nil { + t.Error(err) + } + }) // publish block with txs - eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ + err = eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ Header: types.Header{Height: 1}, NumTxs: int64(2), }) - txResult1 := &types.TxResult{ + require.NoError(t, err) + txResult1 := &abci.TxResult{ Height: 1, Index: uint32(0), Tx: types.Tx("foo"), Result: abci.ResponseDeliverTx{Code: 0}, } - eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult1}) - txResult2 := &types.TxResult{ + err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult1}) + require.NoError(t, err) + txResult2 := &abci.TxResult{ Height: 1, Index: uint32(1), Tx: types.Tx("bar"), Result: abci.ResponseDeliverTx{Code: 0}, } - eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult2}) + err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult2}) + require.NoError(t, err) time.Sleep(100 * time.Millisecond) diff --git a/state/txindex/kv/codec.go b/state/txindex/kv/codec.go deleted file mode 100644 index de168b228..000000000 --- a/state/txindex/kv/codec.go +++ /dev/null @@ -1,10 +0,0 @@ -package kv - -import ( - amino "github.com/tendermint/go-amino" -) - -var cdc = amino.NewCodec() - -func init() { -} diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index c5ba99ca3..b056e9dd4 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -9,12 +9,11 @@ import ( "strings" "time" - "github.com/pkg/errors" - + "github.com/gogo/protobuf/proto" dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" - tmstring "github.com/tendermint/tendermint/libs/strings" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" ) @@ -27,37 +26,19 @@ var _ txindex.TxIndexer = (*TxIndex)(nil) 
// TxIndex is the simplest possible indexer, backed by key-value storage (levelDB). type TxIndex struct { - store dbm.DB - compositeKeysToIndex []string - indexAllEvents bool + store dbm.DB } // NewTxIndex creates a new KV indexer. -func NewTxIndex(store dbm.DB, options ...func(*TxIndex)) *TxIndex { - txi := &TxIndex{store: store, compositeKeysToIndex: make([]string, 0), indexAllEvents: false} - for _, o := range options { - o(txi) - } - return txi -} - -// IndexEvents is an option for setting which composite keys to index. -func IndexEvents(compositeKeys []string) func(*TxIndex) { - return func(txi *TxIndex) { - txi.compositeKeysToIndex = compositeKeys - } -} - -// IndexAllEvents is an option for indexing all events. -func IndexAllEvents() func(*TxIndex) { - return func(txi *TxIndex) { - txi.indexAllEvents = true +func NewTxIndex(store dbm.DB) *TxIndex { + return &TxIndex{ + store: store, } } // Get gets a transaction from the TxIndex storage and returns it or nil if the // transaction is not found. -func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) { +func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { if len(hash) == 0 { return nil, txindex.ErrorEmptyHash } @@ -70,8 +51,8 @@ func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) { return nil, nil } - txResult := new(types.TxResult) - err = cdc.UnmarshalBinaryBare(rawBytes, &txResult) + txResult := new(abci.TxResult) + err = proto.Unmarshal(rawBytes, txResult) if err != nil { return nil, fmt.Errorf("error reading TxResult: %v", err) } @@ -88,59 +69,70 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { defer storeBatch.Close() for _, result := range b.Ops { - hash := result.Tx.Hash() + hash := types.Tx(result.Tx).Hash() // index tx by events - txi.indexEvents(result, hash, storeBatch) + err := txi.indexEvents(result, hash, storeBatch) + if err != nil { + return err + } - // index tx by height - if txi.indexAllEvents || tmstring.StringInSlice(types.TxHeightKey, txi.compositeKeysToIndex) { - storeBatch.Set(keyForHeight(result), hash) + // index by height (always) + err = storeBatch.Set(keyForHeight(result), hash) + if err != nil { + return err } - // index tx by hash - rawBytes, err := cdc.MarshalBinaryBare(result) + rawBytes, err := proto.Marshal(result) + if err != nil { + return err + } + // index by hash (always) + err = storeBatch.Set(hash, rawBytes) if err != nil { return err } - storeBatch.Set(hash, rawBytes) } - storeBatch.WriteSync() - return nil + return storeBatch.WriteSync() } // Index indexes a single transaction using the given list of events. Each key // that is indexed from the tx's events is a composite of the event type and the // respective attribute's key delimited by a "." (eg. "account.number"). // Any event with an empty type is not indexed.
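A hypothetical sketch of indexing under the new opt-in scheme, where only attributes flagged Index: true are written (indexer is an assumed *TxIndex; the event shapes mirror the kv tests later in this diff):

    txResult := &abci.TxResult{
        Height: 1,
        Index:  0,
        Tx:     types.Tx("HELLO WORLD"),
        Result: abci.ResponseDeliverTx{
            Events: []abci.Event{
                {Type: "account", Attributes: []abci.EventAttribute{
                    {Key: []byte("number"), Value: []byte("1"), Index: true}, // indexed
                    {Key: []byte("memo"), Value: []byte("hi"), Index: false}, // skipped
                }},
            },
        },
    }
    if err := indexer.Index(txResult); err != nil {
        return err
    }
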
-func (txi *TxIndex) Index(result *types.TxResult) error { +func (txi *TxIndex) Index(result *abci.TxResult) error { b := txi.store.NewBatch() defer b.Close() - hash := result.Tx.Hash() + hash := types.Tx(result.Tx).Hash() // index tx by events - txi.indexEvents(result, hash, b) - - // index tx by height - if txi.indexAllEvents || tmstring.StringInSlice(types.TxHeightKey, txi.compositeKeysToIndex) { - b.Set(keyForHeight(result), hash) + err := txi.indexEvents(result, hash, b) + if err != nil { + return err } - // index tx by hash - rawBytes, err := cdc.MarshalBinaryBare(result) + // index by height (always) + err = b.Set(keyForHeight(result), hash) if err != nil { return err } - b.Set(hash, rawBytes) - b.WriteSync() + rawBytes, err := proto.Marshal(result) + if err != nil { + return err + } + // index by hash (always) + err = b.Set(hash, rawBytes) + if err != nil { + return err + } - return nil + return b.WriteSync() } -func (txi *TxIndex) indexEvents(result *types.TxResult, hash []byte, store dbm.SetDeleter) { +func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store dbm.Batch) error { for _, event := range result.Result.Events { // only index events with a non-empty type if len(event.Type) == 0 { @@ -152,12 +144,18 @@ func (txi *TxIndex) indexEvents(result *types.TxResult, hash []byte, store dbm.S continue } + // index if `index: true` is set compositeTag := fmt.Sprintf("%s.%s", event.Type, string(attr.Key)) - if txi.indexAllEvents || tmstring.StringInSlice(compositeTag, txi.compositeKeysToIndex) { - store.Set(keyForEvent(compositeTag, attr.Value, result), hash) + if attr.GetIndex() { + err := store.Set(keyForEvent(compositeTag, attr.Value, result), hash) + if err != nil { + return err + } } } } + + return nil } // Search performs a search using the given query. @@ -171,11 +169,11 @@ func (txi *TxIndex) indexEvents(result *types.TxResult, hash []byte, store dbm.S // // Search will exit early and return any result fetched so far, // when a message is received on the context chan. -func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*types.TxResult, error) { +func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { // Potentially exit early. 
select { case <-ctx.Done(): - results := make([]*types.TxResult, 0) + results := make([]*abci.TxResult, 0) return results, nil default: } @@ -186,22 +184,22 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*types.TxResu // get a list of conditions (like "tx.height > 5") conditions, err := q.Conditions() if err != nil { - return nil, errors.Wrap(err, "error during parsing conditions from query") + return nil, fmt.Errorf("error during parsing conditions from query: %w", err) } // if there is a hash condition, return the result immediately hash, ok, err := lookForHash(conditions) if err != nil { - return nil, errors.Wrap(err, "error during searching for a hash in the query") + return nil, fmt.Errorf("error during searching for a hash in the query: %w", err) } else if ok { res, err := txi.Get(hash) switch { case err != nil: - return []*types.TxResult{}, errors.Wrap(err, "error while retrieving the result") + return []*abci.TxResult{}, fmt.Errorf("error while retrieving the result: %w", err) case res == nil: - return []*types.TxResult{}, nil + return []*abci.TxResult{}, nil default: - return []*types.TxResult{res}, nil + return []*abci.TxResult{res}, nil } } @@ -254,11 +252,11 @@ func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*types.TxResu } } - results := make([]*types.TxResult, 0, len(filteredHashes)) + results := make([]*abci.TxResult, 0, len(filteredHashes)) for _, h := range filteredHashes { res, err := txi.Get(h) if err != nil { - return nil, errors.Wrapf(err, "failed to get Tx{%X}", h) + return nil, fmt.Errorf("failed to get Tx{%X}: %w", h, err) } results = append(results, res) @@ -425,6 +423,32 @@ func (txi *TxIndex) match( default: } } + if err := it.Error(); err != nil { + panic(err) + } + + case c.Op == query.OpExists: + // XXX: can't use startKeyBz here because c.Operand is nil + // (e.g. "account.owner//" won't match w/ a single row) + it, err := dbm.IteratePrefix(txi.store, startKey(c.CompositeKey)) + if err != nil { + panic(err) + } + defer it.Close() + + for ; it.Valid(); it.Next() { + tmpHashes[string(it.Value())] = it.Value() + + // Potentially exit early. + select { + case <-ctx.Done(): + break + default: + } + } + if err := it.Error(); err != nil { + panic(err) + } case c.Op == query.OpContains: // XXX: startKey does not apply here. 
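The OpExists branch added above backs the new EXISTS operator; a hypothetical query (indexer and ctx are assumed bindings, matching the kv test cases later in this diff):

    // Matches every tx that emitted an indexed "account.number" attribute.
    results, err := indexer.Search(ctx, query.MustParse("account.number EXISTS"))
    if err != nil {
        return err
    }
    fmt.Println("matches:", len(results))
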
@@ -452,6 +476,9 @@ func (txi *TxIndex) match( default: } } + if err := it.Error(); err != nil { + panic(err) + } default: panic("other operators should be handled already") } @@ -553,6 +580,9 @@ LOOP: default: } } + if err := it.Error(); err != nil { + panic(err) + } if len(tmpHashes) == 0 || firstRun { // Either: @@ -583,7 +613,6 @@ LOOP: return filteredHashes } -/////////////////////////////////////////////////////////////////////////////// // Keys func isTagKey(key []byte) bool { @@ -595,7 +624,7 @@ func extractValueFromKey(key []byte) string { return parts[1] } -func keyForEvent(key string, value []byte, result *types.TxResult) []byte { +func keyForEvent(key string, value []byte, result *abci.TxResult) []byte { return []byte(fmt.Sprintf("%s/%s/%d/%d", key, value, @@ -604,7 +633,7 @@ func keyForEvent(key string, value []byte, result *types.TxResult) []byte { )) } -func keyForHeight(result *types.TxResult) []byte { +func keyForHeight(result *abci.TxResult) []byte { return []byte(fmt.Sprintf("%s/%d/%d/%d", types.TxHeightKey, result.Height, diff --git a/state/txindex/kv/kv_bench_test.go b/state/txindex/kv/kv_bench_test.go index 31267f54c..fdfe550f3 100644 --- a/state/txindex/kv/kv_bench_test.go +++ b/state/txindex/kv/kv_bench_test.go @@ -10,7 +10,6 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/kv" "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) @@ -26,16 +25,15 @@ func BenchmarkTxSearch(b *testing.B) { b.Errorf("failed to create database: %s", err) } - allowedKeys := []string{"transfer.address", "transfer.amount"} - indexer := NewTxIndex(db, IndexEvents(allowedKeys)) + indexer := NewTxIndex(db) for i := 0; i < 35000; i++ { events := []abci.Event{ { Type: "transfer", - Attributes: []kv.Pair{ - {Key: []byte("address"), Value: []byte(fmt.Sprintf("address_%d", i%100))}, - {Key: []byte("amount"), Value: []byte("50")}, + Attributes: []abci.EventAttribute{ + {Key: []byte("address"), Value: []byte(fmt.Sprintf("address_%d", i%100)), Index: true}, + {Key: []byte("amount"), Value: []byte("50"), Index: true}, }, }, } @@ -45,7 +43,7 @@ func BenchmarkTxSearch(b *testing.B) { b.Errorf("failed produce random bytes: %s", err) } - txResult := &types.TxResult{ + txResult := &abci.TxResult{ Height: int64(i), Index: 0, Tx: types.Tx(string(txBz)), diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index fcd2eba13..9b15c1971 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -7,13 +7,13 @@ import ( "os" "testing" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" db "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/kv" "github.com/tendermint/tendermint/libs/pubsub/query" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/state/txindex" @@ -24,7 +24,7 @@ func TestTxIndex(t *testing.T) { indexer := NewTxIndex(db.NewMemDB()) tx := types.Tx("HELLO WORLD") - txResult := &types.TxResult{ + txResult := &abci.TxResult{ Height: 1, Index: 0, Tx: tx, @@ -44,10 +44,10 @@ func TestTxIndex(t *testing.T) { loadedTxResult, err := indexer.Get(hash) require.NoError(t, err) - assert.Equal(t, txResult, loadedTxResult) + assert.True(t, proto.Equal(txResult, loadedTxResult)) tx2 := types.Tx("BYE BYE WORLD") - txResult2 := &types.TxResult{ + txResult2 := &abci.TxResult{ Height: 1, Index: 
0, Tx: tx2, @@ -63,19 +63,18 @@ func TestTxIndex(t *testing.T) { loadedTxResult2, err := indexer.Get(hash2) require.NoError(t, err) - assert.Equal(t, txResult2, loadedTxResult2) + assert.True(t, proto.Equal(txResult2, loadedTxResult2)) } func TestTxSearch(t *testing.T) { - allowedKeys := []string{"account.number", "account.owner", "account.date"} - indexer := NewTxIndex(db.NewMemDB(), IndexEvents(allowedKeys)) + indexer := NewTxIndex(db.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ - {Type: "account", Attributes: []kv.Pair{{Key: []byte("number"), Value: []byte("1")}}}, - {Type: "account", Attributes: []kv.Pair{{Key: []byte("owner"), Value: []byte("Ivan")}}}, - {Type: "", Attributes: []kv.Pair{{Key: []byte("not_allowed"), Value: []byte("Vlad")}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte("1"), Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("owner"), Value: []byte("Ivan"), Index: true}}}, + {Type: "", Attributes: []abci.EventAttribute{{Key: []byte("not_allowed"), Value: []byte("Vlad"), Index: true}}}, }) - hash := txResult.Tx.Hash() + hash := types.Tx(txResult.Tx).Hash() err := indexer.Index(txResult) require.NoError(t, err) @@ -117,6 +116,10 @@ func TestTxSearch(t *testing.T) { {"account.owner CONTAINS 'Vlad'", 0}, // search using the wrong key (of numeric type) using CONTAINS {"account.number CONTAINS 'Iv'", 0}, + // search using EXISTS + {"account.number EXISTS", 1}, + // search using EXISTS for non existing key + {"account.date EXISTS", 0}, } ctx := context.Background() @@ -129,20 +132,21 @@ func TestTxSearch(t *testing.T) { assert.Len(t, results, tc.resultsLength) if tc.resultsLength > 0 { - assert.Equal(t, []*types.TxResult{txResult}, results) + for _, txr := range results { + assert.True(t, proto.Equal(txResult, txr)) + } } }) } } func TestTxSearchWithCancelation(t *testing.T) { - allowedKeys := []string{"account.number", "account.owner", "account.date"} - indexer := NewTxIndex(db.NewMemDB(), IndexEvents(allowedKeys)) + indexer := NewTxIndex(db.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ - {Type: "account", Attributes: []kv.Pair{{Key: []byte("number"), Value: []byte("1")}}}, - {Type: "account", Attributes: []kv.Pair{{Key: []byte("owner"), Value: []byte("Ivan")}}}, - {Type: "", Attributes: []kv.Pair{{Key: []byte("not_allowed"), Value: []byte("Vlad")}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte("1"), Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("owner"), Value: []byte("Ivan"), Index: true}}}, + {Type: "", Attributes: []abci.EventAttribute{{Key: []byte("not_allowed"), Value: []byte("Vlad"), Index: true}}}, }) err := indexer.Index(txResult) require.NoError(t, err) @@ -155,14 +159,13 @@ func TestTxSearchWithCancelation(t *testing.T) { } func TestTxSearchDeprecatedIndexing(t *testing.T) { - allowedKeys := []string{"account.number", "sender"} - indexer := NewTxIndex(db.NewMemDB(), IndexEvents(allowedKeys)) + indexer := NewTxIndex(db.NewMemDB()) // index tx using events indexing (composite key) txResult1 := txResultWithEvents([]abci.Event{ - {Type: "account", Attributes: []kv.Pair{{Key: []byte("number"), Value: []byte("1")}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte("1"), Index: true}}}, }) - hash1 := txResult1.Tx.Hash() + hash1 := types.Tx(txResult1.Tx).Hash() err := indexer.Index(txResult1) require.NoError(t, err) @@ -171,10 +174,10 @@ func 
TestTxSearchDeprecatedIndexing(t *testing.T) { txResult2 := txResultWithEvents(nil) txResult2.Tx = types.Tx("HELLO WORLD 2") - hash2 := txResult2.Tx.Hash() + hash2 := types.Tx(txResult2.Tx).Hash() b := indexer.store.NewBatch() - rawBytes, err := cdc.MarshalBinaryBare(txResult2) + rawBytes, err := proto.Marshal(txResult2) require.NoError(t, err) depKey := []byte(fmt.Sprintf("%s/%s/%d/%d", @@ -184,34 +187,38 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) { txResult2.Index, )) - b.Set(depKey, hash2) - b.Set(keyForHeight(txResult2), hash2) - b.Set(hash2, rawBytes) - b.Write() + err = b.Set(depKey, hash2) + require.NoError(t, err) + err = b.Set(keyForHeight(txResult2), hash2) + require.NoError(t, err) + err = b.Set(hash2, rawBytes) + require.NoError(t, err) + err = b.Write() + require.NoError(t, err) testCases := []struct { q string - results []*types.TxResult + results []*abci.TxResult }{ // search by hash - {fmt.Sprintf("tx.hash = '%X'", hash1), []*types.TxResult{txResult1}}, + {fmt.Sprintf("tx.hash = '%X'", hash1), []*abci.TxResult{txResult1}}, // search by hash - {fmt.Sprintf("tx.hash = '%X'", hash2), []*types.TxResult{txResult2}}, + {fmt.Sprintf("tx.hash = '%X'", hash2), []*abci.TxResult{txResult2}}, // search by exact match (one key) - {"account.number = 1", []*types.TxResult{txResult1}}, - {"account.number >= 1 AND account.number <= 5", []*types.TxResult{txResult1}}, + {"account.number = 1", []*abci.TxResult{txResult1}}, + {"account.number >= 1 AND account.number <= 5", []*abci.TxResult{txResult1}}, // search by range (lower bound) - {"account.number >= 1", []*types.TxResult{txResult1}}, + {"account.number >= 1", []*abci.TxResult{txResult1}}, // search by range (upper bound) - {"account.number <= 5", []*types.TxResult{txResult1}}, + {"account.number <= 5", []*abci.TxResult{txResult1}}, // search using not allowed key - {"not_allowed = 'boom'", []*types.TxResult{}}, + {"not_allowed = 'boom'", []*abci.TxResult{}}, // search for not existing tx result - {"account.number >= 2 AND account.number <= 5", []*types.TxResult{}}, + {"account.number >= 2 AND account.number <= 5", []*abci.TxResult{}}, // search using not existing key - {"account.date >= TIME 2013-05-03T14:45:00Z", []*types.TxResult{}}, + {"account.date >= TIME 2013-05-03T14:45:00Z", []*abci.TxResult{}}, // search by deprecated key - {"sender = 'addr1'", []*types.TxResult{txResult2}}, + {"sender = 'addr1'", []*abci.TxResult{txResult2}}, } ctx := context.Background() @@ -221,18 +228,21 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) { t.Run(tc.q, func(t *testing.T) { results, err := indexer.Search(ctx, query.MustParse(tc.q)) require.NoError(t, err) - require.Equal(t, results, tc.results) + for _, txr := range results { + for _, tr := range tc.results { + assert.True(t, proto.Equal(tr, txr)) + } + } }) } } func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { - allowedKeys := []string{"account.number"} - indexer := NewTxIndex(db.NewMemDB(), IndexEvents(allowedKeys)) + indexer := NewTxIndex(db.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ - {Type: "account", Attributes: []kv.Pair{{Key: []byte("number"), Value: []byte("1")}}}, - {Type: "account", Attributes: []kv.Pair{{Key: []byte("number"), Value: []byte("2")}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte("1"), Index: true}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte("2"), Index: true}}}, }) err := indexer.Index(txResult) @@ -244,16 +254,17 @@ 
func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { assert.NoError(t, err) assert.Len(t, results, 1) - assert.Equal(t, []*types.TxResult{txResult}, results) + for _, txr := range results { + assert.True(t, proto.Equal(txResult, txr)) + } } func TestTxSearchMultipleTxs(t *testing.T) { - allowedKeys := []string{"account.number", "account.number.id"} - indexer := NewTxIndex(db.NewMemDB(), IndexEvents(allowedKeys)) + indexer := NewTxIndex(db.NewMemDB()) // indexed first, but bigger height (to test the order of transactions) txResult := txResultWithEvents([]abci.Event{ - {Type: "account", Attributes: []kv.Pair{{Key: []byte("number"), Value: []byte("1")}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte("1"), Index: true}}}, }) txResult.Tx = types.Tx("Bob's account") @@ -264,7 +275,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { // indexed second, but smaller height (to test the order of transactions) txResult2 := txResultWithEvents([]abci.Event{ - {Type: "account", Attributes: []kv.Pair{{Key: []byte("number"), Value: []byte("2")}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte("2"), Index: true}}}, }) txResult2.Tx = types.Tx("Alice's account") txResult2.Height = 1 @@ -275,7 +286,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { // indexed third (to test the order of transactions) txResult3 := txResultWithEvents([]abci.Event{ - {Type: "account", Attributes: []kv.Pair{{Key: []byte("number"), Value: []byte("3")}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte("3"), Index: true}}}, }) txResult3.Tx = types.Tx("Jack's account") txResult3.Height = 1 @@ -286,7 +297,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { // indexed fourth (to test we don't include txs with similar events) // https://github.com/tendermint/tendermint/issues/2908 txResult4 := txResultWithEvents([]abci.Event{ - {Type: "account", Attributes: []kv.Pair{{Key: []byte("number.id"), Value: []byte("1")}}}, + {Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number.id"), Value: []byte("1"), Index: true}}}, }) txResult4.Tx = types.Tx("Mike's account") txResult4.Height = 2 @@ -302,9 +313,9 @@ func TestTxSearchMultipleTxs(t *testing.T) { require.Len(t, results, 3) } -func txResultWithEvents(events []abci.Event) *types.TxResult { +func txResultWithEvents(events []abci.Event) *abci.TxResult { tx := types.Tx("HELLO WORLD") - return &types.TxResult{ + return &abci.TxResult{ Height: 1, Index: 0, Tx: tx, @@ -319,19 +330,18 @@ func txResultWithEvents(events []abci.Event) *types.TxResult { func benchmarkTxIndex(txsCount int64, b *testing.B) { dir, err := ioutil.TempDir("", "tx_index_db") - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(dir) // nolint: errcheck + require.NoError(b, err) + defer os.RemoveAll(dir) - store := db.NewDB("tx_index", "goleveldb", dir) + store, err := db.NewDB("tx_index", "goleveldb", dir) + require.NoError(b, err) indexer := NewTxIndex(store) batch := txindex.NewBatch(txsCount) txIndex := uint32(0) for i := int64(0); i < txsCount; i++ { tx := tmrand.Bytes(250) - txResult := &types.TxResult{ + txResult := &abci.TxResult{ Height: 1, Index: txIndex, Tx: tx, diff --git a/state/txindex/null/null.go b/state/txindex/null/null.go index 1ae7f7942..98e4db79e 100644 --- a/state/txindex/null/null.go +++ b/state/txindex/null/null.go @@ -4,9 +4,9 @@ import ( "context" "errors" + abci "github.com/tendermint/tendermint/abci/types" 
"github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/state/txindex" - "github.com/tendermint/tendermint/types" ) var _ txindex.TxIndexer = (*TxIndex)(nil) @@ -15,7 +15,7 @@ var _ txindex.TxIndexer = (*TxIndex)(nil) type TxIndex struct{} // Get on a TxIndex is disabled and panics when invoked. -func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) { +func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { return nil, errors.New(`indexing is disabled (set 'tx_index = "kv"' in config)`) } @@ -25,10 +25,10 @@ func (txi *TxIndex) AddBatch(batch *txindex.Batch) error { } // Index is a noop and always returns nil. -func (txi *TxIndex) Index(result *types.TxResult) error { +func (txi *TxIndex) Index(result *abci.TxResult) error { return nil } -func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*types.TxResult, error) { - return []*types.TxResult{}, nil +func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { + return []*abci.TxResult{}, nil } diff --git a/state/validation.go b/state/validation.go index ccbcc72e2..e29cea487 100644 --- a/state/validation.go +++ b/state/validation.go @@ -5,23 +5,21 @@ import ( "errors" "fmt" - dbm "github.com/tendermint/tm-db" - - "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/types" ) //----------------------------------------------------- // Validate block -func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, block *types.Block) error { +func validateBlock(state State, block *types.Block) error { // Validate internal consistency. if err := block.ValidateBasic(); err != nil { return err } // Validate basic info. - if block.Version != state.Version.Consensus { + if block.Version.App != state.Version.Consensus.App || + block.Version.Block != state.Version.Consensus.Block { return fmt.Errorf("wrong Block.Header.Version. Expected %v, got %v", state.Version.Consensus, block.Version, @@ -33,13 +31,16 @@ func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, block block.ChainID, ) } - if block.Height != state.LastBlockHeight+1 { + if state.LastBlockHeight == 0 && block.Height != state.InitialHeight { + return fmt.Errorf("wrong Block.Header.Height. Expected %v for initial block, got %v", + block.Height, state.InitialHeight) + } + if state.LastBlockHeight > 0 && block.Height != state.LastBlockHeight+1 { return fmt.Errorf("wrong Block.Header.Height. Expected %v, got %v", state.LastBlockHeight+1, block.Height, ) } - // Validate prev block info. if !block.LastBlockID.Equals(state.LastBlockID) { return fmt.Errorf("wrong Block.Header.LastBlockID. Expected %v, got %v", @@ -55,9 +56,10 @@ func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, block block.AppHash, ) } - if !bytes.Equal(block.ConsensusHash, state.ConsensusParams.Hash()) { + hashCP := types.HashConsensusParams(state.ConsensusParams) + if !bytes.Equal(block.ConsensusHash, hashCP) { return fmt.Errorf("wrong Block.Header.ConsensusHash. Expected %X, got %v", - state.ConsensusParams.Hash(), + hashCP, block.ConsensusHash, ) } @@ -81,30 +83,37 @@ func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, block } // Validate block LastCommit. 
- if block.Height == 1 { + if block.Height == state.InitialHeight { if len(block.LastCommit.Signatures) != 0 { - return errors.New("block at height 1 can't have LastCommit signatures") + return errors.New("initial block can't have LastCommit signatures") } } else { - if len(block.LastCommit.Signatures) != state.LastValidators.Size() { - return types.NewErrInvalidCommitSignatures(state.LastValidators.Size(), len(block.LastCommit.Signatures)) - } - err := state.LastValidators.VerifyCommit( - state.ChainID, state.LastBlockID, block.Height-1, block.LastCommit) - if err != nil { + // LastCommit.Signatures length is checked in VerifyCommit. + if err := state.LastValidators.VerifyCommit( + state.ChainID, state.LastBlockID, block.Height-1, block.LastCommit); err != nil { return err } } + // NOTE: We can't actually verify it's the right proposer because we don't + // know what round the block was first proposed. So just check that it's + // a legit address and a known validator. + // The length is checked in ValidateBasic above. + if !state.Validators.HasAddress(block.ProposerAddress) { + return fmt.Errorf("block.Header.ProposerAddress %X is not a validator", + block.ProposerAddress, + ) + } + // Validate block Time - if block.Height > 1 { + switch { + case block.Height > state.InitialHeight: if !block.Time.After(state.LastBlockTime) { return fmt.Errorf("block time %v not greater than last block time %v", block.Time, state.LastBlockTime, ) } - medianTime := MedianTime(block.LastCommit, state.LastValidators) if !block.Time.Equal(medianTime) { return fmt.Errorf("invalid block time. Expected %v, got %v", @@ -112,7 +121,8 @@ func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, block block.Time, ) } - } else if block.Height == 1 { + + case block.Height == state.InitialHeight: genesisTime := state.LastBlockTime if !block.Time.Equal(genesisTime) { return fmt.Errorf("block time %v is not equal to genesis time %v", @@ -120,84 +130,15 @@ func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, block genesisTime, ) } - } - - // Limit the amount of evidence - maxNumEvidence, _ := types.MaxEvidencePerBlock(state.ConsensusParams.Block.MaxBytes) - numEvidence := int64(len(block.Evidence.Evidence)) - if numEvidence > maxNumEvidence { - return types.NewErrEvidenceOverflow(maxNumEvidence, numEvidence) - - } - - // Validate all evidence. - for _, ev := range block.Evidence.Evidence { - if err := VerifyEvidence(stateDB, state, ev); err != nil { - return types.NewErrEvidenceInvalid(ev, err) - } - if evidencePool != nil && evidencePool.IsCommitted(ev) { - return types.NewErrEvidenceInvalid(ev, errors.New("evidence was already committed")) - } - } - // NOTE: We can't actually verify it's the right proposer because we dont - // know what round the block was first proposed. So just check that it's - // a legit address and a known validator. 
- if len(block.ProposerAddress) != crypto.AddressSize || - !state.Validators.HasAddress(block.ProposerAddress) { - return fmt.Errorf("block.Header.ProposerAddress, %X, is not a validator", - block.ProposerAddress, - ) + default: + return fmt.Errorf("block height %v lower than initial height %v", + block.Height, state.InitialHeight) } - return nil -} - -// VerifyEvidence verifies the evidence fully by checking: -// - it is sufficiently recent (MaxAge) -// - it is from a key who was a validator at the given height -// - it is internally consistent -// - it was properly signed by the alleged equivocator -func VerifyEvidence(stateDB dbm.DB, state State, evidence types.Evidence) error { - var ( - height = state.LastBlockHeight - evidenceParams = state.ConsensusParams.Evidence - - ageDuration = state.LastBlockTime.Sub(evidence.Time()) - ageNumBlocks = height - evidence.Height() - ) - - if ageDuration > evidenceParams.MaxAgeDuration && ageNumBlocks > evidenceParams.MaxAgeNumBlocks { - return fmt.Errorf( - "evidence from height %d (created at: %v) is too old; min height is %d and evidence can not be older than %v", - evidence.Height(), - evidence.Time(), - height-evidenceParams.MaxAgeNumBlocks, - state.LastBlockTime.Add(evidenceParams.MaxAgeDuration), - ) - } - - valset, err := LoadValidators(stateDB, evidence.Height()) - if err != nil { - // TODO: if err is just that we cant find it cuz we pruned, ignore. - // TODO: if its actually bad evidence, punish peer - return err - } - - // The address must have been an active validator at the height. - // NOTE: we will ignore evidence from H if the key was not a validator - // at H, even if it is a validator at some nearby H' - // XXX: this makes lite-client bisection as is unsafe - // See https://github.com/tendermint/tendermint/issues/3244 - ev := evidence - height, addr := ev.Height(), ev.Address() - _, val := valset.GetByAddress(addr) - if val == nil { - return fmt.Errorf("address %X was not a validator at height %d", addr, height) - } - - if err := evidence.Verify(state.ChainID, val.PubKey); err != nil { - return err + // Check that evidence doesn't exceed the maximum number of bytes allowed.
+ if max, got := state.ConsensusParams.Evidence.MaxBytes, block.Evidence.ByteSize(); got > max { + return types.NewErrEvidenceOverflow(max, got) } return nil diff --git a/state/validation_test.go b/state/validation_test.go index 373b77dc1..0c9376581 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -4,13 +4,18 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/mock" + memmock "github.com/tendermint/tendermint/mempool/mock" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/state/mocks" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" ) @@ -20,24 +25,25 @@ const validationTestsStopHeight int64 = 10 func TestValidateBlockHeader(t *testing.T) { proxyApp := newTestApp() require.NoError(t, proxyApp.Start()) - defer proxyApp.Stop() + defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(3, 1) + stateStore := sm.NewStore(stateDB) blockExec := sm.NewBlockExecutor( - stateDB, + stateStore, log.TestingLogger(), proxyApp.Consensus(), - mock.Mempool{}, - sm.MockEvidencePool{}, + memmock.Mempool{}, + sm.EmptyEvidencePool{}, ) lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) // some bad values wrongHash := tmhash.Sum([]byte("this hash is wrong")) wrongVersion1 := state.Version.Consensus - wrongVersion1.Block++ + wrongVersion1.Block += 2 wrongVersion2 := state.Version.Consensus - wrongVersion2.App++ + wrongVersion2.App += 2 // Manipulation of any header field causes failure. 
testCases := []struct { @@ -49,8 +55,9 @@ func TestValidateBlockHeader(t *testing.T) { {"ChainID wrong", func(block *types.Block) { block.ChainID = "not-the-real-one" }}, {"Height wrong", func(block *types.Block) { block.Height += 10 }}, {"Time wrong", func(block *types.Block) { block.Time = block.Time.Add(-time.Second * 1) }}, + {"Time wrong 2", func(block *types.Block) { block.Time = block.Time.Add(time.Second * 1) }}, - {"LastBlockID wrong", func(block *types.Block) { block.LastBlockID.PartsHeader.Total += 10 }}, + {"LastBlockID wrong", func(block *types.Block) { block.LastBlockID.PartSetHeader.Total += 10 }}, {"LastCommitHash wrong", func(block *types.Block) { block.LastCommitHash = wrongHash }}, {"DataHash wrong", func(block *types.Block) { block.DataHash = wrongHash }}, @@ -63,6 +70,11 @@ func TestValidateBlockHeader(t *testing.T) { {"EvidenceHash wrong", func(block *types.Block) { block.EvidenceHash = wrongHash }}, {"Proposer wrong", func(block *types.Block) { block.ProposerAddress = ed25519.GenPrivKey().PubKey().Address() }}, {"Proposer invalid", func(block *types.Block) { block.ProposerAddress = []byte("wrong size") }}, + + {"first LastCommit contains signatures", func(block *types.Block) { + block.LastCommit = types.NewCommit(0, 0, types.BlockID{}, []types.CommitSig{types.NewCommitSigAbsent()}) + block.LastCommitHash = block.LastCommit.Hash() + }}, } // Build up state for multiple heights @@ -75,6 +87,7 @@ func TestValidateBlockHeader(t *testing.T) { block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, proposerAddr) tc.malleateBlock(block) err := blockExec.ValidateBlock(state, block) + t.Logf("%s: %v", tc.name, err) require.Error(t, err, tc.name) } @@ -85,20 +98,34 @@ func TestValidateBlockHeader(t *testing.T) { state, _, lastCommit, err = makeAndCommitGoodBlock(state, height, lastCommit, proposerAddr, blockExec, privVals, nil) require.NoError(t, err, "height %d", height) } + + nextHeight := validationTestsStopHeight + block, _ := state.MakeBlock( + nextHeight, + makeTxs(nextHeight), + lastCommit, + nil, + state.Validators.GetProposer().Address, + ) + state.InitialHeight = nextHeight + 1 + err := blockExec.ValidateBlock(state, block) + require.Error(t, err, "expected an error when state is ahead of block") + assert.Contains(t, err.Error(), "lower than initial height") } func TestValidateBlockCommit(t *testing.T) { proxyApp := newTestApp() require.NoError(t, proxyApp.Start()) - defer proxyApp.Stop() + defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(1, 1) + stateStore := sm.NewStore(stateDB) blockExec := sm.NewBlockExecutor( - stateDB, + stateStore, log.TestingLogger(), proxyApp.Consensus(), - mock.Mempool{}, - sm.MockEvidencePool{}, + memmock.Mempool{}, + sm.EmptyEvidencePool{}, ) lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) wrongSigsCommit := types.NewCommit(1, 0, types.BlockID{}, nil) @@ -181,14 +208,20 @@ func TestValidateBlockCommit(t *testing.T) { Height: height, Round: 0, Timestamp: tmtime.Now(), - Type: types.PrecommitType, + Type: tmproto.PrecommitType, BlockID: blockID, } - err = badPrivVal.SignVote(chainID, goodVote) + + g := goodVote.ToProto() + b := badVote.ToProto() + + err = badPrivVal.SignVote(chainID, g) require.NoError(t, err, "height %d", height) - err = badPrivVal.SignVote(chainID, badVote) + err = badPrivVal.SignVote(chainID, b) require.NoError(t, err, "height %d", height) + goodVote.Signature, badVote.Signature = g.Signature, b.Signature + wrongSigsCommit = 
types.NewCommit(goodVote.Height, goodVote.Round, blockID, []types.CommitSig{goodVote.CommitSig(), badVote.CommitSig()}) } @@ -197,50 +230,66 @@ func TestValidateBlockCommit(t *testing.T) { func TestValidateBlockEvidence(t *testing.T) { proxyApp := newTestApp() require.NoError(t, proxyApp.Start()) - defer proxyApp.Stop() + defer proxyApp.Stop() //nolint:errcheck // ignore for tests - state, stateDB, privVals := makeState(3, 1) + state, stateDB, privVals := makeState(4, 1) + stateStore := sm.NewStore(stateDB) + defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) + + evpool := &mocks.EvidencePool{} + evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil) + evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return() + evpool.On("ABCIEvidence", mock.AnythingOfType("int64"), mock.AnythingOfType("[]types.Evidence")).Return( + []abci.Evidence{}) + + state.ConsensusParams.Evidence.MaxBytes = 1000 blockExec := sm.NewBlockExecutor( - stateDB, + stateStore, log.TestingLogger(), proxyApp.Consensus(), - mock.Mempool{}, - sm.MockEvidencePool{}, + memmock.Mempool{}, + evpool, ) lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) for height := int64(1); height < validationTestsStopHeight; height++ { proposerAddr := state.Validators.GetProposer().Address - proposerIdx, _ := state.Validators.GetByAddress(proposerAddr) - goodEvidence := types.NewMockEvidence(height, time.Now(), proposerIdx, proposerAddr) + maxBytesEvidence := state.ConsensusParams.Evidence.MaxBytes if height > 1 { /* A block with too much evidence fails */ - maxBlockSize := state.ConsensusParams.Block.MaxBytes - maxNumEvidence, _ := types.MaxEvidencePerBlock(maxBlockSize) - require.True(t, maxNumEvidence > 2) evidence := make([]types.Evidence, 0) - // one more than the maximum allowed evidence - for i := int64(0); i <= maxNumEvidence; i++ { - evidence = append(evidence, goodEvidence) + var currentBytes int64 = 0 + // more bytes than the maximum allowed for evidence + for currentBytes <= maxBytesEvidence { + newEv := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), + privVals[proposerAddr.String()], chainID) + evidence = append(evidence, newEv) + currentBytes += int64(len(newEv.Bytes())) } block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, evidence, proposerAddr) err := blockExec.ValidateBlock(state, block) - _, ok := err.(*types.ErrEvidenceOverflow) - require.True(t, ok, "expected error to be of type ErrEvidenceOverflow at height %d", height) + if assert.Error(t, err) { + _, ok := err.(*types.ErrEvidenceOverflow) + require.True(t, ok, "expected error to be of type ErrEvidenceOverflow at height %d but got %v", height, err) + } } /* A good block with several pieces of good evidence passes */ - maxBlockSize := state.ConsensusParams.Block.MaxBytes - maxNumEvidence, _ := types.MaxEvidencePerBlock(maxBlockSize) - require.True(t, maxNumEvidence > 2) evidence := make([]types.Evidence, 0) + var currentBytes int64 = 0 // precisely the amount of allowed evidence - for i := int64(0); i < maxNumEvidence; i++ { - evidence = append(evidence, goodEvidence) + for { + newEv := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, + privVals[proposerAddr.String()], chainID) + currentBytes += int64(len(newEv.Bytes())) + if currentBytes >= maxBytesEvidence { + break + } + evidence = append(evidence, newEv) } var err error @@ -256,20 +305,3 @@ func TestValidateBlockEvidence(t *testing.T) { require.NoError(t, err, 
"height %d", height) } } - -func TestValidateFailBlockOnCommittedEvidence(t *testing.T) { - var height int64 = 1 - state, stateDB, _ := makeState(1, int(height)) - - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), nil, nil, mockEvPoolAlwaysCommitted{}) - // A block with a couple pieces of evidence passes. - block := makeBlock(state, height) - addr, _ := state.Validators.GetByIndex(0) - alreadyCommittedEvidence := types.NewMockEvidence(height, time.Now(), 0, addr) - block.Evidence.Evidence = []types.Evidence{alreadyCommittedEvidence} - block.EvidenceHash = block.Evidence.Hash() - err := blockExec.ValidateBlock(state, block) - - require.Error(t, err) - require.IsType(t, err, &types.ErrEvidenceInvalid{}) -} diff --git a/statesync/chunks.go b/statesync/chunks.go new file mode 100644 index 000000000..028c863b9 --- /dev/null +++ b/statesync/chunks.go @@ -0,0 +1,321 @@ +package statesync + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "time" + + tmsync "github.com/tendermint/tendermint/libs/sync" + "github.com/tendermint/tendermint/p2p" +) + +// errDone is returned by chunkQueue.Next() when all chunks have been returned. +var errDone = errors.New("chunk queue has completed") + +// chunk contains data for a chunk. +type chunk struct { + Height uint64 + Format uint32 + Index uint32 + Chunk []byte + Sender p2p.ID +} + +// chunkQueue manages chunks for a state sync process, ordering them if requested. It acts as an +// iterator over all chunks, but callers can request chunks to be retried, optionally after +// refetching. +type chunkQueue struct { + tmsync.Mutex + snapshot *snapshot // if this is nil, the queue has been closed + dir string // temp dir for on-disk chunk storage + chunkFiles map[uint32]string // path to temporary chunk file + chunkSenders map[uint32]p2p.ID // the peer who sent the given chunk + chunkAllocated map[uint32]bool // chunks that have been allocated via Allocate() + chunkReturned map[uint32]bool // chunks returned via Next() + waiters map[uint32][]chan<- uint32 // signals WaitFor() waiters about chunk arrival +} + +// newChunkQueue creates a new chunk queue for a snapshot, using a temp dir for storage. +// Callers must call Close() when done. +func newChunkQueue(snapshot *snapshot, tempDir string) (*chunkQueue, error) { + dir, err := ioutil.TempDir(tempDir, "tm-statesync") + if err != nil { + return nil, fmt.Errorf("unable to create temp dir for state sync chunks: %w", err) + } + if snapshot.Chunks == 0 { + return nil, errors.New("snapshot has no chunks") + } + return &chunkQueue{ + snapshot: snapshot, + dir: dir, + chunkFiles: make(map[uint32]string, snapshot.Chunks), + chunkSenders: make(map[uint32]p2p.ID, snapshot.Chunks), + chunkAllocated: make(map[uint32]bool, snapshot.Chunks), + chunkReturned: make(map[uint32]bool, snapshot.Chunks), + waiters: make(map[uint32][]chan<- uint32), + }, nil +} + +// Add adds a chunk to the queue. It ignores chunks that already exist, returning false. 
+func (q *chunkQueue) Add(chunk *chunk) (bool, error) { + if chunk == nil || chunk.Chunk == nil { + return false, errors.New("cannot add nil chunk") + } + q.Lock() + defer q.Unlock() + if q.snapshot == nil { + return false, nil // queue is closed + } + if chunk.Height != q.snapshot.Height { + return false, fmt.Errorf("invalid chunk height %v, expected %v", chunk.Height, q.snapshot.Height) + } + if chunk.Format != q.snapshot.Format { + return false, fmt.Errorf("invalid chunk format %v, expected %v", chunk.Format, q.snapshot.Format) + } + if chunk.Index >= q.snapshot.Chunks { + return false, fmt.Errorf("received unexpected chunk %v", chunk.Index) + } + if q.chunkFiles[chunk.Index] != "" { + return false, nil + } + + path := filepath.Join(q.dir, strconv.FormatUint(uint64(chunk.Index), 10)) + err := ioutil.WriteFile(path, chunk.Chunk, 0600) + if err != nil { + return false, fmt.Errorf("failed to save chunk %v to file %v: %w", chunk.Index, path, err) + } + q.chunkFiles[chunk.Index] = path + q.chunkSenders[chunk.Index] = chunk.Sender + + // Signal any waiters that the chunk has arrived. + for _, waiter := range q.waiters[chunk.Index] { + waiter <- chunk.Index + close(waiter) + } + delete(q.waiters, chunk.Index) + + return true, nil +} + +// Allocate allocates a chunk to the caller, making it responsible for fetching it. Returns +// errDone once no chunks are left or the queue is closed. +func (q *chunkQueue) Allocate() (uint32, error) { + q.Lock() + defer q.Unlock() + if q.snapshot == nil { + return 0, errDone + } + if uint32(len(q.chunkAllocated)) >= q.snapshot.Chunks { + return 0, errDone + } + for i := uint32(0); i < q.snapshot.Chunks; i++ { + if !q.chunkAllocated[i] { + q.chunkAllocated[i] = true + return i, nil + } + } + return 0, errDone +} + +// Close closes the chunk queue, cleaning up all temporary files. +func (q *chunkQueue) Close() error { + q.Lock() + defer q.Unlock() + if q.snapshot == nil { + return nil + } + for _, waiters := range q.waiters { + for _, waiter := range waiters { + close(waiter) + } + } + q.waiters = nil + q.snapshot = nil + err := os.RemoveAll(q.dir) + if err != nil { + return fmt.Errorf("failed to clean up state sync tempdir %v: %w", q.dir, err) + } + return nil +} + +// Discard discards a chunk. It will be removed from the queue, available for allocation, and can +// be added and returned via Next() again. If the chunk is not already in the queue this does +// nothing, to avoid it being allocated to multiple fetchers. +func (q *chunkQueue) Discard(index uint32) error { + q.Lock() + defer q.Unlock() + return q.discard(index) +} + +// discard discards a chunk, scheduling it for refetching. The caller must hold the mutex lock. +func (q *chunkQueue) discard(index uint32) error { + if q.snapshot == nil { + return nil + } + path := q.chunkFiles[index] + if path == "" { + return nil + } + err := os.Remove(path) + if err != nil { + return fmt.Errorf("failed to remove chunk %v: %w", index, err) + } + delete(q.chunkFiles, index) + delete(q.chunkReturned, index) + delete(q.chunkAllocated, index) + return nil +} + +// DiscardSender discards all *unreturned* chunks from a given sender. If the caller wants to +// discard already returned chunks, this can be done via Discard(). 
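+//
+// For example, a caller that detects a misbehaving peer could combine the two
+// (peerID and index are placeholders):
+//
+//	if err := q.DiscardSender(peerID); err != nil { // unreturned chunks only
+//		return err
+//	}
+//	if err := q.Discard(index); err != nil { // an already-returned chunk
+//		return err
+//	}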
+func (q *chunkQueue) DiscardSender(peerID p2p.ID) error { + q.Lock() + defer q.Unlock() + + for index, sender := range q.chunkSenders { + if sender == peerID && !q.chunkReturned[index] { + err := q.discard(index) + if err != nil { + return err + } + delete(q.chunkSenders, index) + } + } + return nil +} + +// GetSender returns the sender of the chunk with the given index, or empty if not found. +func (q *chunkQueue) GetSender(index uint32) p2p.ID { + q.Lock() + defer q.Unlock() + return q.chunkSenders[index] +} + +// Has checks whether a chunk exists in the queue. +func (q *chunkQueue) Has(index uint32) bool { + q.Lock() + defer q.Unlock() + return q.chunkFiles[index] != "" +} + +// load loads a chunk from disk, or nil if the chunk is not in the queue. The caller must hold the +// mutex lock. +func (q *chunkQueue) load(index uint32) (*chunk, error) { + path, ok := q.chunkFiles[index] + if !ok { + return nil, nil + } + body, err := ioutil.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to load chunk %v: %w", index, err) + } + return &chunk{ + Height: q.snapshot.Height, + Format: q.snapshot.Format, + Index: index, + Chunk: body, + Sender: q.chunkSenders[index], + }, nil +} + +// Next returns the next chunk from the queue, or errDone if all chunks have been returned. It +// blocks until the chunk is available. Concurrent Next() calls may return the same chunk. +func (q *chunkQueue) Next() (*chunk, error) { + q.Lock() + var chunk *chunk + index, err := q.nextUp() + if err == nil { + chunk, err = q.load(index) + if err == nil { + q.chunkReturned[index] = true + } + } + q.Unlock() + if chunk != nil || err != nil { + return chunk, err + } + + select { + case _, ok := <-q.WaitFor(index): + if !ok { + return nil, errDone // queue closed + } + case <-time.After(chunkTimeout): + return nil, errTimeout + } + + q.Lock() + defer q.Unlock() + chunk, err = q.load(index) + if err != nil { + return nil, err + } + q.chunkReturned[index] = true + return chunk, nil +} + +// nextUp returns the next chunk to be returned, or errDone if all chunks have been returned. The +// caller must hold the mutex lock. +func (q *chunkQueue) nextUp() (uint32, error) { + if q.snapshot == nil { + return 0, errDone + } + for i := uint32(0); i < q.snapshot.Chunks; i++ { + if !q.chunkReturned[i] { + return i, nil + } + } + return 0, errDone +} + +// Retry schedules a chunk to be retried, without refetching it. +func (q *chunkQueue) Retry(index uint32) { + q.Lock() + defer q.Unlock() + delete(q.chunkReturned, index) +} + +// RetryAll schedules all chunks to be retried, without refetching them. +func (q *chunkQueue) RetryAll() { + q.Lock() + defer q.Unlock() + q.chunkReturned = make(map[uint32]bool) +} + +// Size returns the total number of chunks for the snapshot and queue, or 0 when closed. +func (q *chunkQueue) Size() uint32 { + q.Lock() + defer q.Unlock() + if q.snapshot == nil { + return 0 + } + return q.snapshot.Chunks +} + +// WaitFor returns a channel that receives a chunk index when it arrives in the queue, or +// immediately if it has already arrived. The channel is closed without a value if the queue is +// closed or if the chunk index is not valid. 
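+//
+// A typical consumer pairs the channel with a timeout, as Next() does
+// (index is a placeholder):
+//
+//	select {
+//	case _, ok := <-q.WaitFor(index):
+//		if !ok {
+//			// queue closed, or index out of range
+//		}
+//	case <-time.After(chunkTimeout):
+//		// chunk never arrived; retry or give up
+//	}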
+func (q *chunkQueue) WaitFor(index uint32) <-chan uint32 { + q.Lock() + defer q.Unlock() + ch := make(chan uint32, 1) + switch { + case q.snapshot == nil: + close(ch) + case index >= q.snapshot.Chunks: + close(ch) + case q.chunkFiles[index] != "": + ch <- index + close(ch) + default: + if q.waiters[index] == nil { + q.waiters[index] = make([]chan<- uint32, 0) + } + q.waiters[index] = append(q.waiters[index], ch) + } + return ch +} diff --git a/statesync/chunks_test.go b/statesync/chunks_test.go new file mode 100644 index 000000000..2b9a5d751 --- /dev/null +++ b/statesync/chunks_test.go @@ -0,0 +1,551 @@ +package statesync + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/p2p" +) + +func setupChunkQueue(t *testing.T) (*chunkQueue, func()) { + snapshot := &snapshot{ + Height: 3, + Format: 1, + Chunks: 5, + Hash: []byte{7}, + Metadata: nil, + } + queue, err := newChunkQueue(snapshot, "") + require.NoError(t, err) + teardown := func() { + err := queue.Close() + require.NoError(t, err) + } + return queue, teardown +} + +func TestNewChunkQueue_TempDir(t *testing.T) { + snapshot := &snapshot{ + Height: 3, + Format: 1, + Chunks: 5, + Hash: []byte{7}, + Metadata: nil, + } + dir, err := ioutil.TempDir("", "newchunkqueue") + require.NoError(t, err) + defer os.RemoveAll(dir) + queue, err := newChunkQueue(snapshot, dir) + require.NoError(t, err) + + files, err := ioutil.ReadDir(dir) + require.NoError(t, err) + assert.Len(t, files, 1) + + err = queue.Close() + require.NoError(t, err) + + files, err = ioutil.ReadDir(dir) + require.NoError(t, err) + assert.Len(t, files, 0) +} + +func TestChunkQueue(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + // Adding the first chunk should be fine + added, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}}) + require.NoError(t, err) + assert.True(t, added) + + // Adding the last chunk should also be fine + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}}) + require.NoError(t, err) + assert.True(t, added) + + // Adding the first or last chunks again should return false + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}}) + require.NoError(t, err) + assert.False(t, added) + + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}}) + require.NoError(t, err) + assert.False(t, added) + + // Adding the remaining chunks in reverse should be fine + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}}) + require.NoError(t, err) + assert.True(t, added) + + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}}) + require.NoError(t, err) + assert.True(t, added) + + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}}) + require.NoError(t, err) + assert.True(t, added) + + // At this point, we should be able to retrieve them all via Next + for i := 0; i < 5; i++ { + c, err := queue.Next() + require.NoError(t, err) + assert.Equal(t, &chunk{Height: 3, Format: 1, Index: uint32(i), Chunk: []byte{3, 1, byte(i)}}, c) + } + _, err = queue.Next() + require.Error(t, err) + assert.Equal(t, errDone, err) + + // It should still be possible to try to add chunks (which will be ignored) + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}}) + require.NoError(t, err) + assert.False(t, added) + 
+	// After closing the queue it will also return false
+	err = queue.Close()
+	require.NoError(t, err)
+	added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}})
+	require.NoError(t, err)
+	assert.False(t, added)
+
+	// Closing the queue again should also be fine
+	err = queue.Close()
+	require.NoError(t, err)
+}
+
+func TestChunkQueue_Add_ChunkErrors(t *testing.T) {
+	testcases := map[string]struct {
+		chunk *chunk
+	}{
+		"nil chunk":     {nil},
+		"nil body":      {&chunk{Height: 3, Format: 1, Index: 0, Chunk: nil}},
+		"wrong height":  {&chunk{Height: 9, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}}},
+		"wrong format":  {&chunk{Height: 3, Format: 9, Index: 0, Chunk: []byte{3, 1, 0}}},
+		"invalid index": {&chunk{Height: 3, Format: 1, Index: 5, Chunk: []byte{3, 1, 0}}},
+	}
+	for name, tc := range testcases {
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			queue, teardown := setupChunkQueue(t)
+			defer teardown()
+			_, err := queue.Add(tc.chunk)
+			require.Error(t, err)
+		})
+	}
+}
+
+func TestChunkQueue_Allocate(t *testing.T) {
+	queue, teardown := setupChunkQueue(t)
+	defer teardown()
+
+	for i := uint32(0); i < queue.Size(); i++ {
+		index, err := queue.Allocate()
+		require.NoError(t, err)
+		assert.EqualValues(t, i, index)
+	}
+
+	_, err := queue.Allocate()
+	require.Error(t, err)
+	assert.Equal(t, errDone, err)
+
+	for i := uint32(0); i < queue.Size(); i++ {
+		_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}})
+		require.NoError(t, err)
+	}
+
+	// After all chunks have been allocated and retrieved, discarding a chunk will reallocate it.
+	err = queue.Discard(2)
+	require.NoError(t, err)
+
+	index, err := queue.Allocate()
+	require.NoError(t, err)
+	assert.EqualValues(t, 2, index)
+	_, err = queue.Allocate()
+	require.Error(t, err)
+	assert.Equal(t, errDone, err)
+
+	// Discarding a chunk then closing the queue will return errDone.
+	err = queue.Discard(2)
+	require.NoError(t, err)
+	err = queue.Close()
+	require.NoError(t, err)
+	_, err = queue.Allocate()
+	require.Error(t, err)
+	assert.Equal(t, errDone, err)
+}
+
+func TestChunkQueue_Discard(t *testing.T) {
+	queue, teardown := setupChunkQueue(t)
+	defer teardown()
+
+	// Add a few chunks to the queue and fetch a couple
+	_, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{byte(0)}})
+	require.NoError(t, err)
+	_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{byte(1)}})
+	require.NoError(t, err)
+	_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{byte(2)}})
+	require.NoError(t, err)
+
+	c, err := queue.Next()
+	require.NoError(t, err)
+	assert.EqualValues(t, 0, c.Index)
+	c, err = queue.Next()
+	require.NoError(t, err)
+	assert.EqualValues(t, 1, c.Index)
+
+	// Discarding the first chunk and re-adding it should cause it to be returned
+	// immediately by Next(), before proceeding with chunk 2
+	err = queue.Discard(0)
+	require.NoError(t, err)
+	added, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{byte(0)}})
+	require.NoError(t, err)
+	assert.True(t, added)
+	c, err = queue.Next()
+	require.NoError(t, err)
+	assert.EqualValues(t, 0, c.Index)
+	c, err = queue.Next()
+	require.NoError(t, err)
+	assert.EqualValues(t, 2, c.Index)
+
+	// Discard then allocate, add and fetch all chunks
+	for i := uint32(0); i < queue.Size(); i++ {
+		err := queue.Discard(i)
+		require.NoError(t, err)
+	}
+	for i := uint32(0); i < queue.Size(); i++ {
+		_, err := queue.Allocate()
+		require.NoError(t, err)
+		_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}})
+		require.NoError(t, err)
+		c, err = queue.Next()
+		require.NoError(t, err)
+		assert.EqualValues(t, i, c.Index)
+	}
+
+	// Discarding a non-existent chunk does nothing.
+	err = queue.Discard(99)
+	require.NoError(t, err)
+
+	// When we discard a couple of chunks, we should be able to allocate, add, and fetch them again.
+ err = queue.Discard(3) + require.NoError(t, err) + err = queue.Discard(1) + require.NoError(t, err) + + index, err := queue.Allocate() + require.NoError(t, err) + assert.EqualValues(t, 1, index) + index, err = queue.Allocate() + require.NoError(t, err) + assert.EqualValues(t, 3, index) + + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3}}) + require.NoError(t, err) + assert.True(t, added) + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{1}}) + require.NoError(t, err) + assert.True(t, added) + + chunk, err := queue.Next() + require.NoError(t, err) + assert.EqualValues(t, 1, chunk.Index) + + chunk, err = queue.Next() + require.NoError(t, err) + assert.EqualValues(t, 3, chunk.Index) + + _, err = queue.Next() + require.Error(t, err) + assert.Equal(t, errDone, err) + + // After closing the queue, discarding does nothing + err = queue.Close() + require.NoError(t, err) + err = queue.Discard(2) + require.NoError(t, err) +} + +func TestChunkQueue_DiscardSender(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + // Allocate and add all chunks to the queue + senders := []p2p.ID{"a", "b", "c"} + for i := uint32(0); i < queue.Size(); i++ { + _, err := queue.Allocate() + require.NoError(t, err) + _, err = queue.Add(&chunk{ + Height: 3, + Format: 1, + Index: i, + Chunk: []byte{byte(i)}, + Sender: senders[int(i)%len(senders)], + }) + require.NoError(t, err) + } + + // Fetch the first three chunks + for i := uint32(0); i < 3; i++ { + _, err := queue.Next() + require.NoError(t, err) + } + + // Discarding an unknown sender should do nothing + err := queue.DiscardSender("x") + require.NoError(t, err) + _, err = queue.Allocate() + assert.Equal(t, errDone, err) + + // Discarding sender b should discard chunk 4, but not chunk 1 which has already been + // returned. + err = queue.DiscardSender("b") + require.NoError(t, err) + index, err := queue.Allocate() + require.NoError(t, err) + assert.EqualValues(t, 4, index) + _, err = queue.Allocate() + assert.Equal(t, errDone, err) +} + +func TestChunkQueue_GetSender(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{1}, Sender: p2p.ID("a")}) + require.NoError(t, err) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{2}, Sender: p2p.ID("b")}) + require.NoError(t, err) + + assert.EqualValues(t, "a", queue.GetSender(0)) + assert.EqualValues(t, "b", queue.GetSender(1)) + assert.EqualValues(t, "", queue.GetSender(2)) + + // After the chunk has been processed, we should still know who the sender was + chunk, err := queue.Next() + require.NoError(t, err) + require.NotNil(t, chunk) + require.EqualValues(t, 0, chunk.Index) + assert.EqualValues(t, "a", queue.GetSender(0)) +} + +func TestChunkQueue_Next(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + // Next should block waiting for the next chunks, even when given out of order. 
+ chNext := make(chan *chunk, 10) + go func() { + for { + c, err := queue.Next() + if err == errDone { + close(chNext) + break + } + require.NoError(t, err) + chNext <- c + } + }() + + assert.Empty(t, chNext) + _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: p2p.ID("b")}) + require.NoError(t, err) + select { + case <-chNext: + assert.Fail(t, "channel should be empty") + default: + } + + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: p2p.ID("a")}) + require.NoError(t, err) + + assert.Equal(t, + &chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: p2p.ID("a")}, + <-chNext) + assert.Equal(t, + &chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: p2p.ID("b")}, + <-chNext) + + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: p2p.ID("e")}) + require.NoError(t, err) + select { + case <-chNext: + assert.Fail(t, "channel should be empty") + default: + } + + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: p2p.ID("c")}) + require.NoError(t, err) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: p2p.ID("d")}) + require.NoError(t, err) + + assert.Equal(t, + &chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: p2p.ID("c")}, + <-chNext) + assert.Equal(t, + &chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: p2p.ID("d")}, + <-chNext) + assert.Equal(t, + &chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: p2p.ID("e")}, + <-chNext) + + _, ok := <-chNext + assert.False(t, ok, "channel should be closed") + + // Calling next on a finished queue should return done + _, err = queue.Next() + assert.Equal(t, errDone, err) +} + +func TestChunkQueue_Next_Closed(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + // Calling Next on a closed queue should return done + _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}}) + require.NoError(t, err) + err = queue.Close() + require.NoError(t, err) + + _, err = queue.Next() + assert.Equal(t, errDone, err) +} + +func TestChunkQueue_Retry(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + // Allocate and add all chunks to the queue + for i := uint32(0); i < queue.Size(); i++ { + _, err := queue.Allocate() + require.NoError(t, err) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}}) + require.NoError(t, err) + _, err = queue.Next() + require.NoError(t, err) + } + + // Retrying a couple of chunks makes Next() return them, but they are not allocatable + queue.Retry(3) + queue.Retry(1) + + _, err := queue.Allocate() + assert.Equal(t, errDone, err) + + chunk, err := queue.Next() + require.NoError(t, err) + assert.EqualValues(t, 1, chunk.Index) + + chunk, err = queue.Next() + require.NoError(t, err) + assert.EqualValues(t, 3, chunk.Index) + + _, err = queue.Next() + assert.Equal(t, errDone, err) +} + +func TestChunkQueue_RetryAll(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + // Allocate and add all chunks to the queue + for i := uint32(0); i < queue.Size(); i++ { + _, err := queue.Allocate() + require.NoError(t, err) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}}) + require.NoError(t, err) + _, err = queue.Next() + require.NoError(t, err) + } + + _, err := queue.Next() + assert.Equal(t, errDone, 
err)
+
+	queue.RetryAll()
+
+	_, err = queue.Allocate()
+	assert.Equal(t, errDone, err)
+
+	for i := uint32(0); i < queue.Size(); i++ {
+		chunk, err := queue.Next()
+		require.NoError(t, err)
+		assert.EqualValues(t, i, chunk.Index)
+	}
+
+	_, err = queue.Next()
+	assert.Equal(t, errDone, err)
+}
+
+func TestChunkQueue_Size(t *testing.T) {
+	queue, teardown := setupChunkQueue(t)
+	defer teardown()
+
+	assert.EqualValues(t, 5, queue.Size())
+
+	err := queue.Close()
+	require.NoError(t, err)
+	assert.EqualValues(t, 0, queue.Size())
+}
+
+func TestChunkQueue_WaitFor(t *testing.T) {
+	queue, teardown := setupChunkQueue(t)
+	defer teardown()
+
+	waitFor1 := queue.WaitFor(1)
+	waitFor4 := queue.WaitFor(4)
+
+	// Adding 0 and 2 should not trigger waiters
+	_, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}})
+	require.NoError(t, err)
+	_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}})
+	require.NoError(t, err)
+	select {
+	case <-waitFor1:
+		require.Fail(t, "WaitFor(1) should not trigger on 0 or 2")
+	case <-waitFor4:
+		require.Fail(t, "WaitFor(4) should not trigger on 0 or 2")
+	default:
+	}
+
+	// Adding 1 should trigger WaitFor(1), but not WaitFor(4). The channel should be closed.
+	_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}})
+	require.NoError(t, err)
+	assert.EqualValues(t, 1, <-waitFor1)
+	_, ok := <-waitFor1
+	assert.False(t, ok)
+	select {
+	case <-waitFor4:
+		require.Fail(t, "WaitFor(4) should not trigger on 0 or 2")
+	default:
+	}
+
+	// Fetch the first chunk. At this point, waiting for either 0 (retrieved from pool) or 1
+	// (queued in pool) should return immediately.
+	c, err := queue.Next()
+	require.NoError(t, err)
+	assert.EqualValues(t, 0, c.Index)
+
+	w := queue.WaitFor(0)
+	assert.EqualValues(t, 0, <-w)
+	_, ok = <-w
+	assert.False(t, ok)
+
+	w = queue.WaitFor(1)
+	assert.EqualValues(t, 1, <-w)
+	_, ok = <-w
+	assert.False(t, ok)
+
+	// Close the queue. This should cause the waiter for 4 to close, and also cause any future
+	// waiters to get closed channels.
+	err = queue.Close()
+	require.NoError(t, err)
+	_, ok = <-waitFor4
+	assert.False(t, ok)
+
+	w = queue.WaitFor(3)
+	_, ok = <-w
+	assert.False(t, ok)
+}
diff --git a/statesync/messages.go b/statesync/messages.go
new file mode 100644
index 000000000..b07227bbf
--- /dev/null
+++ b/statesync/messages.go
@@ -0,0 +1,97 @@
+package statesync
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/gogo/protobuf/proto"
+
+	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
+)
+
+const (
+	// snapshotMsgSize is the maximum size of a snapshotResponseMessage
+	snapshotMsgSize = int(4e6)
+	// chunkMsgSize is the maximum size of a chunkResponseMessage
+	chunkMsgSize = int(16e6)
+)
+
+// mustEncodeMsg encodes a Protobuf message, panicking on error.
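+//
+// Together with decodeMsg below, this gives a symmetric round trip; as a
+// sketch:
+//
+//	bz := mustEncodeMsg(&ssproto.SnapshotsRequest{})
+//	msg, err := decodeMsg(bz) // msg is a *ssproto.SnapshotsRequest, err is nil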
+func mustEncodeMsg(pb proto.Message) []byte { + msg := ssproto.Message{} + switch pb := pb.(type) { + case *ssproto.ChunkRequest: + msg.Sum = &ssproto.Message_ChunkRequest{ChunkRequest: pb} + case *ssproto.ChunkResponse: + msg.Sum = &ssproto.Message_ChunkResponse{ChunkResponse: pb} + case *ssproto.SnapshotsRequest: + msg.Sum = &ssproto.Message_SnapshotsRequest{SnapshotsRequest: pb} + case *ssproto.SnapshotsResponse: + msg.Sum = &ssproto.Message_SnapshotsResponse{SnapshotsResponse: pb} + default: + panic(fmt.Errorf("unknown message type %T", pb)) + } + bz, err := msg.Marshal() + if err != nil { + panic(fmt.Errorf("unable to marshal %T: %w", pb, err)) + } + return bz +} + +// decodeMsg decodes a Protobuf message. +func decodeMsg(bz []byte) (proto.Message, error) { + pb := &ssproto.Message{} + err := proto.Unmarshal(bz, pb) + if err != nil { + return nil, err + } + switch msg := pb.Sum.(type) { + case *ssproto.Message_ChunkRequest: + return msg.ChunkRequest, nil + case *ssproto.Message_ChunkResponse: + return msg.ChunkResponse, nil + case *ssproto.Message_SnapshotsRequest: + return msg.SnapshotsRequest, nil + case *ssproto.Message_SnapshotsResponse: + return msg.SnapshotsResponse, nil + default: + return nil, fmt.Errorf("unknown message type %T", msg) + } +} + +// validateMsg validates a message. +func validateMsg(pb proto.Message) error { + if pb == nil { + return errors.New("message cannot be nil") + } + switch msg := pb.(type) { + case *ssproto.ChunkRequest: + if msg.Height == 0 { + return errors.New("height cannot be 0") + } + case *ssproto.ChunkResponse: + if msg.Height == 0 { + return errors.New("height cannot be 0") + } + if msg.Missing && len(msg.Chunk) > 0 { + return errors.New("missing chunk cannot have contents") + } + if !msg.Missing && msg.Chunk == nil { + return errors.New("chunk cannot be nil") + } + case *ssproto.SnapshotsRequest: + case *ssproto.SnapshotsResponse: + if msg.Height == 0 { + return errors.New("height cannot be 0") + } + if len(msg.Hash) == 0 { + return errors.New("snapshot has no hash") + } + if msg.Chunks == 0 { + return errors.New("snapshot has no chunks") + } + default: + return fmt.Errorf("unknown message type %T", msg) + } + return nil +} diff --git a/statesync/messages_test.go b/statesync/messages_test.go new file mode 100644 index 000000000..2a05f8d79 --- /dev/null +++ b/statesync/messages_test.go @@ -0,0 +1,107 @@ +package statesync + +import ( + "encoding/hex" + "testing" + + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/require" + + ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" +) + +func TestValidateMsg(t *testing.T) { + testcases := map[string]struct { + msg proto.Message + valid bool + }{ + "nil": {nil, false}, + "unrelated": {&tmproto.Block{}, false}, + + "ChunkRequest valid": {&ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, true}, + "ChunkRequest 0 height": {&ssproto.ChunkRequest{Height: 0, Format: 1, Index: 1}, false}, + "ChunkRequest 0 format": {&ssproto.ChunkRequest{Height: 1, Format: 0, Index: 1}, true}, + "ChunkRequest 0 chunk": {&ssproto.ChunkRequest{Height: 1, Format: 1, Index: 0}, true}, + + "ChunkResponse valid": { + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}}, + true}, + "ChunkResponse 0 height": { + &ssproto.ChunkResponse{Height: 0, Format: 1, Index: 1, Chunk: []byte{1}}, + false}, + "ChunkResponse 0 format": { + &ssproto.ChunkResponse{Height: 1, Format: 0, Index: 1, Chunk: []byte{1}}, + 
true}, + "ChunkResponse 0 chunk": { + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 0, Chunk: []byte{1}}, + true}, + "ChunkResponse empty body": { + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{}}, + true}, + "ChunkResponse nil body": { + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: nil}, + false}, + "ChunkResponse missing": { + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true}, + true}, + "ChunkResponse missing with empty": { + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true, Chunk: []byte{}}, + true}, + "ChunkResponse missing with body": { + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true, Chunk: []byte{1}}, + false}, + + "SnapshotsRequest valid": {&ssproto.SnapshotsRequest{}, true}, + + "SnapshotsResponse valid": { + &ssproto.SnapshotsResponse{Height: 1, Format: 1, Chunks: 2, Hash: []byte{1}}, + true}, + "SnapshotsResponse 0 height": { + &ssproto.SnapshotsResponse{Height: 0, Format: 1, Chunks: 2, Hash: []byte{1}}, + false}, + "SnapshotsResponse 0 format": { + &ssproto.SnapshotsResponse{Height: 1, Format: 0, Chunks: 2, Hash: []byte{1}}, + true}, + "SnapshotsResponse 0 chunks": { + &ssproto.SnapshotsResponse{Height: 1, Format: 1, Hash: []byte{1}}, + false}, + "SnapshotsResponse no hash": { + &ssproto.SnapshotsResponse{Height: 1, Format: 1, Chunks: 2, Hash: []byte{}}, + false}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + err := validateMsg(tc.msg) + if tc.valid { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} + +//nolint:lll // ignore line length +func TestStateSyncVectors(t *testing.T) { + + testCases := []struct { + testName string + msg proto.Message + expBytes string + }{ + {"SnapshotsRequest", &ssproto.SnapshotsRequest{}, "0a00"}, + {"SnapshotsResponse", &ssproto.SnapshotsResponse{Height: 1, Format: 2, Chunks: 3, Hash: []byte("chuck hash"), Metadata: []byte("snapshot metadata")}, "1225080110021803220a636875636b20686173682a11736e617073686f74206d65746164617461"}, + {"ChunkRequest", &ssproto.ChunkRequest{Height: 1, Format: 2, Index: 3}, "1a06080110021803"}, + {"ChunkResponse", &ssproto.ChunkResponse{Height: 1, Format: 2, Index: 3, Chunk: []byte("it's a chunk")}, "2214080110021803220c697427732061206368756e6b"}, + } + + for _, tc := range testCases { + tc := tc + + bz := mustEncodeMsg(tc.msg) + + require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) + } +} diff --git a/statesync/mocks/state_provider.go b/statesync/mocks/state_provider.go new file mode 100644 index 000000000..888553165 --- /dev/null +++ b/statesync/mocks/state_provider.go @@ -0,0 +1,84 @@ +// Code generated by mockery v2.3.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + state "github.com/tendermint/tendermint/state" + + types "github.com/tendermint/tendermint/types" +) + +// StateProvider is an autogenerated mock type for the StateProvider type +type StateProvider struct { + mock.Mock +} + +// AppHash provides a mock function with given fields: ctx, height +func (_m *StateProvider) AppHash(ctx context.Context, height uint64) ([]byte, error) { + ret := _m.Called(ctx, height) + + var r0 []byte + if rf, ok := ret.Get(0).(func(context.Context, uint64) []byte); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Commit provides a mock function with given fields: ctx, height +func (_m *StateProvider) Commit(ctx context.Context, height uint64) (*types.Commit, error) { + ret := _m.Called(ctx, height) + + var r0 *types.Commit + if rf, ok := ret.Get(0).(func(context.Context, uint64) *types.Commit); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Commit) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// State provides a mock function with given fields: ctx, height +func (_m *StateProvider) State(ctx context.Context, height uint64) (state.State, error) { + ret := _m.Called(ctx, height) + + var r0 state.State + if rf, ok := ret.Get(0).(func(context.Context, uint64) state.State); ok { + r0 = rf(ctx, height) + } else { + r0 = ret.Get(0).(state.State) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/statesync/reactor.go b/statesync/reactor.go new file mode 100644 index 000000000..0abd91cf1 --- /dev/null +++ b/statesync/reactor.go @@ -0,0 +1,269 @@ +package statesync + +import ( + "errors" + "sort" + "time" + + abci "github.com/tendermint/tendermint/abci/types" + tmsync "github.com/tendermint/tendermint/libs/sync" + "github.com/tendermint/tendermint/p2p" + ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" +) + +const ( + // SnapshotChannel exchanges snapshot metadata + SnapshotChannel = byte(0x60) + // ChunkChannel exchanges chunk contents + ChunkChannel = byte(0x61) + // recentSnapshots is the number of recent snapshots to send and receive per peer. + recentSnapshots = 10 +) + +// Reactor handles state sync, both restoring snapshots for the local node and serving snapshots +// for other nodes. +type Reactor struct { + p2p.BaseReactor + + conn proxy.AppConnSnapshot + connQuery proxy.AppConnQuery + tempDir string + + // This will only be set when a state sync is in progress. It is used to feed received + // snapshots and chunks into the sync. + mtx tmsync.RWMutex + syncer *syncer +} + +// NewReactor creates a new state sync reactor. +func NewReactor(conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, tempDir string) *Reactor { + r := &Reactor{ + conn: conn, + connQuery: connQuery, + } + r.BaseReactor = *p2p.NewBaseReactor("StateSync", r) + return r +} + +// GetChannels implements p2p.Reactor. 
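+//
+// The snapshot channel is configured below with a higher priority and a
+// smaller receive capacity (snapshotMsgSize) than the chunk channel
+// (chunkMsgSize), presumably because snapshot metadata is small and drives
+// peer selection, while chunk payloads dominate bandwidth.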
+func (r *Reactor) GetChannels() []*p2p.ChannelDescriptor { + return []*p2p.ChannelDescriptor{ + { + ID: SnapshotChannel, + Priority: 3, + SendQueueCapacity: 10, + RecvMessageCapacity: snapshotMsgSize, + }, + { + ID: ChunkChannel, + Priority: 1, + SendQueueCapacity: 4, + RecvMessageCapacity: chunkMsgSize, + }, + } +} + +// OnStart implements p2p.Reactor. +func (r *Reactor) OnStart() error { + return nil +} + +// AddPeer implements p2p.Reactor. +func (r *Reactor) AddPeer(peer p2p.Peer) { + r.mtx.RLock() + defer r.mtx.RUnlock() + if r.syncer != nil { + r.syncer.AddPeer(peer) + } +} + +// RemovePeer implements p2p.Reactor. +func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { + r.mtx.RLock() + defer r.mtx.RUnlock() + if r.syncer != nil { + r.syncer.RemovePeer(peer) + } +} + +// Receive implements p2p.Reactor. +// XXX: do not call any methods that can block or incur heavy processing. +// https://github.com/tendermint/tendermint/issues/2888 +func (r *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { + if !r.IsRunning() { + return + } + + msg, err := decodeMsg(msgBytes) + if err != nil { + r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) + r.Switch.StopPeerForError(src, err) + return + } + err = validateMsg(msg) + if err != nil { + r.Logger.Error("Invalid message", "peer", src, "msg", msg, "err", err) + r.Switch.StopPeerForError(src, err) + return + } + + switch chID { + case SnapshotChannel: + switch msg := msg.(type) { + case *ssproto.SnapshotsRequest: + snapshots, err := r.recentSnapshots(recentSnapshots) + if err != nil { + r.Logger.Error("Failed to fetch snapshots", "err", err) + return + } + for _, snapshot := range snapshots { + r.Logger.Debug("Advertising snapshot", "height", snapshot.Height, + "format", snapshot.Format, "peer", src.ID()) + src.Send(chID, mustEncodeMsg(&ssproto.SnapshotsResponse{ + Height: snapshot.Height, + Format: snapshot.Format, + Chunks: snapshot.Chunks, + Hash: snapshot.Hash, + Metadata: snapshot.Metadata, + })) + } + + case *ssproto.SnapshotsResponse: + r.mtx.RLock() + defer r.mtx.RUnlock() + if r.syncer == nil { + r.Logger.Debug("Received unexpected snapshot, no state sync in progress") + return + } + r.Logger.Debug("Received snapshot", "height", msg.Height, "format", msg.Format, "peer", src.ID()) + _, err := r.syncer.AddSnapshot(src, &snapshot{ + Height: msg.Height, + Format: msg.Format, + Chunks: msg.Chunks, + Hash: msg.Hash, + Metadata: msg.Metadata, + }) + if err != nil { + r.Logger.Error("Failed to add snapshot", "height", msg.Height, "format", msg.Format, + "peer", src.ID(), "err", err) + return + } + + default: + r.Logger.Error("Received unknown message %T", msg) + } + + case ChunkChannel: + switch msg := msg.(type) { + case *ssproto.ChunkRequest: + r.Logger.Debug("Received chunk request", "height", msg.Height, "format", msg.Format, + "chunk", msg.Index, "peer", src.ID()) + resp, err := r.conn.LoadSnapshotChunkSync(abci.RequestLoadSnapshotChunk{ + Height: msg.Height, + Format: msg.Format, + Chunk: msg.Index, + }) + if err != nil { + r.Logger.Error("Failed to load chunk", "height", msg.Height, "format", msg.Format, + "chunk", msg.Index, "err", err) + return + } + r.Logger.Debug("Sending chunk", "height", msg.Height, "format", msg.Format, + "chunk", msg.Index, "peer", src.ID()) + src.Send(ChunkChannel, mustEncodeMsg(&ssproto.ChunkResponse{ + Height: msg.Height, + Format: msg.Format, + Index: msg.Index, + Chunk: resp.Chunk, + Missing: resp.Chunk == nil, + })) + + case 
*ssproto.ChunkResponse: + r.mtx.RLock() + defer r.mtx.RUnlock() + if r.syncer == nil { + r.Logger.Debug("Received unexpected chunk, no state sync in progress", "peer", src.ID()) + return + } + r.Logger.Debug("Received chunk, adding to sync", "height", msg.Height, "format", msg.Format, + "chunk", msg.Index, "peer", src.ID()) + _, err := r.syncer.AddChunk(&chunk{ + Height: msg.Height, + Format: msg.Format, + Index: msg.Index, + Chunk: msg.Chunk, + Sender: src.ID(), + }) + if err != nil { + r.Logger.Error("Failed to add chunk", "height", msg.Height, "format", msg.Format, + "chunk", msg.Index, "err", err) + return + } + + default: + r.Logger.Error("Received unknown message %T", msg) + } + + default: + r.Logger.Error("Received message on invalid channel %x", chID) + } +} + +// recentSnapshots fetches the n most recent snapshots from the app +func (r *Reactor) recentSnapshots(n uint32) ([]*snapshot, error) { + resp, err := r.conn.ListSnapshotsSync(abci.RequestListSnapshots{}) + if err != nil { + return nil, err + } + sort.Slice(resp.Snapshots, func(i, j int) bool { + a := resp.Snapshots[i] + b := resp.Snapshots[j] + switch { + case a.Height > b.Height: + return true + case a.Height == b.Height && a.Format > b.Format: + return true + default: + return false + } + }) + snapshots := make([]*snapshot, 0, n) + for i, s := range resp.Snapshots { + if i >= recentSnapshots { + break + } + snapshots = append(snapshots, &snapshot{ + Height: s.Height, + Format: s.Format, + Chunks: s.Chunks, + Hash: s.Hash, + Metadata: s.Metadata, + }) + } + return snapshots, nil +} + +// Sync runs a state sync, returning the new state and last commit at the snapshot height. +// The caller must store the state and commit in the state database and block store. +func (r *Reactor) Sync(stateProvider StateProvider, discoveryTime time.Duration) (sm.State, *types.Commit, error) { + r.mtx.Lock() + if r.syncer != nil { + r.mtx.Unlock() + return sm.State{}, nil, errors.New("a state sync is already in progress") + } + r.syncer = newSyncer(r.Logger, r.conn, r.connQuery, stateProvider, r.tempDir) + r.mtx.Unlock() + + // Request snapshots from all currently connected peers + r.Logger.Debug("Requesting snapshots from known peers") + r.Switch.Broadcast(SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})) + + state, commit, err := r.syncer.SyncAny(discoveryTime) + r.mtx.Lock() + r.syncer = nil + r.mtx.Unlock() + return state, commit, err +} diff --git a/statesync/reactor_test.go b/statesync/reactor_test.go new file mode 100644 index 000000000..49d8376b8 --- /dev/null +++ b/statesync/reactor_test.go @@ -0,0 +1,157 @@ +package statesync + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/p2p" + p2pmocks "github.com/tendermint/tendermint/p2p/mocks" + ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" + proxymocks "github.com/tendermint/tendermint/proxy/mocks" +) + +func TestReactor_Receive_ChunkRequest(t *testing.T) { + testcases := map[string]struct { + request *ssproto.ChunkRequest + chunk []byte + expectResponse *ssproto.ChunkResponse + }{ + "chunk is returned": { + &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, + []byte{1, 2, 3}, + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{1, 2, 3}}}, + "empty chunk is returned, as nil": { + &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, + []byte{}, 
+ &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: nil}}, + "nil (missing) chunk is returned as missing": { + &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, + nil, + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true}, + }, + } + + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + // Mock ABCI connection to return local snapshots + conn := &proxymocks.AppConnSnapshot{} + conn.On("LoadSnapshotChunkSync", abci.RequestLoadSnapshotChunk{ + Height: tc.request.Height, + Format: tc.request.Format, + Chunk: tc.request.Index, + }).Return(&abci.ResponseLoadSnapshotChunk{Chunk: tc.chunk}, nil) + + // Mock peer to store response, if found + peer := &p2pmocks.Peer{} + peer.On("ID").Return(p2p.ID("id")) + var response *ssproto.ChunkResponse + if tc.expectResponse != nil { + peer.On("Send", ChunkChannel, mock.Anything).Run(func(args mock.Arguments) { + msg, err := decodeMsg(args[1].([]byte)) + require.NoError(t, err) + response = msg.(*ssproto.ChunkResponse) + }).Return(true) + } + + // Start a reactor and send a ssproto.ChunkRequest, then wait for and check response + r := NewReactor(conn, nil, "") + err := r.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := r.Stop(); err != nil { + t.Error(err) + } + }) + + r.Receive(ChunkChannel, peer, mustEncodeMsg(tc.request)) + time.Sleep(100 * time.Millisecond) + assert.Equal(t, tc.expectResponse, response) + + conn.AssertExpectations(t) + peer.AssertExpectations(t) + }) + } +} + +func TestReactor_Receive_SnapshotsRequest(t *testing.T) { + testcases := map[string]struct { + snapshots []*abci.Snapshot + expectResponses []*ssproto.SnapshotsResponse + }{ + "no snapshots": {nil, []*ssproto.SnapshotsResponse{}}, + ">10 unordered snapshots": { + []*abci.Snapshot{ + {Height: 1, Format: 2, Chunks: 7, Hash: []byte{1, 2}, Metadata: []byte{1}}, + {Height: 2, Format: 2, Chunks: 7, Hash: []byte{2, 2}, Metadata: []byte{2}}, + {Height: 3, Format: 2, Chunks: 7, Hash: []byte{3, 2}, Metadata: []byte{3}}, + {Height: 1, Format: 1, Chunks: 7, Hash: []byte{1, 1}, Metadata: []byte{4}}, + {Height: 2, Format: 1, Chunks: 7, Hash: []byte{2, 1}, Metadata: []byte{5}}, + {Height: 3, Format: 1, Chunks: 7, Hash: []byte{3, 1}, Metadata: []byte{6}}, + {Height: 1, Format: 4, Chunks: 7, Hash: []byte{1, 4}, Metadata: []byte{7}}, + {Height: 2, Format: 4, Chunks: 7, Hash: []byte{2, 4}, Metadata: []byte{8}}, + {Height: 3, Format: 4, Chunks: 7, Hash: []byte{3, 4}, Metadata: []byte{9}}, + {Height: 1, Format: 3, Chunks: 7, Hash: []byte{1, 3}, Metadata: []byte{10}}, + {Height: 2, Format: 3, Chunks: 7, Hash: []byte{2, 3}, Metadata: []byte{11}}, + {Height: 3, Format: 3, Chunks: 7, Hash: []byte{3, 3}, Metadata: []byte{12}}, + }, + []*ssproto.SnapshotsResponse{ + {Height: 3, Format: 4, Chunks: 7, Hash: []byte{3, 4}, Metadata: []byte{9}}, + {Height: 3, Format: 3, Chunks: 7, Hash: []byte{3, 3}, Metadata: []byte{12}}, + {Height: 3, Format: 2, Chunks: 7, Hash: []byte{3, 2}, Metadata: []byte{3}}, + {Height: 3, Format: 1, Chunks: 7, Hash: []byte{3, 1}, Metadata: []byte{6}}, + {Height: 2, Format: 4, Chunks: 7, Hash: []byte{2, 4}, Metadata: []byte{8}}, + {Height: 2, Format: 3, Chunks: 7, Hash: []byte{2, 3}, Metadata: []byte{11}}, + {Height: 2, Format: 2, Chunks: 7, Hash: []byte{2, 2}, Metadata: []byte{2}}, + {Height: 2, Format: 1, Chunks: 7, Hash: []byte{2, 1}, Metadata: []byte{5}}, + {Height: 1, Format: 4, Chunks: 7, Hash: []byte{1, 4}, Metadata: []byte{7}}, + {Height: 1, Format: 3, Chunks: 7, Hash: []byte{1, 3}, Metadata: 
[]byte{10}}, + }, + }, + } + + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + // Mock ABCI connection to return local snapshots + conn := &proxymocks.AppConnSnapshot{} + conn.On("ListSnapshotsSync", abci.RequestListSnapshots{}).Return(&abci.ResponseListSnapshots{ + Snapshots: tc.snapshots, + }, nil) + + // Mock peer to catch responses and store them in a slice + responses := []*ssproto.SnapshotsResponse{} + peer := &p2pmocks.Peer{} + if len(tc.expectResponses) > 0 { + peer.On("ID").Return(p2p.ID("id")) + peer.On("Send", SnapshotChannel, mock.Anything).Run(func(args mock.Arguments) { + msg, err := decodeMsg(args[1].([]byte)) + require.NoError(t, err) + responses = append(responses, msg.(*ssproto.SnapshotsResponse)) + }).Return(true) + } + + // Start a reactor and send a SnapshotsRequestMessage, then wait for and check responses + r := NewReactor(conn, nil, "") + err := r.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := r.Stop(); err != nil { + t.Error(err) + } + }) + + r.Receive(SnapshotChannel, peer, mustEncodeMsg(&ssproto.SnapshotsRequest{})) + time.Sleep(100 * time.Millisecond) + assert.Equal(t, tc.expectResponses, responses) + + conn.AssertExpectations(t) + peer.AssertExpectations(t) + }) + } +} diff --git a/statesync/snapshots.go b/statesync/snapshots.go new file mode 100644 index 000000000..3eca5c0be --- /dev/null +++ b/statesync/snapshots.go @@ -0,0 +1,268 @@ +package statesync + +import ( + "context" + "crypto/sha256" + "fmt" + "math/rand" + "sort" + "time" + + tmsync "github.com/tendermint/tendermint/libs/sync" + "github.com/tendermint/tendermint/p2p" +) + +// snapshotKey is a snapshot key used for lookups. +type snapshotKey [sha256.Size]byte + +// snapshot contains data about a snapshot. +type snapshot struct { + Height uint64 + Format uint32 + Chunks uint32 + Hash []byte + Metadata []byte + + trustedAppHash []byte // populated by light client +} + +// Key generates a snapshot key, used for lookups. It takes into account not only the height and +// format, but also the chunks, hash, and metadata in case peers have generated snapshots in a +// non-deterministic manner. All fields must be equal for the snapshot to be considered the same. +func (s *snapshot) Key() snapshotKey { + // Hash.Write() never returns an error. + hasher := sha256.New() + hasher.Write([]byte(fmt.Sprintf("%v:%v:%v", s.Height, s.Format, s.Chunks))) //nolint:errcheck // ignore error + hasher.Write(s.Hash) //nolint:errcheck // ignore error + hasher.Write(s.Metadata) //nolint:errcheck // ignore error + var key snapshotKey + copy(key[:], hasher.Sum(nil)) + return key +} + +// snapshotPool discovers and aggregates snapshots across peers. +type snapshotPool struct { + stateProvider StateProvider + + tmsync.Mutex + snapshots map[snapshotKey]*snapshot + snapshotPeers map[snapshotKey]map[p2p.ID]p2p.Peer + + // indexes for fast searches + formatIndex map[uint32]map[snapshotKey]bool + heightIndex map[uint64]map[snapshotKey]bool + peerIndex map[p2p.ID]map[snapshotKey]bool + + // blacklists for rejected items + formatBlacklist map[uint32]bool + peerBlacklist map[p2p.ID]bool + snapshotBlacklist map[snapshotKey]bool +} + +// newSnapshotPool creates a new snapshot pool. 
The state source is used to verify snapshot heights and to fetch the trusted app hash recorded on each snapshot.
+func newSnapshotPool(stateProvider StateProvider) *snapshotPool {
+	return &snapshotPool{
+		stateProvider:     stateProvider,
+		snapshots:         make(map[snapshotKey]*snapshot),
+		snapshotPeers:     make(map[snapshotKey]map[p2p.ID]p2p.Peer),
+		formatIndex:       make(map[uint32]map[snapshotKey]bool),
+		heightIndex:       make(map[uint64]map[snapshotKey]bool),
+		peerIndex:         make(map[p2p.ID]map[snapshotKey]bool),
+		formatBlacklist:   make(map[uint32]bool),
+		peerBlacklist:     make(map[p2p.ID]bool),
+		snapshotBlacklist: make(map[snapshotKey]bool),
+	}
+}
+
+// Add adds a snapshot to the pool, unless the peer has already sent recentSnapshots snapshots. It
+// returns true if this was a new, non-blacklisted snapshot. The snapshot height is verified using
+// the light client, and the expected app hash is set for the snapshot.
+func (p *snapshotPool) Add(peer p2p.Peer, snapshot *snapshot) (bool, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	appHash, err := p.stateProvider.AppHash(ctx, snapshot.Height)
+	if err != nil {
+		return false, err
+	}
+	snapshot.trustedAppHash = appHash
+	key := snapshot.Key()
+
+	p.Lock()
+	defer p.Unlock()
+
+	switch {
+	case p.formatBlacklist[snapshot.Format]:
+		return false, nil
+	case p.peerBlacklist[peer.ID()]:
+		return false, nil
+	case p.snapshotBlacklist[key]:
+		return false, nil
+	case len(p.peerIndex[peer.ID()]) >= recentSnapshots:
+		return false, nil
+	}
+
+	if p.snapshotPeers[key] == nil {
+		p.snapshotPeers[key] = make(map[p2p.ID]p2p.Peer)
+	}
+	p.snapshotPeers[key][peer.ID()] = peer
+
+	if p.peerIndex[peer.ID()] == nil {
+		p.peerIndex[peer.ID()] = make(map[snapshotKey]bool)
+	}
+	p.peerIndex[peer.ID()][key] = true
+
+	if p.snapshots[key] != nil {
+		return false, nil
+	}
+	p.snapshots[key] = snapshot
+
+	if p.formatIndex[snapshot.Format] == nil {
+		p.formatIndex[snapshot.Format] = make(map[snapshotKey]bool)
+	}
+	p.formatIndex[snapshot.Format][key] = true
+
+	if p.heightIndex[snapshot.Height] == nil {
+		p.heightIndex[snapshot.Height] = make(map[snapshotKey]bool)
+	}
+	p.heightIndex[snapshot.Height][key] = true
+
+	return true, nil
+}
+
+// Best returns the "best" currently known snapshot, if any.
+func (p *snapshotPool) Best() *snapshot {
+	ranked := p.Ranked()
+	if len(ranked) == 0 {
+		return nil
+	}
+	return ranked[0]
+}
+
+// GetPeer returns a random peer for a snapshot, if any.
+func (p *snapshotPool) GetPeer(snapshot *snapshot) p2p.Peer {
+	peers := p.GetPeers(snapshot)
+	if len(peers) == 0 {
+		return nil
+	}
+	return peers[rand.Intn(len(peers))] // nolint:gosec // G404: Use of weak random number generator
+}
+
+// GetPeers returns the peers for a snapshot.
+func (p *snapshotPool) GetPeers(snapshot *snapshot) []p2p.Peer {
+	key := snapshot.Key()
+	p.Lock()
+	defer p.Unlock()
+
+	peers := make([]p2p.Peer, 0, len(p.snapshotPeers[key]))
+	for _, peer := range p.snapshotPeers[key] {
+		peers = append(peers, peer)
+	}
+	// sort results, for testability (otherwise order is random, so tests randomly fail)
+	sort.Slice(peers, func(a int, b int) bool {
+		return peers[a].ID() < peers[b].ID()
+	})
+	return peers
+}
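The deduplication that Add relies on is easiest to see in isolation. The following standalone sketch (illustrative only, not part of this diff) mirrors snapshot.Key() to show why the same snapshot reported by many peers is stored once, while changing any field yields a distinct key:

package main

import (
	"crypto/sha256"
	"fmt"
)

// key mirrors snapshot.Key(): height, format, and chunk count are folded in
// together with the hash and metadata, so all fields must match for two
// reports to count as the same snapshot.
func key(height uint64, format, chunks uint32, hash, metadata []byte) [sha256.Size]byte {
	h := sha256.New()
	h.Write([]byte(fmt.Sprintf("%v:%v:%v", height, format, chunks)))
	h.Write(hash)
	h.Write(metadata)
	var k [sha256.Size]byte
	copy(k[:], h.Sum(nil))
	return k
}

func main() {
	a := key(1, 1, 4, []byte{1, 2}, nil) // reported by peer A
	b := key(1, 1, 4, []byte{1, 2}, nil) // same snapshot reported by peer B
	c := key(1, 2, 4, []byte{1, 2}, nil) // different format => different snapshot
	fmt.Println(a == b, a == c)          // true false
}

+// Ranked returns a list of snapshots ranked by preference. The current heuristic is very naïve,
+// preferring the snapshot with the greatest height, then greatest format, then greatest number of
+// peers. This can be improved quite a lot.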
+func (p *snapshotPool) Ranked() []*snapshot { + p.Lock() + defer p.Unlock() + + candidates := make([]*snapshot, 0, len(p.snapshots)) + for _, snapshot := range p.snapshots { + candidates = append(candidates, snapshot) + } + + sort.Slice(candidates, func(i, j int) bool { + a := candidates[i] + b := candidates[j] + + switch { + case a.Height > b.Height: + return true + case a.Height < b.Height: + return false + case a.Format > b.Format: + return true + case a.Format < b.Format: + return false + case len(p.snapshotPeers[a.Key()]) > len(p.snapshotPeers[b.Key()]): + return true + default: + return false + } + }) + + return candidates +} + +// Reject rejects a snapshot. Rejected snapshots will never be used again. +func (p *snapshotPool) Reject(snapshot *snapshot) { + key := snapshot.Key() + p.Lock() + defer p.Unlock() + + p.snapshotBlacklist[key] = true + p.removeSnapshot(key) +} + +// RejectFormat rejects a snapshot format. It will never be used again. +func (p *snapshotPool) RejectFormat(format uint32) { + p.Lock() + defer p.Unlock() + + p.formatBlacklist[format] = true + for key := range p.formatIndex[format] { + p.removeSnapshot(key) + } +} + +// RejectPeer rejects a peer. It will never be used again. +func (p *snapshotPool) RejectPeer(peerID p2p.ID) { + if peerID == "" { + return + } + p.Lock() + defer p.Unlock() + + p.removePeer(peerID) + p.peerBlacklist[peerID] = true +} + +// RemovePeer removes a peer from the pool, and any snapshots that no longer have peers. +func (p *snapshotPool) RemovePeer(peerID p2p.ID) { + p.Lock() + defer p.Unlock() + p.removePeer(peerID) +} + +// removePeer removes a peer. The caller must hold the mutex lock. +func (p *snapshotPool) removePeer(peerID p2p.ID) { + for key := range p.peerIndex[peerID] { + delete(p.snapshotPeers[key], peerID) + if len(p.snapshotPeers[key]) == 0 { + p.removeSnapshot(key) + } + } + delete(p.peerIndex, peerID) +} + +// removeSnapshot removes a snapshot. The caller must hold the mutex lock. 
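To make the ranking heuristic above concrete, here is a self-contained sketch (invented values, outside the diff) of the same comparator applied to plain structs; the removeSnapshot helper documented above continues right after it.

package main

import (
	"fmt"
	"sort"
)

type cand struct {
	height uint64
	format uint32
	peers  int
}

func main() {
	cs := []cand{{1, 1, 3}, {2, 1, 1}, {2, 2, 1}, {2, 2, 5}}
	// Same preference order as snapshotPool.Ranked: height descending, then
	// format descending, then number of peers descending.
	sort.Slice(cs, func(i, j int) bool {
		a, b := cs[i], cs[j]
		switch {
		case a.height != b.height:
			return a.height > b.height
		case a.format != b.format:
			return a.format > b.format
		default:
			return a.peers > b.peers
		}
	})
	fmt.Println(cs) // [{2 2 5} {2 2 1} {2 1 1} {1 1 3}]
}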
+func (p *snapshotPool) removeSnapshot(key snapshotKey) {
+	snapshot := p.snapshots[key]
+	if snapshot == nil {
+		return
+	}
+
+	delete(p.snapshots, key)
+	delete(p.formatIndex[snapshot.Format], key)
+	delete(p.heightIndex[snapshot.Height], key)
+	for peerID := range p.snapshotPeers[key] {
+		delete(p.peerIndex[peerID], key)
+	}
+	delete(p.snapshotPeers, key)
+}
diff --git a/statesync/snapshots_test.go b/statesync/snapshots_test.go
new file mode 100644
index 000000000..588c0ac31
--- /dev/null
+++ b/statesync/snapshots_test.go
@@ -0,0 +1,325 @@
+package statesync
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+
+	"github.com/tendermint/tendermint/p2p"
+	p2pmocks "github.com/tendermint/tendermint/p2p/mocks"
+	"github.com/tendermint/tendermint/statesync/mocks"
+)
+
+func TestSnapshot_Key(t *testing.T) {
+	testcases := map[string]struct {
+		modify func(*snapshot)
+	}{
+		"new height":      {func(s *snapshot) { s.Height = 9 }},
+		"new format":      {func(s *snapshot) { s.Format = 9 }},
+		"new chunk count": {func(s *snapshot) { s.Chunks = 9 }},
+		"new hash":        {func(s *snapshot) { s.Hash = []byte{9} }},
+		"no metadata":     {func(s *snapshot) { s.Metadata = nil }},
+	}
+	for name, tc := range testcases {
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			s := snapshot{
+				Height:   3,
+				Format:   1,
+				Chunks:   7,
+				Hash:     []byte{1, 2, 3},
+				Metadata: []byte{255},
+			}
+			before := s.Key()
+			tc.modify(&s)
+			after := s.Key()
+			assert.NotEqual(t, before, after)
+		})
+	}
+}
+
+func TestSnapshotPool_Add(t *testing.T) {
+	stateProvider := &mocks.StateProvider{}
+	stateProvider.On("AppHash", mock.Anything, uint64(1)).Return([]byte("app_hash"), nil)
+
+	peer := &p2pmocks.Peer{}
+	peer.On("ID").Return(p2p.ID("id"))
+
+	// Adding to the pool should work
+	pool := newSnapshotPool(stateProvider)
+	added, err := pool.Add(peer, &snapshot{
+		Height: 1,
+		Format: 1,
+		Chunks: 1,
+		Hash:   []byte{1},
+	})
+	require.NoError(t, err)
+	assert.True(t, added)
+
+	// Adding again from a different peer should return false
+	otherPeer := &p2pmocks.Peer{}
+	otherPeer.On("ID").Return(p2p.ID("other"))
+	added, err = pool.Add(otherPeer, &snapshot{
+		Height: 1,
+		Format: 1,
+		Chunks: 1,
+		Hash:   []byte{1},
+	})
+	require.NoError(t, err)
+	assert.False(t, added)
+
+	// The pool should have populated the snapshot with the trusted app hash
+	snapshot := pool.Best()
+	require.NotNil(t, snapshot)
+	assert.Equal(t, []byte("app_hash"), snapshot.trustedAppHash)
+
+	stateProvider.AssertExpectations(t)
+}
+
+func TestSnapshotPool_GetPeer(t *testing.T) {
+	stateProvider := &mocks.StateProvider{}
+	stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)
+	pool := newSnapshotPool(stateProvider)
+
+	s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}}
+	peerA := &p2pmocks.Peer{}
+	peerA.On("ID").Return(p2p.ID("a"))
+	peerB := &p2pmocks.Peer{}
+	peerB.On("ID").Return(p2p.ID("b"))
+
+	_, err := pool.Add(peerA, s)
+	require.NoError(t, err)
+	_, err = pool.Add(peerB, s)
+	require.NoError(t, err)
+	_, err = pool.Add(peerA, &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{1}})
+	require.NoError(t, err)
+
+	// GetPeer currently picks a random peer, so let's run it until we've seen both.
+ seenA := false + seenB := false + for !seenA || !seenB { + peer := pool.GetPeer(s) + switch peer.ID() { + case p2p.ID("a"): + seenA = true + case p2p.ID("b"): + seenB = true + } + } + + // GetPeer should return nil for an unknown snapshot + peer := pool.GetPeer(&snapshot{Height: 9, Format: 9}) + assert.Nil(t, peer) +} + +func TestSnapshotPool_GetPeers(t *testing.T) { + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) + pool := newSnapshotPool(stateProvider) + + s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} + peerA := &p2pmocks.Peer{} + peerA.On("ID").Return(p2p.ID("a")) + peerB := &p2pmocks.Peer{} + peerB.On("ID").Return(p2p.ID("b")) + + _, err := pool.Add(peerA, s) + require.NoError(t, err) + _, err = pool.Add(peerB, s) + require.NoError(t, err) + _, err = pool.Add(peerA, &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}}) + require.NoError(t, err) + + peers := pool.GetPeers(s) + assert.Len(t, peers, 2) + assert.EqualValues(t, "a", peers[0].ID()) + assert.EqualValues(t, "b", peers[1].ID()) +} + +func TestSnapshotPool_Ranked_Best(t *testing.T) { + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) + pool := newSnapshotPool(stateProvider) + + // snapshots in expected order (best to worst). Highest height wins, then highest format. + // Snapshots with different chunk hashes are considered different, and the most peers is + // tie-breaker. + expectSnapshots := []struct { + snapshot *snapshot + peers []string + }{ + {&snapshot{Height: 2, Format: 2, Chunks: 4, Hash: []byte{1, 3}}, []string{"a", "b", "c"}}, + {&snapshot{Height: 2, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []string{"a"}}, + {&snapshot{Height: 2, Format: 1, Chunks: 3, Hash: []byte{1, 2}}, []string{"a", "b"}}, + {&snapshot{Height: 1, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []string{"a", "b"}}, + {&snapshot{Height: 1, Format: 1, Chunks: 4, Hash: []byte{1, 2}}, []string{"a", "b", "c"}}, + } + + // Add snapshots in reverse order, to make sure the pool enforces some order. 
+ for i := len(expectSnapshots) - 1; i >= 0; i-- { + for _, peerID := range expectSnapshots[i].peers { + peer := &p2pmocks.Peer{} + peer.On("ID").Return(p2p.ID(peerID)) + _, err := pool.Add(peer, expectSnapshots[i].snapshot) + require.NoError(t, err) + } + } + + // Ranked should return the snapshots in the same order + ranked := pool.Ranked() + assert.Len(t, ranked, len(expectSnapshots)) + for i := range ranked { + assert.Equal(t, expectSnapshots[i].snapshot, ranked[i]) + } + + // Check that best snapshots are returned in expected order + for i := range expectSnapshots { + snapshot := expectSnapshots[i].snapshot + require.Equal(t, snapshot, pool.Best()) + pool.Reject(snapshot) + } + assert.Nil(t, pool.Best()) +} + +func TestSnapshotPool_Reject(t *testing.T) { + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) + pool := newSnapshotPool(stateProvider) + peer := &p2pmocks.Peer{} + peer.On("ID").Return(p2p.ID("id")) + + snapshots := []*snapshot{ + {Height: 2, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, + {Height: 2, Format: 1, Chunks: 1, Hash: []byte{1, 2}}, + {Height: 1, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, + {Height: 1, Format: 1, Chunks: 1, Hash: []byte{1, 2}}, + } + for _, s := range snapshots { + _, err := pool.Add(peer, s) + require.NoError(t, err) + } + + pool.Reject(snapshots[0]) + assert.Equal(t, snapshots[1:], pool.Ranked()) + + added, err := pool.Add(peer, snapshots[0]) + require.NoError(t, err) + assert.False(t, added) + + added, err = pool.Add(peer, &snapshot{Height: 3, Format: 3, Chunks: 1, Hash: []byte{1}}) + require.NoError(t, err) + assert.True(t, added) +} + +func TestSnapshotPool_RejectFormat(t *testing.T) { + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) + pool := newSnapshotPool(stateProvider) + peer := &p2pmocks.Peer{} + peer.On("ID").Return(p2p.ID("id")) + + snapshots := []*snapshot{ + {Height: 2, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, + {Height: 2, Format: 1, Chunks: 1, Hash: []byte{1, 2}}, + {Height: 1, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, + {Height: 1, Format: 1, Chunks: 1, Hash: []byte{1, 2}}, + } + for _, s := range snapshots { + _, err := pool.Add(peer, s) + require.NoError(t, err) + } + + pool.RejectFormat(1) + assert.Equal(t, []*snapshot{snapshots[0], snapshots[2]}, pool.Ranked()) + + added, err := pool.Add(peer, &snapshot{Height: 3, Format: 1, Chunks: 1, Hash: []byte{1}}) + require.NoError(t, err) + assert.False(t, added) + assert.Equal(t, []*snapshot{snapshots[0], snapshots[2]}, pool.Ranked()) + + added, err = pool.Add(peer, &snapshot{Height: 3, Format: 3, Chunks: 1, Hash: []byte{1}}) + require.NoError(t, err) + assert.True(t, added) +} + +func TestSnapshotPool_RejectPeer(t *testing.T) { + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) + pool := newSnapshotPool(stateProvider) + + peerA := &p2pmocks.Peer{} + peerA.On("ID").Return(p2p.ID("a")) + peerB := &p2pmocks.Peer{} + peerB.On("ID").Return(p2p.ID("b")) + + s1 := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} + s2 := &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}} + s3 := &snapshot{Height: 3, Format: 1, Chunks: 1, Hash: []byte{2}} + + _, err := pool.Add(peerA, s1) + require.NoError(t, err) + _, err = pool.Add(peerA, s2) + require.NoError(t, err) + + _, err = pool.Add(peerB, s2) + require.NoError(t, err) + _, err = 
pool.Add(peerB, s3)
+	require.NoError(t, err)
+
+	pool.RejectPeer(peerA.ID())
+
+	assert.Empty(t, pool.GetPeers(s1))
+
+	peers2 := pool.GetPeers(s2)
+	assert.Len(t, peers2, 1)
+	assert.EqualValues(t, "b", peers2[0].ID())
+
+	peers3 := pool.GetPeers(s3)
+	assert.Len(t, peers3, 1)
+	assert.EqualValues(t, "b", peers3[0].ID())
+
+	// it should no longer be possible to add the peer back
+	_, err = pool.Add(peerA, s1)
+	require.NoError(t, err)
+	assert.Empty(t, pool.GetPeers(s1))
+}
+
+func TestSnapshotPool_RemovePeer(t *testing.T) {
+	stateProvider := &mocks.StateProvider{}
+	stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)
+	pool := newSnapshotPool(stateProvider)
+
+	peerA := &p2pmocks.Peer{}
+	peerA.On("ID").Return(p2p.ID("a"))
+	peerB := &p2pmocks.Peer{}
+	peerB.On("ID").Return(p2p.ID("b"))
+
+	s1 := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}}
+	s2 := &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}}
+
+	_, err := pool.Add(peerA, s1)
+	require.NoError(t, err)
+	_, err = pool.Add(peerA, s2)
+	require.NoError(t, err)
+	_, err = pool.Add(peerB, s1)
+	require.NoError(t, err)
+
+	pool.RemovePeer(peerA.ID())
+
+	peers1 := pool.GetPeers(s1)
+	assert.Len(t, peers1, 1)
+	assert.EqualValues(t, "b", peers1[0].ID())
+
+	peers2 := pool.GetPeers(s2)
+	assert.Empty(t, peers2)
+
+	// it should still be possible to add the peer back
+	_, err = pool.Add(peerA, s1)
+	require.NoError(t, err)
+	peers1 = pool.GetPeers(s1)
+	assert.Len(t, peers1, 2)
+	assert.EqualValues(t, "a", peers1[0].ID())
+	assert.EqualValues(t, "b", peers1[1].ID())
+}
diff --git a/statesync/stateprovider.go b/statesync/stateprovider.go
new file mode 100644
index 000000000..4b1c75e32
--- /dev/null
+++ b/statesync/stateprovider.go
@@ -0,0 +1,202 @@
+package statesync
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	dbm "github.com/tendermint/tm-db"
+
+	"github.com/tendermint/tendermint/libs/log"
+	tmsync "github.com/tendermint/tendermint/libs/sync"
+	"github.com/tendermint/tendermint/light"
+	lightprovider "github.com/tendermint/tendermint/light/provider"
+	lighthttp "github.com/tendermint/tendermint/light/provider/http"
+	lightrpc "github.com/tendermint/tendermint/light/rpc"
+	lightdb "github.com/tendermint/tendermint/light/store/db"
+	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
+	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
+	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/types"
+)
+
+//go:generate mockery --case underscore --name StateProvider
+
+// StateProvider is a provider of trusted state data for bootstrapping a node. This refers
+// to the state.State object, not the state machine.
+type StateProvider interface {
+	// AppHash returns the app hash after the given height has been committed.
+	AppHash(ctx context.Context, height uint64) ([]byte, error)
+	// Commit returns the commit at the given height.
+	Commit(ctx context.Context, height uint64) (*types.Commit, error)
+	// State returns a state object at the given height.
+	State(ctx context.Context, height uint64) (sm.State, error)
+}
+
+// lightClientStateProvider is a state provider using the light client.
+type lightClientStateProvider struct {
+	tmsync.Mutex  // light.Client is not concurrency-safe
+	lc            *light.Client
+	version       tmstate.Version
+	initialHeight int64
+	providers     map[lightprovider.Provider]string
+}
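As a sketch of how a node might wire this up (all endpoints, heights, and hashes below are placeholders I've invented; the trusted height and header hash must come from an out-of-band source the operator trusts, such as a block explorer):

// Hypothetical helper in this package; nothing here is part of the diff.
func exampleProvider(ctx context.Context, trustedHash []byte) (StateProvider, error) {
	return NewLightClientStateProvider(
		ctx,
		"example-chain",   // chain ID (placeholder)
		tmstate.Version{}, // state version (placeholder)
		1,                 // initial height
		[]string{"a.example.com:26657", "b.example.com:26657"}, // at least 2 servers required
		light.TrustOptions{
			Period: 168 * time.Hour, // how long the trust root stays valid
			Height: 100,             // trusted height, obtained out of band
			Hash:   trustedHash,     // header hash at that height, obtained out of band
		},
		log.NewNopLogger(),
	)
}

+// NewLightClientStateProvider creates a new StateProvider using a light client and RPC clients.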
+func NewLightClientStateProvider( + ctx context.Context, + chainID string, + version tmstate.Version, + initialHeight int64, + servers []string, + trustOptions light.TrustOptions, + logger log.Logger, +) (StateProvider, error) { + if len(servers) < 2 { + return nil, fmt.Errorf("at least 2 RPC servers are required, got %v", len(servers)) + } + + providers := make([]lightprovider.Provider, 0, len(servers)) + providerRemotes := make(map[lightprovider.Provider]string) + for _, server := range servers { + client, err := rpcClient(server) + if err != nil { + return nil, fmt.Errorf("failed to set up RPC client: %w", err) + } + provider := lighthttp.NewWithClient(chainID, client) + providers = append(providers, provider) + // We store the RPC addresses keyed by provider, so we can find the address of the primary + // provider used by the light client and use it to fetch consensus parameters. + providerRemotes[provider] = server + } + + lc, err := light.NewClient(ctx, chainID, trustOptions, providers[0], providers[1:], + lightdb.New(dbm.NewMemDB(), ""), light.Logger(logger), light.MaxRetryAttempts(5)) + if err != nil { + return nil, err + } + return &lightClientStateProvider{ + lc: lc, + version: version, + initialHeight: initialHeight, + providers: providerRemotes, + }, nil +} + +// AppHash implements StateProvider. +func (s *lightClientStateProvider) AppHash(ctx context.Context, height uint64) ([]byte, error) { + s.Lock() + defer s.Unlock() + + // We have to fetch the next height, which contains the app hash for the previous height. + header, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height+1), time.Now()) + if err != nil { + return nil, err + } + // We also try to fetch the blocks at height H and H+2, since we need these + // when building the state while restoring the snapshot. This avoids the race + // condition where we try to restore a snapshot before H+2 exists. + // + // FIXME This is a hack, since we can't add new methods to the interface without + // breaking it. We should instead have a Has(ctx, height) method which checks + // that the state provider has access to the necessary data for the height. + // We piggyback on AppHash() since it's called when adding snapshots to the pool. + _, err = s.lc.VerifyLightBlockAtHeight(ctx, int64(height+2), time.Now()) + if err != nil { + return nil, err + } + _, err = s.lc.VerifyLightBlockAtHeight(ctx, int64(height), time.Now()) + if err != nil { + return nil, err + } + return header.AppHash, nil +} + +// Commit implements StateProvider. +func (s *lightClientStateProvider) Commit(ctx context.Context, height uint64) (*types.Commit, error) { + s.Lock() + defer s.Unlock() + header, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height), time.Now()) + if err != nil { + return nil, err + } + return header.Commit, nil +} + +// State implements StateProvider. +func (s *lightClientStateProvider) State(ctx context.Context, height uint64) (sm.State, error) { + s.Lock() + defer s.Unlock() + + state := sm.State{ + ChainID: s.lc.ChainID(), + Version: s.version, + InitialHeight: s.initialHeight, + } + if state.InitialHeight == 0 { + state.InitialHeight = 1 + } + + // The snapshot height maps onto the state heights as follows: + // + // height: last block, i.e. the snapshotted height + // height+1: current block, i.e. the first block we'll process after the snapshot + // height+2: next block, i.e. 
the second block after the snapshot
+	//
+	// We need to fetch the NextValidators from height+2 because if the application changed
+	// the validator set at the snapshot height then this only takes effect at height+2.
+	lastLightBlock, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height), time.Now())
+	if err != nil {
+		return sm.State{}, err
+	}
+	curLightBlock, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height+1), time.Now())
+	if err != nil {
+		return sm.State{}, err
+	}
+	nextLightBlock, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height+2), time.Now())
+	if err != nil {
+		return sm.State{}, err
+	}
+
+	state.LastBlockHeight = lastLightBlock.Height
+	state.LastBlockTime = lastLightBlock.Time
+	state.LastBlockID = lastLightBlock.Commit.BlockID
+	state.AppHash = curLightBlock.AppHash
+	state.LastResultsHash = curLightBlock.LastResultsHash
+	state.LastValidators = lastLightBlock.ValidatorSet
+	state.Validators = curLightBlock.ValidatorSet
+	state.NextValidators = nextLightBlock.ValidatorSet
+	state.LastHeightValidatorsChanged = nextLightBlock.Height
+
+	// We'll also need to fetch consensus params via RPC, using light client verification.
+	primaryURL, ok := s.providers[s.lc.Primary()]
+	if !ok || primaryURL == "" {
+		return sm.State{}, fmt.Errorf("could not find address for primary light client provider")
+	}
+	primaryRPC, err := rpcClient(primaryURL)
+	if err != nil {
+		return sm.State{}, fmt.Errorf("unable to create RPC client: %w", err)
+	}
+	rpcclient := lightrpc.NewClient(primaryRPC, s.lc)
+	result, err := rpcclient.ConsensusParams(ctx, &nextLightBlock.Height)
+	if err != nil {
+		return sm.State{}, fmt.Errorf("unable to fetch consensus parameters for height %v: %w",
+			nextLightBlock.Height, err)
+	}
+	state.ConsensusParams = result.ConsensusParams
+
+	return state, nil
+}
+
+// rpcClient sets up a new RPC client.
+func rpcClient(server string) (*rpchttp.HTTP, error) {
+	if !strings.Contains(server, "://") {
+		server = "http://" + server
+	}
+	c, err := rpchttp.New(server, "/websocket")
+	if err != nil {
+		return nil, err
+	}
+	return c, nil
+}
diff --git a/statesync/syncer.go b/statesync/syncer.go
new file mode 100644
index 000000000..8bf0f7f7b
--- /dev/null
+++ b/statesync/syncer.go
@@ -0,0 +1,443 @@
+package statesync
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/libs/log"
+	tmsync "github.com/tendermint/tendermint/libs/sync"
+	"github.com/tendermint/tendermint/p2p"
+	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
+	"github.com/tendermint/tendermint/proxy"
+	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/types"
+)
+
+const (
+	// chunkFetchers is the number of concurrent chunk fetchers to run.
+	chunkFetchers = 4
+	// chunkTimeout is the timeout while waiting for the next chunk from the chunk queue.
+	chunkTimeout = 2 * time.Minute
+	// chunkRequestTimeout is the timeout before rerequesting a chunk, possibly from a different peer.
+	chunkRequestTimeout = 10 * time.Second
+)
+
+var (
+	// errAbort is returned by Sync() when snapshot restoration is aborted.
+	errAbort = errors.New("state sync aborted")
+	// errRetrySnapshot is returned by Sync() when the snapshot should be retried.
+	errRetrySnapshot = errors.New("retry snapshot")
+	// errRejectSnapshot is returned by Sync() when the snapshot is rejected.
+ errRejectSnapshot = errors.New("snapshot was rejected") + // errRejectFormat is returned by Sync() when the snapshot format is rejected. + errRejectFormat = errors.New("snapshot format was rejected") + // errRejectSender is returned by Sync() when the snapshot sender is rejected. + errRejectSender = errors.New("snapshot sender was rejected") + // errVerifyFailed is returned by Sync() when app hash or last height verification fails. + errVerifyFailed = errors.New("verification failed") + // errTimeout is returned by Sync() when we've waited too long to receive a chunk. + errTimeout = errors.New("timed out waiting for chunk") + // errNoSnapshots is returned by SyncAny() if no snapshots are found and discovery is disabled. + errNoSnapshots = errors.New("no suitable snapshots found") +) + +// syncer runs a state sync against an ABCI app. Use either SyncAny() to automatically attempt to +// sync all snapshots in the pool (pausing to discover new ones), or Sync() to sync a specific +// snapshot. Snapshots and chunks are fed via AddSnapshot() and AddChunk() as appropriate. +type syncer struct { + logger log.Logger + stateProvider StateProvider + conn proxy.AppConnSnapshot + connQuery proxy.AppConnQuery + snapshots *snapshotPool + tempDir string + + mtx tmsync.RWMutex + chunks *chunkQueue +} + +// newSyncer creates a new syncer. +func newSyncer(logger log.Logger, conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, + stateProvider StateProvider, tempDir string) *syncer { + return &syncer{ + logger: logger, + stateProvider: stateProvider, + conn: conn, + connQuery: connQuery, + snapshots: newSnapshotPool(stateProvider), + tempDir: tempDir, + } +} + +// AddChunk adds a chunk to the chunk queue, if any. It returns false if the chunk has already +// been added to the queue, or an error if there's no sync in progress. +func (s *syncer) AddChunk(chunk *chunk) (bool, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + if s.chunks == nil { + return false, errors.New("no state sync in progress") + } + added, err := s.chunks.Add(chunk) + if err != nil { + return false, err + } + if added { + s.logger.Debug("Added chunk to queue", "height", chunk.Height, "format", chunk.Format, + "chunk", chunk.Index) + } else { + s.logger.Debug("Ignoring duplicate chunk in queue", "height", chunk.Height, "format", chunk.Format, + "chunk", chunk.Index) + } + return added, nil +} + +// AddSnapshot adds a snapshot to the snapshot pool. It returns true if a new, previously unseen +// snapshot was accepted and added. +func (s *syncer) AddSnapshot(peer p2p.Peer, snapshot *snapshot) (bool, error) { + added, err := s.snapshots.Add(peer, snapshot) + if err != nil { + return false, err + } + if added { + s.logger.Info("Discovered new snapshot", "height", snapshot.Height, "format", snapshot.Format, + "hash", fmt.Sprintf("%X", snapshot.Hash)) + } + return added, nil +} + +// AddPeer adds a peer to the pool. For now we just keep it simple and send a single request +// to discover snapshots, later we may want to do retries and stuff. +func (s *syncer) AddPeer(peer p2p.Peer) { + s.logger.Debug("Requesting snapshots from peer", "peer", peer.ID()) + peer.Send(SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})) +} + +// RemovePeer removes a peer from the pool. 
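For orientation, this is roughly how the reactor defined earlier in this PR drives these entry points; the feed helper below is my illustration only, not code from the diff. The RemovePeer method documented above follows it.

// Illustrative only: discovered snapshots go to AddSnapshot, received chunks
// go to AddChunk, tagged with the sending peer so the app can later reject
// bad senders.
func feed(s *syncer, peer p2p.Peer, msg interface{}) {
	switch m := msg.(type) {
	case *ssproto.SnapshotsResponse:
		_, _ = s.AddSnapshot(peer, &snapshot{
			Height: m.Height, Format: m.Format, Chunks: m.Chunks,
			Hash: m.Hash, Metadata: m.Metadata,
		})
	case *ssproto.ChunkResponse:
		_, _ = s.AddChunk(&chunk{
			Height: m.Height, Format: m.Format, Index: m.Index,
			Chunk: m.Chunk, Sender: peer.ID(),
		})
	}
}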
+func (s *syncer) RemovePeer(peer p2p.Peer) { + s.logger.Debug("Removing peer from sync", "peer", peer.ID()) + s.snapshots.RemovePeer(peer.ID()) +} + +// SyncAny tries to sync any of the snapshots in the snapshot pool, waiting to discover further +// snapshots if none were found and discoveryTime > 0. It returns the latest state and block commit +// which the caller must use to bootstrap the node. +func (s *syncer) SyncAny(discoveryTime time.Duration) (sm.State, *types.Commit, error) { + if discoveryTime > 0 { + s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime)) + time.Sleep(discoveryTime) + } + + // The app may ask us to retry a snapshot restoration, in which case we need to reuse + // the snapshot and chunk queue from the previous loop iteration. + var ( + snapshot *snapshot + chunks *chunkQueue + err error + ) + for { + // If not nil, we're going to retry restoration of the same snapshot. + if snapshot == nil { + snapshot = s.snapshots.Best() + chunks = nil + } + if snapshot == nil { + if discoveryTime == 0 { + return sm.State{}, nil, errNoSnapshots + } + s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime)) + time.Sleep(discoveryTime) + continue + } + if chunks == nil { + chunks, err = newChunkQueue(snapshot, s.tempDir) + if err != nil { + return sm.State{}, nil, fmt.Errorf("failed to create chunk queue: %w", err) + } + defer chunks.Close() // in case we forget to close it elsewhere + } + + newState, commit, err := s.Sync(snapshot, chunks) + switch { + case err == nil: + return newState, commit, nil + + case errors.Is(err, errAbort): + return sm.State{}, nil, err + + case errors.Is(err, errRetrySnapshot): + chunks.RetryAll() + s.logger.Info("Retrying snapshot", "height", snapshot.Height, "format", snapshot.Format, + "hash", fmt.Sprintf("%X", snapshot.Hash)) + continue + + case errors.Is(err, errTimeout): + s.snapshots.Reject(snapshot) + s.logger.Error("Timed out waiting for snapshot chunks, rejected snapshot", + "height", snapshot.Height, "format", snapshot.Format, "hash", fmt.Sprintf("%X", snapshot.Hash)) + + case errors.Is(err, errRejectSnapshot): + s.snapshots.Reject(snapshot) + s.logger.Info("Snapshot rejected", "height", snapshot.Height, "format", snapshot.Format, + "hash", fmt.Sprintf("%X", snapshot.Hash)) + + case errors.Is(err, errRejectFormat): + s.snapshots.RejectFormat(snapshot.Format) + s.logger.Info("Snapshot format rejected", "format", snapshot.Format) + + case errors.Is(err, errRejectSender): + s.logger.Info("Snapshot senders rejected", "height", snapshot.Height, "format", snapshot.Format, + "hash", fmt.Sprintf("%X", snapshot.Hash)) + for _, peer := range s.snapshots.GetPeers(snapshot) { + s.snapshots.RejectPeer(peer.ID()) + s.logger.Info("Snapshot sender rejected", "peer", peer.ID()) + } + + default: + return sm.State{}, nil, fmt.Errorf("snapshot restoration failed: %w", err) + } + + // Discard snapshot and chunks for next iteration + err = chunks.Close() + if err != nil { + s.logger.Error("Failed to clean up chunk queue", "err", err) + } + snapshot = nil + chunks = nil + } +} + +// Sync executes a sync for a specific snapshot, returning the latest state and block commit which +// the caller must use to bootstrap the node. 
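A sketch of the calling side (assumed wiring; the actual node integration is outside this diff). SyncAny handles retryable failures internally, so a caller mainly distinguishes success, an app-requested abort, and fatal errors. The Sync method documented above follows.

// bootstrap is a hypothetical caller; the 15-second discovery window is an
// arbitrary example value.
func bootstrap(s *syncer) error {
	state, commit, err := s.SyncAny(15 * time.Second)
	switch {
	case err == nil:
		// Persist state and commit, then hand off to fast sync / consensus
		// at state.LastBlockHeight.
		_, _ = state, commit
		return nil
	case errors.Is(err, errAbort):
		return fmt.Errorf("the app aborted state sync: %w", err)
	default:
		return fmt.Errorf("state sync failed: %w", err)
	}
}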
+func (s *syncer) Sync(snapshot *snapshot, chunks *chunkQueue) (sm.State, *types.Commit, error) { + s.mtx.Lock() + if s.chunks != nil { + s.mtx.Unlock() + return sm.State{}, nil, errors.New("a state sync is already in progress") + } + s.chunks = chunks + s.mtx.Unlock() + defer func() { + s.mtx.Lock() + s.chunks = nil + s.mtx.Unlock() + }() + + // Offer snapshot to ABCI app. + err := s.offerSnapshot(snapshot) + if err != nil { + return sm.State{}, nil, err + } + + // Spawn chunk fetchers. They will terminate when the chunk queue is closed or context cancelled. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for i := int32(0); i < chunkFetchers; i++ { + go s.fetchChunks(ctx, snapshot, chunks) + } + + pctx, pcancel := context.WithTimeout(context.Background(), 10*time.Second) + defer pcancel() + + // Optimistically build new state, so we don't discover any light client failures at the end. + state, err := s.stateProvider.State(pctx, snapshot.Height) + if err != nil { + return sm.State{}, nil, fmt.Errorf("failed to build new state: %w", err) + } + commit, err := s.stateProvider.Commit(pctx, snapshot.Height) + if err != nil { + return sm.State{}, nil, fmt.Errorf("failed to fetch commit: %w", err) + } + + // Restore snapshot + err = s.applyChunks(chunks) + if err != nil { + return sm.State{}, nil, err + } + + // Verify app and update app version + appVersion, err := s.verifyApp(snapshot) + if err != nil { + return sm.State{}, nil, err + } + state.Version.Consensus.App = appVersion + + // Done! 🎉 + s.logger.Info("Snapshot restored", "height", snapshot.Height, "format", snapshot.Format, + "hash", fmt.Sprintf("%X", snapshot.Hash)) + + return state, commit, nil +} + +// offerSnapshot offers a snapshot to the app. It returns various errors depending on the app's +// response, or nil if the snapshot was accepted. +func (s *syncer) offerSnapshot(snapshot *snapshot) error { + s.logger.Info("Offering snapshot to ABCI app", "height", snapshot.Height, + "format", snapshot.Format, "hash", fmt.Sprintf("%X", snapshot.Hash)) + resp, err := s.conn.OfferSnapshotSync(abci.RequestOfferSnapshot{ + Snapshot: &abci.Snapshot{ + Height: snapshot.Height, + Format: snapshot.Format, + Chunks: snapshot.Chunks, + Hash: snapshot.Hash, + Metadata: snapshot.Metadata, + }, + AppHash: snapshot.trustedAppHash, + }) + if err != nil { + return fmt.Errorf("failed to offer snapshot: %w", err) + } + switch resp.Result { + case abci.ResponseOfferSnapshot_ACCEPT: + s.logger.Info("Snapshot accepted, restoring", "height", snapshot.Height, + "format", snapshot.Format, "hash", fmt.Sprintf("%X", snapshot.Hash)) + return nil + case abci.ResponseOfferSnapshot_ABORT: + return errAbort + case abci.ResponseOfferSnapshot_REJECT: + return errRejectSnapshot + case abci.ResponseOfferSnapshot_REJECT_FORMAT: + return errRejectFormat + case abci.ResponseOfferSnapshot_REJECT_SENDER: + return errRejectSender + default: + return fmt.Errorf("unknown ResponseOfferSnapshot result %v", resp.Result) + } +} + +// applyChunks applies chunks to the app. It returns various errors depending on the app's +// response, or nil once the snapshot is fully restored. 
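For contrast, the application side of the offer handshake might look like this; exampleApp and its format check are invented for illustration, not taken from this diff. The applyChunks method documented above follows.

type exampleApp struct{}

// OfferSnapshot inspects the offered snapshot and picks one of the result
// codes that syncer.offerSnapshot above maps onto errors.
func (app *exampleApp) OfferSnapshot(req abci.RequestOfferSnapshot) abci.ResponseOfferSnapshot {
	switch {
	case req.Snapshot == nil:
		return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}
	case req.Snapshot.Format != 1: // suppose this app only understands format 1
		return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}
	default:
		// req.AppHash is the light-client-verified hash the restored state must match.
		return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}
	}
}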
+func (s *syncer) applyChunks(chunks *chunkQueue) error {
+	for {
+		chunk, err := chunks.Next()
+		if err == errDone {
+			return nil
+		} else if err != nil {
+			return fmt.Errorf("failed to fetch chunk: %w", err)
+		}
+
+		resp, err := s.conn.ApplySnapshotChunkSync(abci.RequestApplySnapshotChunk{
+			Index:  chunk.Index,
+			Chunk:  chunk.Chunk,
+			Sender: string(chunk.Sender),
+		})
+		if err != nil {
+			return fmt.Errorf("failed to apply chunk %v: %w", chunk.Index, err)
+		}
+		s.logger.Info("Applied snapshot chunk to ABCI app", "height", chunk.Height,
+			"format", chunk.Format, "chunk", chunk.Index, "total", chunks.Size())
+
+		// Discard and refetch any chunks as requested by the app
+		for _, index := range resp.RefetchChunks {
+			err := chunks.Discard(index)
+			if err != nil {
+				return fmt.Errorf("failed to discard chunk %v: %w", index, err)
+			}
+		}
+
+		// Reject any senders as requested by the app
+		for _, sender := range resp.RejectSenders {
+			if sender != "" {
+				s.snapshots.RejectPeer(p2p.ID(sender))
+				err := chunks.DiscardSender(p2p.ID(sender))
+				if err != nil {
+					return fmt.Errorf("failed to reject sender: %w", err)
+				}
+			}
+		}
+
+		switch resp.Result {
+		case abci.ResponseApplySnapshotChunk_ACCEPT:
+		case abci.ResponseApplySnapshotChunk_ABORT:
+			return errAbort
+		case abci.ResponseApplySnapshotChunk_RETRY:
+			chunks.Retry(chunk.Index)
+		case abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT:
+			return errRetrySnapshot
+		case abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT:
+			return errRejectSnapshot
+		default:
+			return fmt.Errorf("unknown ResponseApplySnapshotChunk result %v", resp.Result)
+		}
+	}
+}
+
+// fetchChunks requests chunks from peers, receiving allocations from the chunk queue. Chunks
+// are received from the reactor via syncer.AddChunk(), which passes them to chunkQueue.Add().
+func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *chunkQueue) {
+	for {
+		index, err := chunks.Allocate()
+		if err == errDone {
+			// Keep checking until the context is cancelled (restore is done), in case any
+			// chunks need to be refetched.
+			select {
+			case <-ctx.Done():
+				return
+			default:
+			}
+			time.Sleep(2 * time.Second)
+			continue
+		}
+		if err != nil {
+			s.logger.Error("Failed to allocate chunk from queue", "err", err)
+			return
+		}
+		s.logger.Info("Fetching snapshot chunk", "height", snapshot.Height,
+			"format", snapshot.Format, "chunk", index, "total", chunks.Size())
+
+		ticker := time.NewTicker(chunkRequestTimeout)
+		s.requestChunk(snapshot, index)
+		select {
+		case <-chunks.WaitFor(index):
+		case <-ticker.C:
+			s.requestChunk(snapshot, index)
+		case <-ctx.Done():
+			ticker.Stop()
+			return
+		}
+		ticker.Stop()
+	}
+}
+
+// requestChunk requests a chunk from a peer.
+func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) {
+	peer := s.snapshots.GetPeer(snapshot)
+	if peer == nil {
+		s.logger.Error("No valid peers found for snapshot", "height", snapshot.Height,
+			"format", snapshot.Format, "hash", snapshot.Hash)
+		return
+	}
+	s.logger.Debug("Requesting snapshot chunk", "height", snapshot.Height,
+		"format", snapshot.Format, "chunk", chunk, "peer", peer.ID())
+	peer.Send(ChunkChannel, mustEncodeMsg(&ssproto.ChunkRequest{
+		Height: snapshot.Height,
+		Format: snapshot.Format,
+		Index:  chunk,
+	}))
+}
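The request-wait-retry shape of fetchChunks above, reduced to its core as a standalone sketch. It is simplified on one point: the real loop re-requests once per allocation and relies on later refetches for anything still missing, while this variant keeps retrying.

// waitOrRetry asks for an item, then waits for it to arrive, re-requesting
// on every timeout until the context is cancelled.
func waitOrRetry(ctx context.Context, ready <-chan struct{}, timeout time.Duration, request func()) {
	ticker := time.NewTicker(timeout)
	defer ticker.Stop()
	request()
	for {
		select {
		case <-ready:
			return
		case <-ticker.C:
			request() // ask again, possibly hitting a different peer
		case <-ctx.Done():
			return
		}
	}
}

+// verifyApp verifies the sync, checking the app hash and last block height. It returns the
+// app version, which should be returned as part of the initial state.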
+func (s *syncer) verifyApp(snapshot *snapshot) (uint64, error) { + resp, err := s.connQuery.InfoSync(proxy.RequestInfo) + if err != nil { + return 0, fmt.Errorf("failed to query ABCI app for appHash: %w", err) + } + if !bytes.Equal(snapshot.trustedAppHash, resp.LastBlockAppHash) { + s.logger.Error("appHash verification failed", + "expected", fmt.Sprintf("%X", snapshot.trustedAppHash), + "actual", fmt.Sprintf("%X", resp.LastBlockAppHash)) + return 0, errVerifyFailed + } + if uint64(resp.LastBlockHeight) != snapshot.Height { + s.logger.Error("ABCI app reported unexpected last block height", + "expected", snapshot.Height, "actual", resp.LastBlockHeight) + return 0, errVerifyFailed + } + s.logger.Info("Verified ABCI app", "height", snapshot.Height, + "appHash", fmt.Sprintf("%X", snapshot.trustedAppHash)) + return resp.AppVersion, nil +} diff --git a/statesync/syncer_test.go b/statesync/syncer_test.go new file mode 100644 index 000000000..c78509bfe --- /dev/null +++ b/statesync/syncer_test.go @@ -0,0 +1,661 @@ +package statesync + +import ( + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + tmsync "github.com/tendermint/tendermint/libs/sync" + "github.com/tendermint/tendermint/p2p" + p2pmocks "github.com/tendermint/tendermint/p2p/mocks" + tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" + tmversion "github.com/tendermint/tendermint/proto/tendermint/version" + "github.com/tendermint/tendermint/proxy" + proxymocks "github.com/tendermint/tendermint/proxy/mocks" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/statesync/mocks" + "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" +) + +// Sets up a basic syncer that can be used to test OfferSnapshot requests +func setupOfferSyncer(t *testing.T) (*syncer, *proxymocks.AppConnSnapshot) { + connQuery := &proxymocks.AppConnQuery{} + connSnapshot := &proxymocks.AppConnSnapshot{} + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) + syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") + return syncer, connSnapshot +} + +// Sets up a simple peer mock with an ID +func simplePeer(id string) *p2pmocks.Peer { + peer := &p2pmocks.Peer{} + peer.On("ID").Return(p2p.ID(id)) + return peer +} + +func TestSyncer_SyncAny(t *testing.T) { + state := sm.State{ + ChainID: "chain", + Version: tmstate.Version{ + Consensus: tmversion.Consensus{ + Block: version.BlockProtocol, + App: 0, + }, + + Software: version.TMCoreSemVer, + }, + + LastBlockHeight: 1, + LastBlockID: types.BlockID{Hash: []byte("blockhash")}, + LastBlockTime: time.Now(), + LastResultsHash: []byte("last_results_hash"), + AppHash: []byte("app_hash"), + + LastValidators: &types.ValidatorSet{Proposer: &types.Validator{Address: []byte("val1")}}, + Validators: &types.ValidatorSet{Proposer: &types.Validator{Address: []byte("val2")}}, + NextValidators: &types.ValidatorSet{Proposer: &types.Validator{Address: []byte("val3")}}, + + ConsensusParams: *types.DefaultConsensusParams(), + LastHeightConsensusParamsChanged: 1, + } + commit := &types.Commit{BlockID: types.BlockID{Hash: []byte("blockhash")}} + + chunks := []*chunk{ + {Height: 1, Format: 1, Index: 
0, Chunk: []byte{1, 1, 0}}, + {Height: 1, Format: 1, Index: 1, Chunk: []byte{1, 1, 1}}, + {Height: 1, Format: 1, Index: 2, Chunk: []byte{1, 1, 2}}, + } + s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything, uint64(1)).Return(state.AppHash, nil) + stateProvider.On("AppHash", mock.Anything, uint64(2)).Return([]byte("app_hash_2"), nil) + stateProvider.On("Commit", mock.Anything, uint64(1)).Return(commit, nil) + stateProvider.On("State", mock.Anything, uint64(1)).Return(state, nil) + connSnapshot := &proxymocks.AppConnSnapshot{} + connQuery := &proxymocks.AppConnQuery{} + + syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") + + // Adding a chunk should error when no sync is in progress + _, err := syncer.AddChunk(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{1}}) + require.Error(t, err) + + // Adding a couple of peers should trigger snapshot discovery messages + peerA := &p2pmocks.Peer{} + peerA.On("ID").Return(p2p.ID("a")) + peerA.On("Send", SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})).Return(true) + syncer.AddPeer(peerA) + peerA.AssertExpectations(t) + + peerB := &p2pmocks.Peer{} + peerB.On("ID").Return(p2p.ID("b")) + peerB.On("Send", SnapshotChannel, mustEncodeMsg(&ssproto.SnapshotsRequest{})).Return(true) + syncer.AddPeer(peerB) + peerB.AssertExpectations(t) + + // Both peers report back with snapshots. One of them also returns a snapshot we don't want, in + // format 2, which will be rejected by the ABCI application. + new, err := syncer.AddSnapshot(peerA, s) + require.NoError(t, err) + assert.True(t, new) + + new, err = syncer.AddSnapshot(peerB, s) + require.NoError(t, err) + assert.False(t, new) + + new, err = syncer.AddSnapshot(peerB, &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1}}) + require.NoError(t, err) + assert.True(t, new) + + // We start a sync, with peers sending back chunks when requested. We first reject the snapshot + // with height 2 format 2, and accept the snapshot at height 1. 
+ connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: &abci.Snapshot{ + Height: 2, + Format: 2, + Chunks: 3, + Hash: []byte{1}, + }, + AppHash: []byte("app_hash_2"), + }).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: &abci.Snapshot{ + Height: s.Height, + Format: s.Format, + Chunks: s.Chunks, + Hash: s.Hash, + Metadata: s.Metadata, + }, + AppHash: []byte("app_hash"), + }).Times(2).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil) + + chunkRequests := make(map[uint32]int) + chunkRequestsMtx := tmsync.Mutex{} + onChunkRequest := func(args mock.Arguments) { + pb, err := decodeMsg(args[1].([]byte)) + require.NoError(t, err) + msg := pb.(*ssproto.ChunkRequest) + require.EqualValues(t, 1, msg.Height) + require.EqualValues(t, 1, msg.Format) + require.LessOrEqual(t, msg.Index, uint32(len(chunks))) + + added, err := syncer.AddChunk(chunks[msg.Index]) + require.NoError(t, err) + assert.True(t, added) + + chunkRequestsMtx.Lock() + chunkRequests[msg.Index]++ + chunkRequestsMtx.Unlock() + } + peerA.On("Send", ChunkChannel, mock.Anything).Maybe().Run(onChunkRequest).Return(true) + peerB.On("Send", ChunkChannel, mock.Anything).Maybe().Run(onChunkRequest).Return(true) + + // The first time we're applying chunk 2 we tell it to retry the snapshot and discard chunk 1, + // which should cause it to keep the existing chunk 0 and 2, and restart restoration from + // beginning. We also wait for a little while, to exercise the retry logic in fetchChunks(). + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 2, Chunk: []byte{1, 1, 2}, + }).Once().Run(func(args mock.Arguments) { time.Sleep(2 * time.Second) }).Return( + &abci.ResponseApplySnapshotChunk{ + Result: abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT, + RefetchChunks: []uint32{1}, + }, nil) + + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 0, Chunk: []byte{1, 1, 0}, + }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 1, Chunk: []byte{1, 1, 1}, + }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 2, Chunk: []byte{1, 1, 2}, + }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) + connQuery.On("InfoSync", proxy.RequestInfo).Return(&abci.ResponseInfo{ + AppVersion: 9, + LastBlockHeight: 1, + LastBlockAppHash: []byte("app_hash"), + }, nil) + + newState, lastCommit, err := syncer.SyncAny(0) + require.NoError(t, err) + + time.Sleep(50 * time.Millisecond) // wait for peers to receive requests + + chunkRequestsMtx.Lock() + assert.Equal(t, map[uint32]int{0: 1, 1: 2, 2: 1}, chunkRequests) + chunkRequestsMtx.Unlock() + + // The syncer should have updated the state app version from the ABCI info response. 
+	expectState := state
+	expectState.Version.Consensus.App = 9
+
+	assert.Equal(t, expectState, newState)
+	assert.Equal(t, commit, lastCommit)
+
+	connSnapshot.AssertExpectations(t)
+	connQuery.AssertExpectations(t)
+	peerA.AssertExpectations(t)
+	peerB.AssertExpectations(t)
+}
+
+func TestSyncer_SyncAny_noSnapshots(t *testing.T) {
+	syncer, _ := setupOfferSyncer(t)
+	_, _, err := syncer.SyncAny(0)
+	assert.Equal(t, errNoSnapshots, err)
+}
+
+func TestSyncer_SyncAny_abort(t *testing.T) {
+	syncer, connSnapshot := setupOfferSyncer(t)
+
+	s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
+	_, err := syncer.AddSnapshot(simplePeer("id"), s)
+	require.NoError(t, err)
+	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
+		Snapshot: toABCI(s), AppHash: []byte("app_hash"),
+	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil)
+
+	_, _, err = syncer.SyncAny(0)
+	assert.Equal(t, errAbort, err)
+	connSnapshot.AssertExpectations(t)
+}
+
+func TestSyncer_SyncAny_reject(t *testing.T) {
+	syncer, connSnapshot := setupOfferSyncer(t)
+
+	// s22 is tried first, then s12, then s11, then errNoSnapshots
+	s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
+	s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
+	s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
+	_, err := syncer.AddSnapshot(simplePeer("id"), s22)
+	require.NoError(t, err)
+	_, err = syncer.AddSnapshot(simplePeer("id"), s12)
+	require.NoError(t, err)
+	_, err = syncer.AddSnapshot(simplePeer("id"), s11)
+	require.NoError(t, err)
+
+	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
+		Snapshot: toABCI(s22), AppHash: []byte("app_hash"),
+	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil)
+
+	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
+		Snapshot: toABCI(s12), AppHash: []byte("app_hash"),
+	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil)
+
+	connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{
+		Snapshot: toABCI(s11), AppHash: []byte("app_hash"),
+	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil)
+
+	_, _, err = syncer.SyncAny(0)
+	assert.Equal(t, errNoSnapshots, err)
+	connSnapshot.AssertExpectations(t)
+}
+
+func TestSyncer_SyncAny_reject_format(t *testing.T) {
+	syncer, connSnapshot := setupOfferSyncer(t)
+
+	// s22 is tried first, which rejects the format (and thereby also s12); s11 is then offered and aborts.
+ s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} + s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} + s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + _, err := syncer.AddSnapshot(simplePeer("id"), s22) + require.NoError(t, err) + _, err = syncer.AddSnapshot(simplePeer("id"), s12) + require.NoError(t, err) + _, err = syncer.AddSnapshot(simplePeer("id"), s11) + require.NoError(t, err) + + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(s22), AppHash: []byte("app_hash"), + }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) + + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(s11), AppHash: []byte("app_hash"), + }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) + + _, _, err = syncer.SyncAny(0) + assert.Equal(t, errAbort, err) + connSnapshot.AssertExpectations(t) +} + +func TestSyncer_SyncAny_reject_sender(t *testing.T) { + syncer, connSnapshot := setupOfferSyncer(t) + + peerA := simplePeer("a") + peerB := simplePeer("b") + peerC := simplePeer("c") + + // sbc will be offered first, which will be rejected with reject_sender, causing all snapshots + // submitted by both b and c (i.e. sb, sc, sbc) to be rejected. Finally, sa will reject and + // errNoSnapshots is returned. + sa := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + sb := &snapshot{Height: 2, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + sc := &snapshot{Height: 3, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + sbc := &snapshot{Height: 4, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + _, err := syncer.AddSnapshot(peerA, sa) + require.NoError(t, err) + _, err = syncer.AddSnapshot(peerB, sb) + require.NoError(t, err) + _, err = syncer.AddSnapshot(peerC, sc) + require.NoError(t, err) + _, err = syncer.AddSnapshot(peerB, sbc) + require.NoError(t, err) + _, err = syncer.AddSnapshot(peerC, sbc) + require.NoError(t, err) + + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(sbc), AppHash: []byte("app_hash"), + }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_SENDER}, nil) + + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(sa), AppHash: []byte("app_hash"), + }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) + + _, _, err = syncer.SyncAny(0) + assert.Equal(t, errNoSnapshots, err) + connSnapshot.AssertExpectations(t) +} + +func TestSyncer_SyncAny_abciError(t *testing.T) { + syncer, connSnapshot := setupOfferSyncer(t) + + errBoom := errors.New("boom") + s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + _, err := syncer.AddSnapshot(simplePeer("id"), s) + require.NoError(t, err) + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(s), AppHash: []byte("app_hash"), + }).Once().Return(nil, errBoom) + + _, _, err = syncer.SyncAny(0) + assert.True(t, errors.Is(err, errBoom)) + connSnapshot.AssertExpectations(t) +} + +func TestSyncer_offerSnapshot(t *testing.T) { + unknownErr := errors.New("unknown error") + boom := errors.New("boom") + + testcases := map[string]struct { + result abci.ResponseOfferSnapshot_Result + err error + expectErr error + }{ + "accept": {abci.ResponseOfferSnapshot_ACCEPT, nil, nil}, + "abort": {abci.ResponseOfferSnapshot_ABORT, nil, errAbort}, + "reject": 
{abci.ResponseOfferSnapshot_REJECT, nil, errRejectSnapshot}, + "reject_format": {abci.ResponseOfferSnapshot_REJECT_FORMAT, nil, errRejectFormat}, + "reject_sender": {abci.ResponseOfferSnapshot_REJECT_SENDER, nil, errRejectSender}, + "unknown": {abci.ResponseOfferSnapshot_UNKNOWN, nil, unknownErr}, + "error": {0, boom, boom}, + "unknown non-zero": {9, nil, unknownErr}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + syncer, connSnapshot := setupOfferSyncer(t) + s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(s), + AppHash: []byte("app_hash"), + }).Return(&abci.ResponseOfferSnapshot{Result: tc.result}, tc.err) + err := syncer.offerSnapshot(s) + if tc.expectErr == unknownErr { + require.Error(t, err) + } else { + unwrapped := errors.Unwrap(err) + if unwrapped != nil { + err = unwrapped + } + assert.Equal(t, tc.expectErr, err) + } + }) + } +} + +func TestSyncer_applyChunks_Results(t *testing.T) { + unknownErr := errors.New("unknown error") + boom := errors.New("boom") + + testcases := map[string]struct { + result abci.ResponseApplySnapshotChunk_Result + err error + expectErr error + }{ + "accept": {abci.ResponseApplySnapshotChunk_ACCEPT, nil, nil}, + "abort": {abci.ResponseApplySnapshotChunk_ABORT, nil, errAbort}, + "retry": {abci.ResponseApplySnapshotChunk_RETRY, nil, nil}, + "retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT, nil, errRetrySnapshot}, + "reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT, nil, errRejectSnapshot}, + "unknown": {abci.ResponseApplySnapshotChunk_UNKNOWN, nil, unknownErr}, + "error": {0, boom, boom}, + "unknown non-zero": {9, nil, unknownErr}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + connQuery := &proxymocks.AppConnQuery{} + connSnapshot := &proxymocks.AppConnSnapshot{} + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) + syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") + + body := []byte{1, 2, 3} + chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, "") + require.NoError(t, err) + _, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: body}) + require.NoError(t, err) + + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 0, Chunk: body, + }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: tc.result}, tc.err) + if tc.result == abci.ResponseApplySnapshotChunk_RETRY { + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 0, Chunk: body, + }).Once().Return(&abci.ResponseApplySnapshotChunk{ + Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) + } + + err = syncer.applyChunks(chunks) + if tc.expectErr == unknownErr { + require.Error(t, err) + } else { + unwrapped := errors.Unwrap(err) + if unwrapped != nil { + err = unwrapped + } + assert.Equal(t, tc.expectErr, err) + } + connSnapshot.AssertExpectations(t) + }) + } +} + +func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { + // Discarding chunks via refetch_chunks should work the same for all results + testcases := map[string]struct { + result abci.ResponseApplySnapshotChunk_Result + }{ + "accept": {abci.ResponseApplySnapshotChunk_ACCEPT}, + "abort": {abci.ResponseApplySnapshotChunk_ABORT}, + "retry": 
{abci.ResponseApplySnapshotChunk_RETRY},
+		"retry_snapshot":  {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT},
+		"reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT},
+	}
+	for name, tc := range testcases {
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			connQuery := &proxymocks.AppConnQuery{}
+			connSnapshot := &proxymocks.AppConnSnapshot{}
+			stateProvider := &mocks.StateProvider{}
+			stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)
+			syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "")
+
+			chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, "")
+			require.NoError(t, err)
+			added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}})
+			require.True(t, added)
+			require.NoError(t, err)
+			added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}})
+			require.True(t, added)
+			require.NoError(t, err)
+			added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 2, Chunk: []byte{2}})
+			require.True(t, added)
+			require.NoError(t, err)
+
+			// The first two chunks are accepted, before the last one asks for chunk 1 to be refetched
+			connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
+				Index: 0, Chunk: []byte{0},
+			}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
+			connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
+				Index: 1, Chunk: []byte{1},
+			}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
+			connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
+				Index: 2, Chunk: []byte{2},
+			}).Once().Return(&abci.ResponseApplySnapshotChunk{
+				Result:        tc.result,
+				RefetchChunks: []uint32{1},
+			}, nil)
+
+			// Since removing the chunk will cause Next() to block, we spawn a goroutine, then
+			// check the queue contents, and finally close the queue to end the goroutine.
+			// We don't really care about the result of applyChunks, since it has a separate test.
+			go func() {
+				syncer.applyChunks(chunks) //nolint:errcheck // purposefully ignore error
+			}()
+
+			time.Sleep(50 * time.Millisecond)
+			assert.True(t, chunks.Has(0))
+			assert.False(t, chunks.Has(1))
+			assert.True(t, chunks.Has(2))
+			err = chunks.Close()
+			require.NoError(t, err)
+		})
+	}
+}
+
+func TestSyncer_applyChunks_RejectSenders(t *testing.T) {
+	// Rejecting chunk senders via reject_senders should work the same for all results
+	testcases := map[string]struct {
+		result abci.ResponseApplySnapshotChunk_Result
+	}{
+		"accept":          {abci.ResponseApplySnapshotChunk_ACCEPT},
+		"abort":           {abci.ResponseApplySnapshotChunk_ABORT},
+		"retry":           {abci.ResponseApplySnapshotChunk_RETRY},
+		"retry_snapshot":  {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT},
+		"reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT},
+	}
+	for name, tc := range testcases {
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			connQuery := &proxymocks.AppConnQuery{}
+			connSnapshot := &proxymocks.AppConnSnapshot{}
+			stateProvider := &mocks.StateProvider{}
+			stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)
+			syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "")
+
+			// Set up three peers across two snapshots, and ask for one of them to be banned.
+			// It should be banned from all snapshots.
+ peerA := simplePeer("a") + peerB := simplePeer("b") + peerC := simplePeer("c") + + s1 := &snapshot{Height: 1, Format: 1, Chunks: 3} + s2 := &snapshot{Height: 2, Format: 1, Chunks: 3} + _, err := syncer.AddSnapshot(peerA, s1) + require.NoError(t, err) + _, err = syncer.AddSnapshot(peerA, s2) + require.NoError(t, err) + _, err = syncer.AddSnapshot(peerB, s1) + require.NoError(t, err) + _, err = syncer.AddSnapshot(peerB, s2) + require.NoError(t, err) + _, err = syncer.AddSnapshot(peerC, s1) + require.NoError(t, err) + _, err = syncer.AddSnapshot(peerC, s2) + require.NoError(t, err) + + chunks, err := newChunkQueue(s1, "") + require.NoError(t, err) + added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}, Sender: peerA.ID()}) + require.True(t, added) + require.NoError(t, err) + added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}, Sender: peerB.ID()}) + require.True(t, added) + require.NoError(t, err) + added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 2, Chunk: []byte{2}, Sender: peerC.ID()}) + require.True(t, added) + require.NoError(t, err) + + // The first two chunks are accepted, before the last one asks for b sender to be rejected + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 0, Chunk: []byte{0}, Sender: "a", + }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 1, Chunk: []byte{1}, Sender: "b", + }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 2, Chunk: []byte{2}, Sender: "c", + }).Once().Return(&abci.ResponseApplySnapshotChunk{ + Result: tc.result, + RejectSenders: []string{string(peerB.ID())}, + }, nil) + + // On retry, the last chunk will be tried again, so we just accept it then. + if tc.result == abci.ResponseApplySnapshotChunk_RETRY { + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 2, Chunk: []byte{2}, Sender: "c", + }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) + } + + // We don't really care about the result of applyChunks, since it has separate test. + // However, it will block on e.g. retry result, so we spawn a goroutine that will + // be shut down when the chunk queue closes. 
+ go func() { + syncer.applyChunks(chunks) //nolint:errcheck // purposefully ignore error + }() + + time.Sleep(50 * time.Millisecond) + + s1peers := syncer.snapshots.GetPeers(s1) + assert.Len(t, s1peers, 2) + assert.EqualValues(t, "a", s1peers[0].ID()) + assert.EqualValues(t, "c", s1peers[1].ID()) + + syncer.snapshots.GetPeers(s1) + assert.Len(t, s1peers, 2) + assert.EqualValues(t, "a", s1peers[0].ID()) + assert.EqualValues(t, "c", s1peers[1].ID()) + + err = chunks.Close() + require.NoError(t, err) + }) + } +} + +func TestSyncer_verifyApp(t *testing.T) { + boom := errors.New("boom") + s := &snapshot{Height: 3, Format: 1, Chunks: 5, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} + + testcases := map[string]struct { + response *abci.ResponseInfo + err error + expectErr error + }{ + "verified": {&abci.ResponseInfo{ + LastBlockHeight: 3, + LastBlockAppHash: []byte("app_hash"), + AppVersion: 9, + }, nil, nil}, + "invalid height": {&abci.ResponseInfo{ + LastBlockHeight: 5, + LastBlockAppHash: []byte("app_hash"), + AppVersion: 9, + }, nil, errVerifyFailed}, + "invalid hash": {&abci.ResponseInfo{ + LastBlockHeight: 3, + LastBlockAppHash: []byte("xxx"), + AppVersion: 9, + }, nil, errVerifyFailed}, + "error": {nil, boom, boom}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + connQuery := &proxymocks.AppConnQuery{} + connSnapshot := &proxymocks.AppConnSnapshot{} + stateProvider := &mocks.StateProvider{} + syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") + + connQuery.On("InfoSync", proxy.RequestInfo).Return(tc.response, tc.err) + version, err := syncer.verifyApp(s) + unwrapped := errors.Unwrap(err) + if unwrapped != nil { + err = unwrapped + } + assert.Equal(t, tc.expectErr, err) + if err == nil { + assert.Equal(t, tc.response.AppVersion, version) + } + }) + } +} + +func toABCI(s *snapshot) *abci.Snapshot { + return &abci.Snapshot{ + Height: s.Height, + Format: s.Format, + Chunks: s.Chunks, + Hash: s.Hash, + Metadata: s.Metadata, + } +} diff --git a/store/codec.go b/store/codec.go deleted file mode 100644 index 29a59948d..000000000 --- a/store/codec.go +++ /dev/null @@ -1,13 +0,0 @@ -package store - -import ( - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/types" -) - -var cdc = amino.NewCodec() - -func init() { - types.RegisterBlockAmino(cdc) -} diff --git a/store/store.go b/store/store.go index c971a9a15..9ae4d555d 100644 --- a/store/store.go +++ b/store/store.go @@ -3,13 +3,13 @@ package store import ( "fmt" "strconv" - "sync" - "github.com/pkg/errors" - - db "github.com/tendermint/tm-db" + "github.com/gogo/protobuf/proto" dbm "github.com/tendermint/tm-db" + tmsync "github.com/tendermint/tendermint/libs/sync" + tmstore "github.com/tendermint/tendermint/proto/tendermint/store" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -33,7 +33,12 @@ The store can be assumed to contain all contiguous blocks between base and heigh type BlockStore struct { db dbm.DB - mtx sync.RWMutex + // mtx guards access to the struct fields listed below it. We rely on the database to enforce + // fine-grained concurrency control for its data, and thus this mutex does not apply to + // database contents. 
The only reason for keeping these fields in the struct is that the data + // can't efficiently be queried from the database since the key encoding we use is not + // lexicographically ordered (see https://github.com/tendermint/tendermint/issues/4567). + mtx tmsync.RWMutex base int64 height int64 } @@ -41,10 +46,10 @@ type BlockStore struct { // NewBlockStore returns a new BlockStore with the given DB, // initialized to the last height that was committed to the DB. func NewBlockStore(db dbm.DB) *BlockStore { - bsjson := LoadBlockStoreStateJSON(db) + bs := LoadBlockStoreState(db) return &BlockStore{ - base: bsjson.Base, - height: bsjson.Height, + base: bs.Base, + height: bs.Height, db: db, } } @@ -73,6 +78,16 @@ func (bs *BlockStore) Size() int64 { return bs.height - bs.base + 1 } +// LoadBase atomically loads the base block meta, or returns nil if no base is found. +func (bs *BlockStore) LoadBaseMeta() *types.BlockMeta { + bs.mtx.RLock() + defer bs.mtx.RUnlock() + if bs.base == 0 { + return nil + } + return bs.LoadBlockMeta(bs.base) +} + // LoadBlock returns the block with the given height. // If no block is found for that height, it returns nil. func (bs *BlockStore) LoadBlock(height int64) *types.Block { @@ -81,18 +96,29 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block { return nil } - var block = new(types.Block) + pbb := new(tmproto.Block) buf := []byte{} - for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ { + for i := 0; i < int(blockMeta.BlockID.PartSetHeader.Total); i++ { part := bs.LoadBlockPart(height, i) + // If the part is missing (e.g. since it has been deleted after we + // loaded the block meta) we consider the whole block to be missing. + if part == nil { + return nil + } buf = append(buf, part.Bytes...) } - err := cdc.UnmarshalBinaryLengthPrefixed(buf, block) + err := proto.Unmarshal(buf, pbb) if err != nil { // NOTE: The existence of meta should imply the existence of the // block. So, make sure meta is only saved after blocks are saved. - panic(errors.Wrap(err, "Error reading block")) + panic(fmt.Sprintf("Error reading block: %v", err)) } + + block, err := types.BlockFromProto(pbb) + if err != nil { + panic(fmt.Errorf("error from proto block: %w", err)) + } + return block } @@ -112,7 +138,7 @@ func (bs *BlockStore) LoadBlockByHash(hash []byte) *types.Block { height, err := strconv.ParseInt(s, 10, 64) if err != nil { - panic(errors.Wrapf(err, "failed to extract height from %s", s)) + panic(fmt.Sprintf("failed to extract height from %s: %v", s, err)) } return bs.LoadBlock(height) } @@ -121,7 +147,8 @@ func (bs *BlockStore) LoadBlockByHash(hash []byte) *types.Block { // from the block at the given height. // If no part is found for the given height and index, it returns nil. func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part { - var part = new(types.Part) + var pbpart = new(tmproto.Part) + bz, err := bs.db.Get(calcBlockPartKey(height, index)) if err != nil { panic(err) @@ -129,28 +156,43 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part { if len(bz) == 0 { return nil } - err = cdc.UnmarshalBinaryBare(bz, part) + + err = proto.Unmarshal(bz, pbpart) + if err != nil { + panic(fmt.Errorf("unmarshal to tmproto.Part failed: %w", err)) + } + part, err := types.PartFromProto(pbpart) if err != nil { - panic(errors.Wrap(err, "Error reading block part")) + panic(fmt.Sprintf("Error reading block part: %v", err)) } + return part } // LoadBlockMeta returns the BlockMeta for the given height. 
// If no block is found for the given height, it returns nil. func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { - var blockMeta = new(types.BlockMeta) + var pbbm = new(tmproto.BlockMeta) bz, err := bs.db.Get(calcBlockMetaKey(height)) + if err != nil { panic(err) } + if len(bz) == 0 { return nil } - err = cdc.UnmarshalBinaryBare(bz, blockMeta) + + err = proto.Unmarshal(bz, pbbm) if err != nil { - panic(errors.Wrap(err, "Error reading block meta")) + panic(fmt.Errorf("unmarshal to tmproto.BlockMeta: %w", err)) } + + blockMeta, err := types.BlockMetaFromProto(pbbm) + if err != nil { + panic(fmt.Errorf("error from proto blockMeta: %w", err)) + } + return blockMeta } @@ -159,7 +201,7 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { // and it comes from the block.LastCommit for `height+1`. // If no commit is found for the given height, it returns nil. func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { - var commit = new(types.Commit) + var pbc = new(tmproto.Commit) bz, err := bs.db.Get(calcBlockCommitKey(height)) if err != nil { panic(err) @@ -167,9 +209,13 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { if len(bz) == 0 { return nil } - err = cdc.UnmarshalBinaryBare(bz, commit) + err = proto.Unmarshal(bz, pbc) + if err != nil { + panic(fmt.Errorf("error reading block commit: %w", err)) + } + commit, err := types.CommitFromProto(pbc) if err != nil { - panic(errors.Wrap(err, "Error reading block commit")) + panic(fmt.Sprintf("Error reading block commit: %v", err)) } return commit } @@ -178,7 +224,7 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { // This is useful when we've seen a commit, but there has not yet been // a new block at `height + 1` that includes this commit in its block.LastCommit. func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { - var commit = new(types.Commit) + var pbc = new(tmproto.Commit) bz, err := bs.db.Get(calcSeenCommitKey(height)) if err != nil { panic(err) @@ -186,9 +232,14 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { if len(bz) == 0 { return nil } - err = cdc.UnmarshalBinaryBare(bz, commit) + err = proto.Unmarshal(bz, pbc) if err != nil { - panic(errors.Wrap(err, "Error reading block seen commit")) + panic(fmt.Sprintf("error reading block seen commit: %v", err)) + } + + commit, err := types.CommitFromProto(pbc) + if err != nil { + panic(fmt.Errorf("error from proto commit: %w", err)) } return commit } @@ -213,7 +264,7 @@ func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) { pruned := uint64(0) batch := bs.db.NewBatch() defer batch.Close() - flush := func(batch db.Batch, base int64) error { + flush := func(batch dbm.Batch, base int64) error { // We can't trust batches to be atomic, so update base first to make sure noone // tries to access missing blocks. 
bs.mtx.Lock() @@ -234,12 +285,22 @@ func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) { if meta == nil { // assume already deleted continue } - batch.Delete(calcBlockMetaKey(h)) - batch.Delete(calcBlockHashKey(meta.BlockID.Hash)) - batch.Delete(calcBlockCommitKey(h)) - batch.Delete(calcSeenCommitKey(h)) - for p := 0; p < meta.BlockID.PartsHeader.Total; p++ { - batch.Delete(calcBlockPartKey(h, p)) + if err := batch.Delete(calcBlockMetaKey(h)); err != nil { + return 0, err + } + if err := batch.Delete(calcBlockHashKey(meta.BlockID.Hash)); err != nil { + return 0, err + } + if err := batch.Delete(calcBlockCommitKey(h)); err != nil { + return 0, err + } + if err := batch.Delete(calcSeenCommitKey(h)); err != nil { + return 0, err + } + for p := 0; p < int(meta.BlockID.PartSetHeader.Total); p++ { + if err := batch.Delete(calcBlockPartKey(h, p)); err != nil { + return 0, err + } } pruned++ @@ -279,29 +340,46 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s panic(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g)) } if !blockParts.IsComplete() { - panic(fmt.Sprintf("BlockStore can only save complete block part sets")) + panic("BlockStore can only save complete block part sets") } - // Save block meta - blockMeta := types.NewBlockMeta(block, blockParts) - metaBytes := cdc.MustMarshalBinaryBare(blockMeta) - bs.db.Set(calcBlockMetaKey(height), metaBytes) - bs.db.Set(calcBlockHashKey(hash), []byte(fmt.Sprintf("%d", height))) - - // Save block parts - for i := 0; i < blockParts.Total(); i++ { + // Save block parts. This must be done before the block meta, since callers + // typically load the block meta first as an indication that the block exists + // and then go on to load block parts - we must make sure the block is + // complete as soon as the block meta is written. + for i := 0; i < int(blockParts.Total()); i++ { part := blockParts.GetPart(i) bs.saveBlockPart(height, i, part) } + // Save block meta + blockMeta := types.NewBlockMeta(block, blockParts) + pbm := blockMeta.ToProto() + if pbm == nil { + panic("nil blockmeta") + } + metaBytes := mustEncode(pbm) + if err := bs.db.Set(calcBlockMetaKey(height), metaBytes); err != nil { + panic(err) + } + if err := bs.db.Set(calcBlockHashKey(hash), []byte(fmt.Sprintf("%d", height))); err != nil { + panic(err) + } + // Save block commit (duplicate and separate from the Block) - blockCommitBytes := cdc.MustMarshalBinaryBare(block.LastCommit) - bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes) + pbc := block.LastCommit.ToProto() + blockCommitBytes := mustEncode(pbc) + if err := bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes); err != nil { + panic(err) + } // Save seen commit (seen +2/3 precommits for block) // NOTE: we can delete this at a later height - seenCommitBytes := cdc.MustMarshalBinaryBare(seenCommit) - bs.db.Set(calcSeenCommitKey(height), seenCommitBytes) + pbsc := seenCommit.ToProto() + seenCommitBytes := mustEncode(pbsc) + if err := bs.db.Set(calcSeenCommitKey(height), seenCommitBytes); err != nil { + panic(err) + } // Done! bs.mtx.Lock() @@ -311,26 +389,39 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s } bs.mtx.Unlock() - // Save new BlockStoreStateJSON descriptor + // Save new BlockStoreState descriptor. This also flushes the database. 
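+ // (saveState persists the descriptor via SaveBlockStoreState, which writes
+ // with db.SetSync; that synchronous write is what provides the flush.)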
bs.saveState() - - // Flush - bs.db.SetSync(nil, nil) } func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) { - partBytes := cdc.MustMarshalBinaryBare(part) - bs.db.Set(calcBlockPartKey(height, index), partBytes) + pbp, err := part.ToProto() + if err != nil { + panic(fmt.Errorf("unable to make part into proto: %w", err)) + } + partBytes := mustEncode(pbp) + if err := bs.db.Set(calcBlockPartKey(height, index), partBytes); err != nil { + panic(err) + } } func (bs *BlockStore) saveState() { bs.mtx.RLock() - bsJSON := BlockStoreStateJSON{ + bss := tmstore.BlockStoreState{ Base: bs.base, Height: bs.height, } bs.mtx.RUnlock() - bsJSON.Save(bs.db) + SaveBlockStoreState(&bss, bs.db) +} + +// SaveSeenCommit saves a seen commit, used by e.g. the state sync reactor when bootstrapping node. +func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *types.Commit) error { + pbc := seenCommit.ToProto() + seenCommitBytes, err := proto.Marshal(pbc) + if err != nil { + return fmt.Errorf("unable to marshal commit: %w", err) + } + return bs.db.Set(calcSeenCommitKey(height), seenCommitBytes) } //----------------------------------------------------------------------------- @@ -359,42 +450,49 @@ func calcBlockHashKey(hash []byte) []byte { var blockStoreKey = []byte("blockStore") -// BlockStoreStateJSON is the block store state JSON structure. -type BlockStoreStateJSON struct { - Base int64 `json:"base"` - Height int64 `json:"height"` -} - -// Save persists the blockStore state to the database as JSON. -func (bsj BlockStoreStateJSON) Save(db dbm.DB) { - bytes, err := cdc.MarshalJSON(bsj) +// SaveBlockStoreState persists the blockStore state to the database. +func SaveBlockStoreState(bsj *tmstore.BlockStoreState, db dbm.DB) { + bytes, err := proto.Marshal(bsj) if err != nil { panic(fmt.Sprintf("Could not marshal state bytes: %v", err)) } - db.SetSync(blockStoreKey, bytes) + if err := db.SetSync(blockStoreKey, bytes); err != nil { + panic(err) + } } -// LoadBlockStoreStateJSON returns the BlockStoreStateJSON as loaded from disk. -// If no BlockStoreStateJSON was previously persisted, it returns the zero value. -func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON { +// LoadBlockStoreState returns the BlockStoreState as loaded from disk. +// If no BlockStoreState was previously persisted, it returns the zero value. +func LoadBlockStoreState(db dbm.DB) tmstore.BlockStoreState { bytes, err := db.Get(blockStoreKey) if err != nil { panic(err) } + if len(bytes) == 0 { - return BlockStoreStateJSON{ + return tmstore.BlockStoreState{ Base: 0, Height: 0, } } - bsj := BlockStoreStateJSON{} - err = cdc.UnmarshalJSON(bytes, &bsj) - if err != nil { + + var bsj tmstore.BlockStoreState + if err := proto.Unmarshal(bytes, &bsj); err != nil { panic(fmt.Sprintf("Could not unmarshal bytes: %X", bytes)) } + // Backwards compatibility with persisted data from before Base existed. 
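+ // (Stores written before pruning existed always began at block 1, which is
+ // why Base can safely be assumed to be 1 here.)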
if bsj.Height > 0 && bsj.Base == 0 { bsj.Base = 1 } return bsj } + +// mustEncode proto encodes a proto.message and panics if fails +func mustEncode(pb proto.Message) []byte { + bz, err := proto.Marshal(pb) + if err != nil { + panic(fmt.Errorf("unable to marshal: %w", err)) + } + return bz +} diff --git a/store/store_test.go b/store/store_test.go index 3b61604e1..ea07c73e6 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -9,18 +9,21 @@ import ( "testing" "time" - "github.com/pkg/errors" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - db "github.com/tendermint/tm-db" dbm "github.com/tendermint/tm-db" cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/libs/log" + tmrand "github.com/tendermint/tendermint/libs/rand" + tmstore "github.com/tendermint/tendermint/proto/tendermint/store" + tmversion "github.com/tendermint/tendermint/proto/tendermint/version" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" + "github.com/tendermint/tendermint/version" ) // A cleanupFunc cleans up any config / test files created for a particular @@ -31,11 +34,12 @@ type cleanupFunc func() func makeTestCommit(height int64, timestamp time.Time) *types.Commit { commitSigs := []types.CommitSig{{ BlockIDFlag: types.BlockIDFlagCommit, - ValidatorAddress: []byte("ValidatorAddress"), + ValidatorAddress: tmrand.Bytes(crypto.AddressSize), Timestamp: timestamp, Signature: []byte("Signature"), }} - return types.NewCommit(height, 0, types.BlockID{}, commitSigs) + return types.NewCommit(height, 0, + types.BlockID{Hash: []byte(""), PartSetHeader: types.PartSetHeader{Hash: []byte(""), Total: 2}}, commitSigs) } func makeTxs(height int64) (txs []types.Tx) { @@ -56,45 +60,42 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFu // stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB()) blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() - state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) + stateStore := sm.NewStore(stateDB) + state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) if err != nil { - panic(errors.Wrap(err, "error constructing state from genesis file")) + panic(fmt.Errorf("error constructing state from genesis file: %w", err)) } return state, NewBlockStore(blockDB), func() { os.RemoveAll(config.RootDir) } } -func TestLoadBlockStoreStateJSON(t *testing.T) { - db := db.NewMemDB() - bsj := &BlockStoreStateJSON{Base: 100, Height: 1000} - bsj.Save(db) - - retrBSJ := LoadBlockStoreStateJSON(db) - assert.Equal(t, *bsj, retrBSJ, "expected the retrieved DBs to match") -} - -func TestLoadBlockStoreStateJSON_Empty(t *testing.T) { - db := db.NewMemDB() - - bsj := &BlockStoreStateJSON{} - bsj.Save(db) +func TestLoadBlockStoreState(t *testing.T) { - retrBSJ := LoadBlockStoreStateJSON(db) - assert.Equal(t, BlockStoreStateJSON{}, retrBSJ, "expected the retrieved DBs to match") -} - -func TestLoadBlockStoreStateJSON_NoBase(t *testing.T) { - db := db.NewMemDB() + type blockStoreTest struct { + testName string + bss *tmstore.BlockStoreState + want tmstore.BlockStoreState + } - bsj := &BlockStoreStateJSON{Height: 1000} - bsj.Save(db) + testCases := []blockStoreTest{ + {"success", &tmstore.BlockStoreState{Base: 100, Height: 1000}, + tmstore.BlockStoreState{Base: 100, Height: 1000}}, + {"empty", &tmstore.BlockStoreState{}, 
tmstore.BlockStoreState{}}, + {"no base", &tmstore.BlockStoreState{Height: 1000}, tmstore.BlockStoreState{Base: 1, Height: 1000}}, + } - retrBSJ := LoadBlockStoreStateJSON(db) - assert.Equal(t, BlockStoreStateJSON{Base: 1, Height: 1000}, retrBSJ, "expected the retrieved DBs to match") + for _, tc := range testCases { + db := dbm.NewMemDB() + SaveBlockStoreState(tc.bss, db) + retrBSJ := LoadBlockStoreState(db) + assert.Equal(t, tc.want, retrBSJ, "expected the retrieved DBs to match: %s", tc.testName) + } } func TestNewBlockStore(t *testing.T) { - db := db.NewMemDB() - err := db.Set(blockStoreKey, []byte(`{"base": "100", "height": "10000"}`)) + db := dbm.NewMemDB() + bss := tmstore.BlockStoreState{Base: 100, Height: 10000} + bz, _ := proto.Marshal(&bss) + err := db.Set(blockStoreKey, bz) require.NoError(t, err) bs := NewBlockStore(db) require.Equal(t, int64(100), bs.Base(), "failed to properly parse blockstore") @@ -121,14 +122,14 @@ func TestNewBlockStore(t *testing.T) { assert.Contains(t, fmt.Sprintf("%#v", panicErr), tt.wantErr, "#%d data: %q", i, tt.data) } - err = db.Set(blockStoreKey, nil) + err = db.Set(blockStoreKey, []byte{}) require.NoError(t, err) bs = NewBlockStore(db) - assert.Equal(t, bs.Height(), int64(0), "expecting nil bytes to be unmarshaled alright") + assert.Equal(t, bs.Height(), int64(0), "expecting empty bytes to be unmarshaled alright") } -func freshBlockStore() (*BlockStore, db.DB) { - db := db.NewMemDB() +func freshBlockStore() (*BlockStore, dbm.DB) { + db := dbm.NewMemDB() return NewBlockStore(db), db } @@ -180,12 +181,15 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { incompletePartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 2}) uncontiguousPartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 0}) - uncontiguousPartSet.AddPart(part2) + _, err := uncontiguousPartSet.AddPart(part2) + require.Error(t, err) header1 := types.Header{ - Height: 1, - ChainID: "block_test", - Time: tmtime.Now(), + Version: tmversion.Consensus{Block: version.BlockProtocol}, + Height: 1, + ChainID: "block_test", + Time: tmtime.Now(), + ProposerAddress: tmrand.Bytes(crypto.AddressSize), } // End of setup, test data @@ -217,7 +221,12 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { { block: newBlock( // New block at height 5 in empty block store is fine - types.Header{Height: 5, ChainID: "block_test", Time: tmtime.Now()}, + types.Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + Height: 5, + ChainID: "block_test", + Time: tmtime.Now(), + ProposerAddress: tmrand.Bytes(crypto.AddressSize)}, makeTestCommit(5, tmtime.Now()), ), parts: validPartSet, @@ -235,14 +244,14 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { parts: validPartSet, seenCommit: seenCommit1, corruptCommitInDB: true, // Corrupt the DB's commit entry - wantPanic: "unmarshal to types.Commit failed", + wantPanic: "error reading block commit", }, { block: newBlock(header1, commitAtH10), parts: validPartSet, seenCommit: seenCommit1, - wantPanic: "unmarshal to types.BlockMeta failed", + wantPanic: "unmarshal to tmproto.BlockMeta", corruptBlockInDB: true, // Corrupt the DB's block entry }, @@ -261,7 +270,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { seenCommit: seenCommit1, corruptSeenCommitInDB: true, - wantPanic: "unmarshal to types.Commit failed", + wantPanic: "error reading block seen commit", }, { @@ -300,7 +309,8 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { bBlockMeta := bs.LoadBlockMeta(tuple.block.Height) if tuple.eraseSeenCommitInDB { - 
db.Delete(calcSeenCommitKey(tuple.block.Height)) + err := db.Delete(calcSeenCommitKey(tuple.block.Height)) + require.NoError(t, err) } if tuple.corruptSeenCommitInDB { err := db.Set(calcSeenCommitKey(tuple.block.Height), []byte("bogus-seen-commit")) @@ -310,7 +320,8 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { commitHeight := tuple.block.Height - 1 if tuple.eraseCommitInDB { - db.Delete(calcBlockCommitKey(commitHeight)) + err := db.Delete(calcBlockCommitKey(commitHeight)) + require.NoError(t, err) } if tuple.corruptCommitInDB { err := db.Set(calcBlockCommitKey(commitHeight), []byte("foo-bogus")) @@ -355,6 +366,29 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } } +func TestLoadBaseMeta(t *testing.T) { + config := cfg.ResetTestRoot("blockchain_reactor_test") + defer os.RemoveAll(config.RootDir) + stateStore := sm.NewStore(dbm.NewMemDB()) + state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) + require.NoError(t, err) + bs := NewBlockStore(dbm.NewMemDB()) + + for h := int64(1); h <= 10; h++ { + block := makeBlock(h, state, new(types.Commit)) + partSet := block.MakePartSet(2) + seenCommit := makeTestCommit(h, tmtime.Now()) + bs.SaveBlock(block, partSet, seenCommit) + } + + _, err = bs.PruneBlocks(4) + require.NoError(t, err) + + baseBlock := bs.LoadBaseMeta() + assert.EqualValues(t, 4, baseBlock.Header.Height) + assert.EqualValues(t, 4, bs.Base()) +} + func TestLoadBlockPart(t *testing.T) { bs, db := freshBlockStore() height, index := int64(10), 1 @@ -374,10 +408,12 @@ func TestLoadBlockPart(t *testing.T) { require.NoError(t, err) res, _, panicErr = doFn(loadPart) require.NotNil(t, panicErr, "expecting a non-nil panic") - require.Contains(t, panicErr.Error(), "unmarshal to types.Part failed") + require.Contains(t, panicErr.Error(), "unmarshal to tmproto.Part failed") // 3. A good block serialized and saved to the DB should be retrievable - err = db.Set(calcBlockPartKey(height, index), cdc.MustMarshalBinaryBare(part1)) + pb1, err := part1.ToProto() + require.NoError(t, err) + err = db.Set(calcBlockPartKey(height, index), mustEncode(pb1)) require.NoError(t, err) gotPart, _, panicErr := doFn(loadPart) require.Nil(t, panicErr, "an existent and proper block should not panic") @@ -389,7 +425,8 @@ func TestLoadBlockPart(t *testing.T) { func TestPruneBlocks(t *testing.T) { config := cfg.ResetTestRoot("blockchain_reactor_test") defer os.RemoveAll(config.RootDir) - state, err := sm.LoadStateFromDBOrGenesisFile(dbm.NewMemDB(), config.GenesisFile()) + stateStore := sm.NewStore(dbm.NewMemDB()) + state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) require.NoError(t, err) db := dbm.NewMemDB() bs := NewBlockStore(db) @@ -425,10 +462,10 @@ func TestPruneBlocks(t *testing.T) { assert.EqualValues(t, 1200, bs.Base()) assert.EqualValues(t, 1500, bs.Height()) assert.EqualValues(t, 301, bs.Size()) - assert.EqualValues(t, BlockStoreStateJSON{ + assert.EqualValues(t, tmstore.BlockStoreState{ Base: 1200, Height: 1500, - }, LoadBlockStoreStateJSON(db)) + }, LoadBlockStoreState(db)) require.NotNil(t, bs.LoadBlock(1200)) require.Nil(t, bs.LoadBlock(1199)) @@ -491,17 +528,24 @@ func TestLoadBlockMeta(t *testing.T) { require.NoError(t, err) res, _, panicErr = doFn(loadMeta) require.NotNil(t, panicErr, "expecting a non-nil panic") - require.Contains(t, panicErr.Error(), "unmarshal to types.BlockMeta") + require.Contains(t, panicErr.Error(), "unmarshal to tmproto.BlockMeta") // 3. 
A good blockMeta serialized and saved to the DB should be retrievable - meta := &types.BlockMeta{} - err = db.Set(calcBlockMetaKey(height), cdc.MustMarshalBinaryBare(meta)) + meta := &types.BlockMeta{Header: types.Header{ + Version: tmversion.Consensus{ + Block: version.BlockProtocol, App: 0}, Height: 1, ProposerAddress: tmrand.Bytes(crypto.AddressSize)}} + pbm := meta.ToProto() + err = db.Set(calcBlockMetaKey(height), mustEncode(pbm)) require.NoError(t, err) gotMeta, _, panicErr := doFn(loadMeta) require.Nil(t, panicErr, "an existent and proper block should not panic") require.Nil(t, res, "a properly saved blockMeta should return a proper blocMeta ") - require.Equal(t, cdc.MustMarshalBinaryBare(meta), cdc.MustMarshalBinaryBare(gotMeta), - "expecting successful retrieval of previously saved blockMeta") + pbmeta := meta.ToProto() + if gmeta, ok := gotMeta.(*types.BlockMeta); ok { + pbgotMeta := gmeta.ToProto() + require.Equal(t, mustEncode(pbmeta), mustEncode(pbgotMeta), + "expecting successful retrieval of previously saved blockMeta") + } } func TestBlockFetchAtHeight(t *testing.T) { @@ -516,8 +560,12 @@ func TestBlockFetchAtHeight(t *testing.T) { require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") blockAtHeight := bs.LoadBlock(bs.Height()) - bz1 := cdc.MustMarshalBinaryBare(block) - bz2 := cdc.MustMarshalBinaryBare(blockAtHeight) + b1, err := block.ToProto() + require.NoError(t, err) + b2, err := blockAtHeight.ToProto() + require.NoError(t, err) + bz1 := mustEncode(b1) + bz2 := mustEncode(b2) require.Equal(t, bz1, bz2) require.Equal(t, block.Hash(), blockAtHeight.Hash(), "expecting a successful load of the last saved block") diff --git a/test/Makefile b/test/Makefile new file mode 100644 index 000000000..77b29cfa0 --- /dev/null +++ b/test/Makefile @@ -0,0 +1,69 @@ +#!/usr/bin/make -f + +######################################## +### Testing + +BINDIR ?= $(GOPATH)/bin + +## required to be run first by most tests +build_docker_test_image: + docker build -t tester -f ./test/docker/Dockerfile . 
+.PHONY: build_docker_test_image
+
+### coverage, app, persistence, and libs tests
+test_cover:
+ # run the go unit tests with coverage
+ bash test/test_cover.sh
+.PHONY: test_cover
+
+test_apps:
+ # run the app tests using bash
+ # requires `abci-cli` and `tendermint` binaries installed
+ bash test/app/test.sh
+.PHONY: test_apps
+
+test_abci_apps:
+ bash abci/tests/test_app/test.sh
+.PHONY: test_abci_apps
+
+test_abci_cli:
+ # test the cli against the examples in the tutorial at:
+ # ./docs/abci-cli.md
+ # if test fails, update the docs ^
+ @ bash abci/tests/test_cli/test.sh
+.PHONY: test_abci_cli
+
+test_integrations:
+ make build_docker_test_image
+ make tools
+ make install
+ make test_cover
+ make test_apps
+ make test_abci_apps
+ make test_abci_cli
+ make test_libs
+.PHONY: test_integrations
+
+test_release:
+ @go test -tags release $(PACKAGES)
+.PHONY: test_release
+
+test100:
+ @for i in {1..100}; do make test; done
+.PHONY: test100
+
+### go tests
+test:
+ @echo "--> Running go test"
+ @go test -p 1 $(PACKAGES) -tags deadlock
+.PHONY: test
+
+test_race:
+ @echo "--> Running go test --race"
+ @go test -p 1 -v -race $(PACKAGES)
+.PHONY: test_race
+
+test_deadlock:
+ @echo "--> Running go test --deadlock"
+ @go test -p 1 -v $(PACKAGES) -tags deadlock
+.PHONY: test_deadlock
diff --git a/test/README.md b/test/README.md
index fc436948a..0e0d666e5 100644
--- a/test/README.md
+++ b/test/README.md
@@ -7,15 +7,10 @@ Running the integrations test will build a docker container with local version o
 and run the following tests in docker containers:
 - go tests, with --race
- - includes test coverage
+ - includes test coverage
 - app tests
- - kvstore app over socket
- - counter app over socket
- - counter app over grpc
+ - kvstore app over socket
+ - counter app over socket
+ - counter app over grpc
 - persistence tests
- - crash tendermint at each of many predefined points, restart, and ensure it syncs properly with the app
-- p2p tests
- - start a local kvstore app testnet on a docker network (requires docker version 1.10+)
- - send a tx on each node and ensure the state root is updated on all of them
- - crash and restart nodes one at a time and ensure they can sync back up (via fastsync)
- - crash and restart all nodes at once and ensure they can sync back up
+ - crash tendermint at each of many predefined points, restart, and ensure it syncs properly with the app
diff --git a/test/app/grpc_client.go b/test/app/grpc_client.go
index 88e4650ab..73022aaf8 100644
--- a/test/app/grpc_client.go
+++ b/test/app/grpc_client.go
@@ -7,8 +7,7 @@ import (
 "context"
- amino "github.com/tendermint/go-amino"
-
+ tmjson "github.com/tendermint/tendermint/libs/json"
 coregrpc "github.com/tendermint/tendermint/rpc/grpc"
 )
@@ -34,7 +33,7 @@ func main() {
 os.Exit(1)
 }
- bz, err := amino.NewCodec().MarshalJSON(res)
+ bz, err := tmjson.Marshal(res)
 if err != nil {
 fmt.Println(err)
 os.Exit(1)
diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile
index fb5458e82..3628be9f9 100644
--- a/test/docker/Dockerfile
+++ b/test/docker/Dockerfile
@@ -1,7 +1,4 @@
-FROM golang:1.13
-
-# Add testing deps for curl
-RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list
+FROM golang:1.15
 
 # Grab deps (jq, hexdump, xxd, killall)
 RUN apt-get update && \
@@ -28,11 +25,11 @@ RUN make install_abci
 RUN make install
 
 RUN tendermint testnet \
- --config $REPO/test/docker/config-template.toml \
- --node-dir-prefix="mach" \
- --v=4 \
- --populate-persistent-peers=false \
- --o=$REPO/test/p2p/data
+ --config $REPO/test/docker/config-template.toml \
+ --node-dir-prefix="mach" \
+ --v=4 \
+ --populate-persistent-peers=false \
+ --o=$REPO/test/p2p/data
 
 # Now copy in the code
 # NOTE: this will overwrite whatever is in vendor/
diff --git a/test/e2e/Makefile b/test/e2e/Makefile
new file mode 100644
index 000000000..c9eb8bc19
--- /dev/null
+++ b/test/e2e/Makefile
@@ -0,0 +1,23 @@
+all: docker generator runner
+
+docker:
+ docker build --tag tendermint/e2e-node -f docker/Dockerfile ../..
+
+# We need to build support for database backends into the app in
+# order to build a binary with a Tendermint node in it (for built-in
+# ABCI testing).
+app:
+ go build -o build/app -tags badgerdb,boltdb,cleveldb,rocksdb ./app
+
+# To be used primarily by the e2e docker instance. If you want to produce this binary
+# elsewhere, then run go build in the maverick directory.
+maverick:
+ go build -o build/maverick -tags badgerdb,boltdb,cleveldb,rocksdb ../maverick
+
+generator:
+ go build -o build/generator ./generator
+
+runner:
+ go build -o build/runner ./runner
+
+.PHONY: all app docker generator maverick runner
diff --git a/test/e2e/README.md b/test/e2e/README.md
new file mode 100644
index 000000000..d17864611
--- /dev/null
+++ b/test/e2e/README.md
@@ -0,0 +1,96 @@
+# End-to-End Tests
+
+Spins up and tests Tendermint networks in Docker Compose based on a testnet manifest. To run the CI testnet:
+
+```sh
+make
+./build/runner -f networks/ci.toml
+```
+
+This creates and runs a testnet named `ci` under `networks/ci/` (determined by the manifest filename).
+
+## Testnet Manifests
+
+Testnets are specified as TOML manifests. For an example see [`networks/ci.toml`](networks/ci.toml), and for documentation see [`pkg/manifest.go`](pkg/manifest.go).
+
+## Random Testnet Generation
+
+Random (but deterministic) combinations of testnets can be generated with `generator`:
+
+```sh
+./build/generator -d networks/generated/
+
+# Split networks into 8 groups (by filename)
+./build/generator -g 8 -d networks/generated/
+```
+
+Multiple testnets can be run with the `run-multiple.sh` script:
+
+```sh
+./run-multiple.sh networks/generated/gen-group3-*.toml
+```
+
+## Test Stages
+
+The test runner has the following stages, which can also be executed explicitly by running `./build/runner -f <manifest> <stage>`:
+
+* `setup`: generates configuration files.
+
+* `start`: starts Docker containers.
+
+* `load`: generates a transaction load against the testnet nodes.
+
+* `perturb`: runs any requested perturbations (e.g. node restarts or network disconnects).
+
+* `wait`: waits for a few blocks to be produced, and for all nodes to catch up to it.
+
+* `test`: runs test cases in `tests/` against all nodes in a running testnet.
+
+* `stop`: stops Docker containers.
+
+* `cleanup`: removes configuration files and Docker containers/networks.
+
+* `logs`: outputs all node logs.
+
+* `tail`: tails (follows) node logs until cancelled.
+
+## Tests
+
+Test cases are written as normal Go tests in `tests/`. They use a `testNode()` helper which executes each test as a parallel subtest for each node in the network.
+
+### Running Manual Tests
+
+To run tests manually, set the `E2E_MANIFEST` environment variable to the path of the testnet manifest (e.g. `networks/ci.toml`) and run them as normal, e.g.:
+
+```sh
+./build/runner -f networks/ci.toml start
+E2E_MANIFEST=networks/ci.toml go test -v ./tests/...
+```
+
+Optionally, `E2E_NODE` specifies the name of a single testnet node to test.
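+
+For example, to test only the node named `validator01` (the same node name used
+in the snippet below; any node in the running testnet can be given):
+
+```sh
+E2E_MANIFEST=networks/ci.toml E2E_NODE=validator01 go test -v ./tests/...
+```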
+
+These environment variables can also be specified in `tests/e2e_test.go` to run tests from an editor or IDE:
+
+```go
+func init() {
+ // This can be used to manually specify a testnet manifest and/or node to
+ // run tests against. The testnet must have been started by the runner first.
+ os.Setenv("E2E_MANIFEST", "networks/ci.toml")
+ os.Setenv("E2E_NODE", "validator01")
+}
+```
+
+### Debugging Failures
+
+If a command or test fails, the runner simply exits with an error message and non-zero status code. The testnet is left running with data in the testnet directory, and can be inspected with e.g. `docker ps`, `docker logs`, or `./build/runner -f <manifest> logs` or `tail`. To shut down and remove the testnet, run `./build/runner -f <manifest> cleanup`.
+
+## Enabling IPv6
+
+Docker does not enable IPv6 by default. To do so, enter the following in `daemon.json` (or in the Docker for Mac UI under Preferences → Docker Engine):
+
+```json
+{
+ "ipv6": true,
+ "fixed-cidr-v6": "2001:db8:1::/64"
+}
+```
diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go
new file mode 100644
index 000000000..13002a708
--- /dev/null
+++ b/test/e2e/app/app.go
@@ -0,0 +1,219 @@
+package main
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/tendermint/tendermint/abci/example/code"
+ abci "github.com/tendermint/tendermint/abci/types"
+ "github.com/tendermint/tendermint/libs/log"
+ "github.com/tendermint/tendermint/version"
+)
+
+// Application is an ABCI application for use by end-to-end tests. It is a
+// simple key/value store for strings, storing data in memory and persisting
+// to disk as JSON, taking state sync snapshots if requested.
+type Application struct {
+ abci.BaseApplication
+ logger log.Logger
+ state *State
+ snapshots *SnapshotStore
+ cfg *Config
+ restoreSnapshot *abci.Snapshot
+ restoreChunks [][]byte
+}
+
+// NewApplication creates the application.
+func NewApplication(cfg *Config) (*Application, error) {
+ state, err := NewState(filepath.Join(cfg.Dir, "state.json"), cfg.PersistInterval)
+ if err != nil {
+ return nil, err
+ }
+ snapshots, err := NewSnapshotStore(filepath.Join(cfg.Dir, "snapshots"))
+ if err != nil {
+ return nil, err
+ }
+ return &Application{
+ logger: log.NewTMLogger(log.NewSyncWriter(os.Stdout)),
+ state: state,
+ snapshots: snapshots,
+ cfg: cfg,
+ }, nil
+}
+
+// Info implements ABCI.
+func (app *Application) Info(req abci.RequestInfo) abci.ResponseInfo {
+ return abci.ResponseInfo{
+ Version: version.ABCIVersion,
+ AppVersion: 1,
+ LastBlockHeight: int64(app.state.Height),
+ LastBlockAppHash: app.state.Hash,
+ }
+}
+
+// InitChain implements ABCI.
+func (app *Application) InitChain(req abci.RequestInitChain) abci.ResponseInitChain {
+ var err error
+ app.state.initialHeight = uint64(req.InitialHeight)
+ if len(req.AppStateBytes) > 0 {
+ err = app.state.Import(0, req.AppStateBytes)
+ if err != nil {
+ panic(err)
+ }
+ }
+ resp := abci.ResponseInitChain{
+ AppHash: app.state.Hash,
+ }
+ if resp.Validators, err = app.validatorUpdates(0); err != nil {
+ panic(err)
+ }
+ return resp
+}
+
+// CheckTx implements ABCI.
+func (app *Application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
+ _, _, err := parseTx(req.Tx)
+ if err != nil {
+ return abci.ResponseCheckTx{
+ Code: code.CodeTypeEncodingError,
+ Log: err.Error(),
+ }
+ }
+ return abci.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
+}
+
+// DeliverTx implements ABCI.
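+// Transactions are expected in "key=value" form (see parseTx below), so a tx
+// like "foo=bar" stores the value "bar" under the key "foo".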
+func (app *Application) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { + key, value, err := parseTx(req.Tx) + if err != nil { + panic(err) // shouldn't happen since we verified it in CheckTx + } + app.state.Set(key, value) + return abci.ResponseDeliverTx{Code: code.CodeTypeOK} +} + +// EndBlock implements ABCI. +func (app *Application) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { + var err error + resp := abci.ResponseEndBlock{} + if resp.ValidatorUpdates, err = app.validatorUpdates(uint64(req.Height)); err != nil { + panic(err) + } + return resp +} + +// Commit implements ABCI. +func (app *Application) Commit() abci.ResponseCommit { + height, hash, err := app.state.Commit() + if err != nil { + panic(err) + } + if app.cfg.SnapshotInterval > 0 && height%app.cfg.SnapshotInterval == 0 { + snapshot, err := app.snapshots.Create(app.state) + if err != nil { + panic(err) + } + logger.Info("Created state sync snapshot", "height", snapshot.Height) + } + retainHeight := int64(0) + if app.cfg.RetainBlocks > 0 { + retainHeight = int64(height - app.cfg.RetainBlocks + 1) + } + return abci.ResponseCommit{ + Data: hash, + RetainHeight: retainHeight, + } +} + +// Query implements ABCI. +func (app *Application) Query(req abci.RequestQuery) abci.ResponseQuery { + return abci.ResponseQuery{ + Height: int64(app.state.Height), + Key: req.Data, + Value: []byte(app.state.Get(string(req.Data))), + } +} + +// ListSnapshots implements ABCI. +func (app *Application) ListSnapshots(req abci.RequestListSnapshots) abci.ResponseListSnapshots { + snapshots, err := app.snapshots.List() + if err != nil { + panic(err) + } + return abci.ResponseListSnapshots{Snapshots: snapshots} +} + +// LoadSnapshotChunk implements ABCI. +func (app *Application) LoadSnapshotChunk(req abci.RequestLoadSnapshotChunk) abci.ResponseLoadSnapshotChunk { + chunk, err := app.snapshots.LoadChunk(req.Height, req.Format, req.Chunk) + if err != nil { + panic(err) + } + return abci.ResponseLoadSnapshotChunk{Chunk: chunk} +} + +// OfferSnapshot implements ABCI. +func (app *Application) OfferSnapshot(req abci.RequestOfferSnapshot) abci.ResponseOfferSnapshot { + if app.restoreSnapshot != nil { + panic("A snapshot is already being restored") + } + app.restoreSnapshot = req.Snapshot + app.restoreChunks = [][]byte{} + return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT} +} + +// ApplySnapshotChunk implements ABCI. +func (app *Application) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) abci.ResponseApplySnapshotChunk { + if app.restoreSnapshot == nil { + panic("No restore in progress") + } + app.restoreChunks = append(app.restoreChunks, req.Chunk) + if len(app.restoreChunks) == int(app.restoreSnapshot.Chunks) { + bz := []byte{} + for _, chunk := range app.restoreChunks { + bz = append(bz, chunk...) + } + err := app.state.Import(app.restoreSnapshot.Height, bz) + if err != nil { + panic(err) + } + app.restoreSnapshot = nil + app.restoreChunks = nil + } + return abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT} +} + +// validatorUpdates generates a validator set update. 
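+// Updates are looked up in the config's validator_update map by height; a
+// missing or empty entry means the validator set is unchanged at that height.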
+func (app *Application) validatorUpdates(height uint64) (abci.ValidatorUpdates, error) { + updates := app.cfg.ValidatorUpdates[fmt.Sprintf("%v", height)] + if len(updates) == 0 { + return nil, nil + } + + valUpdates := abci.ValidatorUpdates{} + for keyString, power := range updates { + + keyBytes, err := base64.StdEncoding.DecodeString(keyString) + if err != nil { + return nil, fmt.Errorf("invalid base64 pubkey value %q: %w", keyString, err) + } + valUpdates = append(valUpdates, abci.UpdateValidator(keyBytes, int64(power), app.cfg.KeyType)) + } + return valUpdates, nil +} + +// parseTx parses a tx in 'key=value' format into a key and value. +func parseTx(tx []byte) (string, string, error) { + parts := bytes.Split(tx, []byte("=")) + if len(parts) != 2 { + return "", "", fmt.Errorf("invalid tx format: %q", string(tx)) + } + if len(parts[0]) == 0 { + return "", "", errors.New("key cannot be empty") + } + return string(parts[0]), string(parts[1]), nil +} diff --git a/test/e2e/app/config.go b/test/e2e/app/config.go new file mode 100644 index 000000000..38c967916 --- /dev/null +++ b/test/e2e/app/config.go @@ -0,0 +1,53 @@ +//nolint: goconst +package main + +import ( + "errors" + "fmt" + + "github.com/BurntSushi/toml" +) + +// Config is the application configuration. +type Config struct { + ChainID string `toml:"chain_id"` + Listen string + Protocol string + Dir string + PersistInterval uint64 `toml:"persist_interval"` + SnapshotInterval uint64 `toml:"snapshot_interval"` + RetainBlocks uint64 `toml:"retain_blocks"` + ValidatorUpdates map[string]map[string]uint8 `toml:"validator_update"` + PrivValServer string `toml:"privval_server"` + PrivValKey string `toml:"privval_key"` + PrivValState string `toml:"privval_state"` + Misbehaviors map[string]string `toml:"misbehaviors"` + KeyType string `toml:"key_type"` +} + +// LoadConfig loads the configuration from disk. +func LoadConfig(file string) (*Config, error) { + cfg := &Config{ + Listen: "unix:///var/run/app.sock", + Protocol: "socket", + PersistInterval: 1, + } + _, err := toml.DecodeFile(file, &cfg) + if err != nil { + return nil, fmt.Errorf("failed to load config from %q: %w", file, err) + } + return cfg, cfg.Validate() +} + +// Validate validates the configuration. We don't do exhaustive config +// validation here, instead relying on Testnet.Validate() to handle it. 
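+// Only the essentials are checked: chain_id must always be set, and listen
+// must be set unless the app runs built into the node ("builtin" protocol).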
+func (cfg Config) Validate() error {
+ switch {
+ case cfg.ChainID == "":
+ return errors.New("chain_id parameter is required")
+ case cfg.Listen == "" && cfg.Protocol != "builtin":
+ return errors.New("listen parameter is required")
+ default:
+ return nil
+ }
+}
diff --git a/test/e2e/app/main.go b/test/e2e/app/main.go
new file mode 100644
index 000000000..25d995e2c
--- /dev/null
+++ b/test/e2e/app/main.go
@@ -0,0 +1,237 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "time"
+
+ "github.com/spf13/viper"
+
+ "github.com/tendermint/tendermint/abci/server"
+ "github.com/tendermint/tendermint/config"
+ "github.com/tendermint/tendermint/crypto/ed25519"
+ tmflags "github.com/tendermint/tendermint/libs/cli/flags"
+ "github.com/tendermint/tendermint/libs/log"
+ tmnet "github.com/tendermint/tendermint/libs/net"
+ "github.com/tendermint/tendermint/node"
+ "github.com/tendermint/tendermint/p2p"
+ "github.com/tendermint/tendermint/privval"
+ "github.com/tendermint/tendermint/proxy"
+ mcs "github.com/tendermint/tendermint/test/maverick/consensus"
+ maverick "github.com/tendermint/tendermint/test/maverick/node"
+)
+
+var logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
+
+// main is the binary entrypoint.
+func main() {
+ if len(os.Args) != 2 {
+ fmt.Printf("Usage: %v <configfile>", os.Args[0])
+ return
+ }
+ configFile := ""
+ if len(os.Args) == 2 {
+ configFile = os.Args[1]
+ }
+
+ if err := run(configFile); err != nil {
+ logger.Error(err.Error())
+ os.Exit(1)
+ }
+}
+
+// run runs the application - basically like main() with error handling.
+func run(configFile string) error {
+ cfg, err := LoadConfig(configFile)
+ if err != nil {
+ return err
+ }
+
+ // Start remote signer (must start before node if running builtin).
+ if cfg.PrivValServer != "" {
+ if err = startSigner(cfg); err != nil {
+ return err
+ }
+ if cfg.Protocol == "builtin" {
+ time.Sleep(1 * time.Second)
+ }
+ }
+
+ // Start app server.
+ switch cfg.Protocol {
+ case "socket", "grpc":
+ err = startApp(cfg)
+ case "builtin":
+ if len(cfg.Misbehaviors) == 0 {
+ err = startNode(cfg)
+ } else {
+ err = startMaverick(cfg)
+ }
+ default:
+ err = fmt.Errorf("invalid protocol %q", cfg.Protocol)
+ }
+ if err != nil {
+ return err
+ }
+
+ // Apparently there's no way to wait for the server, so we just sleep
+ for {
+ time.Sleep(1 * time.Hour)
+ }
+}
+
+// startApp starts the application server, listening for connections from Tendermint.
+func startApp(cfg *Config) error {
+ app, err := NewApplication(cfg)
+ if err != nil {
+ return err
+ }
+ server, err := server.NewServer(cfg.Listen, cfg.Protocol, app)
+ if err != nil {
+ return err
+ }
+ err = server.Start()
+ if err != nil {
+ return err
+ }
+ logger.Info(fmt.Sprintf("Server listening on %v (%v protocol)", cfg.Listen, cfg.Protocol))
+ return nil
+}
+
+// startNode starts a Tendermint node running the application directly. It assumes the Tendermint
+// configuration is in $TMHOME/config/tendermint.toml.
+//
+// FIXME There is no way to simply load the configuration from a file, so we need to pull in Viper.
+func startNode(cfg *Config) error { + app, err := NewApplication(cfg) + if err != nil { + return err + } + + tmcfg, nodeLogger, nodeKey, err := setupNode() + if err != nil { + return fmt.Errorf("failed to setup config: %w", err) + } + + pval, err := privval.LoadOrGenFilePV(tmcfg.PrivValidatorKeyFile(), tmcfg.PrivValidatorStateFile()) + if err != nil { + return err + } + n, err := node.NewNode(tmcfg, + pval, + nodeKey, + proxy.NewLocalClientCreator(app), + node.DefaultGenesisDocProviderFunc(tmcfg), + node.DefaultDBProvider, + node.DefaultMetricsProvider(tmcfg.Instrumentation), + nodeLogger, + ) + if err != nil { + return err + } + return n.Start() +} + +// startMaverick starts a Maverick node that runs the application directly. It assumes the Tendermint +// configuration is in $TMHOME/config/tendermint.toml. +func startMaverick(cfg *Config) error { + app, err := NewApplication(cfg) + if err != nil { + return err + } + + tmcfg, logger, nodeKey, err := setupNode() + if err != nil { + return fmt.Errorf("failed to setup config: %w", err) + } + + misbehaviors := make(map[int64]mcs.Misbehavior, len(cfg.Misbehaviors)) + for heightString, misbehaviorString := range cfg.Misbehaviors { + height, _ := strconv.ParseInt(heightString, 10, 64) + misbehaviors[height] = mcs.MisbehaviorList[misbehaviorString] + } + + n, err := maverick.NewNode(tmcfg, + maverick.LoadOrGenFilePV(tmcfg.PrivValidatorKeyFile(), tmcfg.PrivValidatorStateFile()), + nodeKey, + proxy.NewLocalClientCreator(app), + maverick.DefaultGenesisDocProviderFunc(tmcfg), + maverick.DefaultDBProvider, + maverick.DefaultMetricsProvider(tmcfg.Instrumentation), + logger, + misbehaviors, + ) + if err != nil { + return err + } + + return n.Start() +} + +// startSigner starts a signer server connecting to the given endpoint. 
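+// The endpoint follows the protocol://address convention used elsewhere in
+// Tendermint, e.g. tcp://host:port or unix:///path/to.sock.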
+func startSigner(cfg *Config) error { + filePV := privval.LoadFilePV(cfg.PrivValKey, cfg.PrivValState) + + protocol, address := tmnet.ProtocolAndAddress(cfg.PrivValServer) + var dialFn privval.SocketDialer + switch protocol { + case "tcp": + dialFn = privval.DialTCPFn(address, 3*time.Second, ed25519.GenPrivKey()) + case "unix": + dialFn = privval.DialUnixFn(address) + default: + return fmt.Errorf("invalid privval protocol %q", protocol) + } + + endpoint := privval.NewSignerDialerEndpoint(logger, dialFn, + privval.SignerDialerEndpointRetryWaitInterval(1*time.Second), + privval.SignerDialerEndpointConnRetries(100)) + err := privval.NewSignerServer(endpoint, cfg.ChainID, filePV).Start() + if err != nil { + return err + } + logger.Info(fmt.Sprintf("Remote signer connecting to %v", cfg.PrivValServer)) + return nil +} + +func setupNode() (*config.Config, log.Logger, *p2p.NodeKey, error) { + var tmcfg *config.Config + + home := os.Getenv("TMHOME") + if home == "" { + return nil, nil, nil, errors.New("TMHOME not set") + } + viper.AddConfigPath(filepath.Join(home, "config")) + viper.SetConfigName("config") + err := viper.ReadInConfig() + if err != nil { + return nil, nil, nil, err + } + tmcfg = config.DefaultConfig() + err = viper.Unmarshal(tmcfg) + if err != nil { + return nil, nil, nil, err + } + tmcfg.SetRoot(home) + if err = tmcfg.ValidateBasic(); err != nil { + return nil, nil, nil, fmt.Errorf("error in config file: %w", err) + } + if tmcfg.LogFormat == config.LogFormatJSON { + logger = log.NewTMJSONLogger(log.NewSyncWriter(os.Stdout)) + } + nodeLogger, err := tmflags.ParseLogLevel(tmcfg.LogLevel, logger, config.DefaultLogLevel()) + if err != nil { + return nil, nil, nil, err + } + nodeLogger = nodeLogger.With("module", "main") + + nodeKey, err := p2p.LoadOrGenNodeKey(tmcfg.NodeKeyFile()) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to load or gen node key %s: %w", tmcfg.NodeKeyFile(), err) + } + + return tmcfg, nodeLogger, nodeKey, nil +} diff --git a/test/e2e/app/snapshots.go b/test/e2e/app/snapshots.go new file mode 100644 index 000000000..590b13cee --- /dev/null +++ b/test/e2e/app/snapshots.go @@ -0,0 +1,155 @@ +// nolint: gosec +package main + +import ( + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math" + "os" + "path/filepath" + "sync" + + abci "github.com/tendermint/tendermint/abci/types" +) + +const ( + snapshotChunkSize = 1e6 +) + +// SnapshotStore stores state sync snapshots. Snapshots are stored simply as +// JSON files, and chunks are generated on-the-fly by splitting the JSON data +// into fixed-size chunks. +type SnapshotStore struct { + sync.RWMutex + dir string + metadata []abci.Snapshot +} + +// NewSnapshotStore creates a new snapshot store. +func NewSnapshotStore(dir string) (*SnapshotStore, error) { + store := &SnapshotStore{dir: dir} + if err := os.MkdirAll(dir, 0755); err != nil { + return nil, err + } + if err := store.loadMetadata(); err != nil { + return nil, err + } + return store, nil +} + +// loadMetadata loads snapshot metadata. Does not take out locks, since it's +// called internally on construction. 
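+// A missing metadata.json is not an error; the store simply starts out empty.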
+func (s *SnapshotStore) loadMetadata() error { + file := filepath.Join(s.dir, "metadata.json") + metadata := []abci.Snapshot{} + + bz, err := ioutil.ReadFile(file) + switch { + case errors.Is(err, os.ErrNotExist): + case err != nil: + return fmt.Errorf("failed to load snapshot metadata from %q: %w", file, err) + } + if len(bz) != 0 { + err = json.Unmarshal(bz, &metadata) + if err != nil { + return fmt.Errorf("invalid snapshot data in %q: %w", file, err) + } + } + s.metadata = metadata + return nil +} + +// saveMetadata saves snapshot metadata. Does not take out locks, since it's +// called internally from e.g. Create(). +func (s *SnapshotStore) saveMetadata() error { + bz, err := json.Marshal(s.metadata) + if err != nil { + return err + } + + // save the file to a new file and move it to make saving atomic. + newFile := filepath.Join(s.dir, "metadata.json.new") + file := filepath.Join(s.dir, "metadata.json") + err = ioutil.WriteFile(newFile, bz, 0644) // nolint: gosec + if err != nil { + return err + } + return os.Rename(newFile, file) +} + +// Create creates a snapshot of the given application state's key/value pairs. +func (s *SnapshotStore) Create(state *State) (abci.Snapshot, error) { + s.Lock() + defer s.Unlock() + bz, err := state.Export() + if err != nil { + return abci.Snapshot{}, err + } + hash := sha256.Sum256(bz) + snapshot := abci.Snapshot{ + Height: state.Height, + Format: 1, + Hash: hash[:], + Chunks: byteChunks(bz), + } + err = ioutil.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", state.Height)), bz, 0644) + if err != nil { + return abci.Snapshot{}, err + } + s.metadata = append(s.metadata, snapshot) + err = s.saveMetadata() + if err != nil { + return abci.Snapshot{}, err + } + return snapshot, nil +} + +// List lists available snapshots. +func (s *SnapshotStore) List() ([]*abci.Snapshot, error) { + s.RLock() + defer s.RUnlock() + snapshots := []*abci.Snapshot{} + for _, snapshot := range s.metadata { + s := snapshot // copy to avoid pointer to range variable + snapshots = append(snapshots, &s) + } + return snapshots, nil +} + +// LoadChunk loads a snapshot chunk. +func (s *SnapshotStore) LoadChunk(height uint64, format uint32, chunk uint32) ([]byte, error) { + s.RLock() + defer s.RUnlock() + for _, snapshot := range s.metadata { + if snapshot.Height == height && snapshot.Format == format { + bz, err := ioutil.ReadFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height))) + if err != nil { + return nil, err + } + return byteChunk(bz, chunk), nil + } + } + return nil, nil +} + +// byteChunk returns the chunk at a given index from the full byte slice. +func byteChunk(bz []byte, index uint32) []byte { + start := int(index * snapshotChunkSize) + end := int((index + 1) * snapshotChunkSize) + switch { + case start >= len(bz): + return nil + case end >= len(bz): + return bz[start:] + default: + return bz[start:end] + } +} + +// byteChunks calculates the number of chunks in the byte slice. +func byteChunks(bz []byte) uint32 { + return uint32(math.Ceil(float64(len(bz)) / snapshotChunkSize)) +} diff --git a/test/e2e/app/state.go b/test/e2e/app/state.go new file mode 100644 index 000000000..ad9960105 --- /dev/null +++ b/test/e2e/app/state.go @@ -0,0 +1,155 @@ +//nolint: gosec +package main + +import ( + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "sort" + "sync" +) + +// State is the application state. +type State struct { + sync.RWMutex + Height uint64 + Values map[string]string + Hash []byte + + // private fields aren't marshalled to disk. 
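+ // (encoding/json only marshals exported fields, so state.json contains just
+ // Height, Values and Hash.)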
+ file string + persistInterval uint64 + initialHeight uint64 +} + +// NewState creates a new state. +func NewState(file string, persistInterval uint64) (*State, error) { + state := &State{ + Values: make(map[string]string), + file: file, + persistInterval: persistInterval, + } + state.Hash = hashItems(state.Values) + err := state.load() + switch { + case errors.Is(err, os.ErrNotExist): + case err != nil: + return nil, err + } + return state, nil +} + +// load loads state from disk. It does not take out a lock, since it is called +// during construction. +func (s *State) load() error { + bz, err := ioutil.ReadFile(s.file) + if err != nil { + return fmt.Errorf("failed to read state from %q: %w", s.file, err) + } + err = json.Unmarshal(bz, s) + if err != nil { + return fmt.Errorf("invalid state data in %q: %w", s.file, err) + } + return nil +} + +// save saves the state to disk. It does not take out a lock since it is called +// internally by Commit which does lock. +func (s *State) save() error { + bz, err := json.Marshal(s) + if err != nil { + return fmt.Errorf("failed to marshal state: %w", err) + } + // We write the state to a separate file and move it to the destination, to + // make it atomic. + newFile := fmt.Sprintf("%v.new", s.file) + err = ioutil.WriteFile(newFile, bz, 0644) + if err != nil { + return fmt.Errorf("failed to write state to %q: %w", s.file, err) + } + return os.Rename(newFile, s.file) +} + +// Export exports key/value pairs as JSON, used for state sync snapshots. +func (s *State) Export() ([]byte, error) { + s.RLock() + defer s.RUnlock() + return json.Marshal(s.Values) +} + +// Import imports key/value pairs from JSON bytes, used for InitChain.AppStateBytes and +// state sync snapshots. It also saves the state once imported. +func (s *State) Import(height uint64, jsonBytes []byte) error { + s.Lock() + defer s.Unlock() + values := map[string]string{} + err := json.Unmarshal(jsonBytes, &values) + if err != nil { + return fmt.Errorf("failed to decode imported JSON data: %w", err) + } + s.Height = height + s.Values = values + s.Hash = hashItems(values) + return s.save() +} + +// Get fetches a value. A missing value is returned as an empty string. +func (s *State) Get(key string) string { + s.RLock() + defer s.RUnlock() + return s.Values[key] +} + +// Set sets a value. Setting an empty value is equivalent to deleting it. +func (s *State) Set(key, value string) { + s.Lock() + defer s.Unlock() + if value == "" { + delete(s.Values, key) + } else { + s.Values[key] = value + } +} + +// Commit commits the current state. +func (s *State) Commit() (uint64, []byte, error) { + s.Lock() + defer s.Unlock() + s.Hash = hashItems(s.Values) + switch { + case s.Height > 0: + s.Height++ + case s.initialHeight > 0: + s.Height = s.initialHeight + default: + s.Height = 1 + } + if s.persistInterval > 0 && s.Height%s.persistInterval == 0 { + err := s.save() + if err != nil { + return 0, nil, err + } + } + return s.Height, s.Hash, nil +} + +// hashItems hashes a set of key/value items. 
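+// Keys are sorted and every key and value is followed by a NUL byte, so
+// e.g. {"a": "1", "b": "2"} hashes the byte stream "a\x001\x00b\x002\x00".
+// This makes the digest independent of map iteration order and keeps the
+// key/value boundaries unambiguous.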
+func hashItems(items map[string]string) []byte { + keys := make([]string, 0, len(items)) + for key := range items { + keys = append(keys, key) + } + sort.Strings(keys) + + hasher := sha256.New() + for _, key := range keys { + _, _ = hasher.Write([]byte(key)) + _, _ = hasher.Write([]byte{0}) + _, _ = hasher.Write([]byte(items[key])) + _, _ = hasher.Write([]byte{0}) + } + return hasher.Sum(nil) +} diff --git a/test/e2e/docker/Dockerfile b/test/e2e/docker/Dockerfile new file mode 100644 index 000000000..825aa7f0d --- /dev/null +++ b/test/e2e/docker/Dockerfile @@ -0,0 +1,33 @@ +# We need to build in a Linux environment to support C libraries, e.g. RocksDB. +# We use Debian instead of Alpine, so that we can use binary database packages +# instead of spending time compiling them. +FROM golang:1.15 + +RUN apt-get -qq update -y && apt-get -qq upgrade -y >/dev/null +RUN apt-get -qq install -y libleveldb-dev librocksdb-dev >/dev/null + +# Set up build directory /src/tendermint +ENV TENDERMINT_BUILD_OPTIONS badgerdb,boltdb,cleveldb,rocksdb +WORKDIR /src/tendermint + +# Fetch dependencies separately (for layer caching) +COPY go.mod go.sum ./ +RUN go mod download + +# Build Tendermint and install into /usr/bin/tendermint +COPY . . +RUN make build && cp build/tendermint /usr/bin/tendermint +COPY test/e2e/docker/entrypoint* /usr/bin/ +RUN cd test/e2e && make maverick && cp build/maverick /usr/bin/maverick +RUN cd test/e2e && make app && cp build/app /usr/bin/app + +# Set up runtime directory. We don't use a separate runtime image since we need +# e.g. leveldb and rocksdb which are already installed in the build image. +WORKDIR /tendermint +VOLUME /tendermint +ENV TMHOME=/tendermint + +EXPOSE 26656 26657 26660 +ENTRYPOINT ["/usr/bin/entrypoint"] +CMD ["node"] +STOPSIGNAL SIGTERM diff --git a/test/e2e/docker/entrypoint b/test/e2e/docker/entrypoint new file mode 100755 index 000000000..50d57a313 --- /dev/null +++ b/test/e2e/docker/entrypoint @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +# Forcibly remove any stray UNIX sockets left behind from previous runs +rm -rf /var/run/privval.sock /var/run/app.sock + +/usr/bin/app /tendermint/config/app.toml & + +sleep 1 + +/usr/bin/tendermint "$@" diff --git a/test/e2e/docker/entrypoint-builtin b/test/e2e/docker/entrypoint-builtin new file mode 100755 index 000000000..3bec08671 --- /dev/null +++ b/test/e2e/docker/entrypoint-builtin @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +# Forcibly remove any stray UNIX sockets left behind from previous runs +rm -rf /var/run/privval.sock /var/run/app.sock + +/usr/bin/app /tendermint/config/app.toml diff --git a/test/e2e/docker/entrypoint-maverick b/test/e2e/docker/entrypoint-maverick new file mode 100755 index 000000000..9469e2447 --- /dev/null +++ b/test/e2e/docker/entrypoint-maverick @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +# Forcibly remove any stray UNIX sockets left behind from previous runs +rm -rf /var/run/privval.sock /var/run/app.sock + +/usr/bin/app /tendermint/config/app.toml & + +sleep 1 + +/usr/bin/maverick "$@" diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go new file mode 100644 index 000000000..b67500168 --- /dev/null +++ b/test/e2e/generator/generate.go @@ -0,0 +1,264 @@ +package main + +import ( + "fmt" + "math/rand" + "sort" + "strconv" + "strings" + + e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/types" +) + +var ( + // testnetCombinations defines global testnet options, where we generate a + // separate testnet for each combination (Cartesian 
product) of options.
+	testnetCombinations = map[string][]interface{}{
+		"topology":      {"single", "quad", "large"},
+		"ipv6":          {false, true},
+		"initialHeight": {0, 1000},
+		"initialState": {
+			map[string]string{},
+			map[string]string{"initial01": "a", "initial02": "b", "initial03": "c"},
+		},
+		"validators": {"genesis", "initchain"},
+		"keyType":    {types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1},
+	}
+
+	// The following specify randomly chosen values for testnet nodes.
+	nodeDatabases         = uniformChoice{"goleveldb", "cleveldb", "rocksdb", "boltdb", "badgerdb"}
+	nodeABCIProtocols     = uniformChoice{"unix", "tcp", "grpc", "builtin"}
+	nodePrivvalProtocols  = uniformChoice{"file", "unix", "tcp"}
+	nodeFastSyncs         = uniformChoice{"", "v0", "v1", "v2"}
+	nodeStateSyncs        = uniformChoice{false, true}
+	nodePersistIntervals  = uniformChoice{0, 1, 5}
+	nodeSnapshotIntervals = uniformChoice{0, 3}
+	nodeRetainBlocks      = uniformChoice{0, 1, 5}
+	nodePerturbations     = probSetChoice{
+		"disconnect": 0.1,
+		"pause":      0.1,
+		"kill":       0.1,
+		"restart":    0.1,
+	}
+	nodeMisbehaviors = weightedChoice{
+		// FIXME evidence disabled due to node panicking when not
+		// having sufficient block history to process evidence.
+		// https://github.com/tendermint/tendermint/issues/5617
+		// misbehaviorOption{"double-prevote"}: 1,
+		misbehaviorOption{}: 9,
+	}
+)
+
+// Generate generates random testnets using the given RNG.
+func Generate(r *rand.Rand) ([]e2e.Manifest, error) {
+	manifests := []e2e.Manifest{}
+	for _, opt := range combinations(testnetCombinations) {
+		manifest, err := generateTestnet(r, opt)
+		if err != nil {
+			return nil, err
+		}
+		manifests = append(manifests, manifest)
+	}
+	return manifests, nil
+}
+
+// generateTestnet generates a single testnet with the given options.
+func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, error) {
+	manifest := e2e.Manifest{
+		IPv6:             opt["ipv6"].(bool),
+		InitialHeight:    int64(opt["initialHeight"].(int)),
+		InitialState:     opt["initialState"].(map[string]string),
+		Validators:       &map[string]int64{},
+		ValidatorUpdates: map[string]map[string]int64{},
+		Nodes:            map[string]*e2e.ManifestNode{},
+		KeyType:          opt["keyType"].(string),
+	}
+
+	var numSeeds, numValidators, numFulls int
+	switch opt["topology"].(string) {
+	case "single":
+		numValidators = 1
+	case "quad":
+		numValidators = 4
+	case "large":
+		// FIXME Networks are kept small since large ones use too much CPU.
+		numSeeds = r.Intn(4)
+		numValidators = 4 + r.Intn(7)
+		numFulls = r.Intn(5)
+	default:
+		return manifest, fmt.Errorf("unknown topology %q", opt["topology"])
+	}
+
+	// First we generate seed nodes, starting at the initial height.
+	for i := 1; i <= numSeeds; i++ {
+		manifest.Nodes[fmt.Sprintf("seed%02d", i)] = generateNode(
+			r, e2e.ModeSeed, 0, manifest.InitialHeight, false)
+	}
+
+	// Next, we generate validators. We make sure a BFT quorum of validators start
+	// at the initial height, and that we have two archive nodes. We also set up
+	// the initial validator set, and validator set updates for delayed nodes.
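+	// For example, with numValidators=7 the quorum below is 7*2/3+1 = 5, so
+	// validators 6 and 7 start at later heights and join the validator set
+	// through ValidatorUpdates rather than genesis.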
+ nextStartAt := manifest.InitialHeight + 5 + quorum := numValidators*2/3 + 1 + for i := 1; i <= numValidators; i++ { + startAt := int64(0) + if i > quorum { + startAt = nextStartAt + nextStartAt += 5 + } + name := fmt.Sprintf("validator%02d", i) + manifest.Nodes[name] = generateNode( + r, e2e.ModeValidator, startAt, manifest.InitialHeight, i <= 2) + + if startAt == 0 { + (*manifest.Validators)[name] = int64(30 + r.Intn(71)) + } else { + manifest.ValidatorUpdates[fmt.Sprint(startAt+5)] = map[string]int64{ + name: int64(30 + r.Intn(71)), + } + } + } + + // Move validators to InitChain if specified. + switch opt["validators"].(string) { + case "genesis": + case "initchain": + manifest.ValidatorUpdates["0"] = *manifest.Validators + manifest.Validators = &map[string]int64{} + default: + return manifest, fmt.Errorf("invalid validators option %q", opt["validators"]) + } + + // Finally, we generate random full nodes. + for i := 1; i <= numFulls; i++ { + startAt := int64(0) + if r.Float64() >= 0.5 { + startAt = nextStartAt + nextStartAt += 5 + } + manifest.Nodes[fmt.Sprintf("full%02d", i)] = generateNode( + r, e2e.ModeFull, startAt, manifest.InitialHeight, false) + } + + // We now set up peer discovery for nodes. Seed nodes are fully meshed with + // each other, while non-seed nodes either use a set of random seeds or a + // set of random peers that start before themselves. + var seedNames, peerNames []string + for name, node := range manifest.Nodes { + if node.Mode == string(e2e.ModeSeed) { + seedNames = append(seedNames, name) + } else { + peerNames = append(peerNames, name) + } + } + + for _, name := range seedNames { + for _, otherName := range seedNames { + if name != otherName { + manifest.Nodes[name].Seeds = append(manifest.Nodes[name].Seeds, otherName) + } + } + } + + sort.Slice(peerNames, func(i, j int) bool { + iName, jName := peerNames[i], peerNames[j] + switch { + case manifest.Nodes[iName].StartAt < manifest.Nodes[jName].StartAt: + return true + case manifest.Nodes[iName].StartAt > manifest.Nodes[jName].StartAt: + return false + default: + return strings.Compare(iName, jName) == -1 + } + }) + for i, name := range peerNames { + if len(seedNames) > 0 && (i == 0 || r.Float64() >= 0.5) { + manifest.Nodes[name].Seeds = uniformSetChoice(seedNames).Choose(r) + } else if i > 0 { + manifest.Nodes[name].PersistentPeers = uniformSetChoice(peerNames[:i]).Choose(r) + } + } + + return manifest, nil +} + +// generateNode randomly generates a node, with some constraints to avoid +// generating invalid configurations. We do not set Seeds or PersistentPeers +// here, since we need to know the overall network topology and startup +// sequencing. +func generateNode( + r *rand.Rand, mode e2e.Mode, startAt int64, initialHeight int64, forceArchive bool, +) *e2e.ManifestNode { + node := e2e.ManifestNode{ + Mode: string(mode), + StartAt: startAt, + Database: nodeDatabases.Choose(r).(string), + ABCIProtocol: nodeABCIProtocols.Choose(r).(string), + PrivvalProtocol: nodePrivvalProtocols.Choose(r).(string), + FastSync: nodeFastSyncs.Choose(r).(string), + StateSync: nodeStateSyncs.Choose(r).(bool) && startAt > 0, + PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))), + SnapshotInterval: uint64(nodeSnapshotIntervals.Choose(r).(int)), + RetainBlocks: uint64(nodeRetainBlocks.Choose(r).(int)), + Perturb: nodePerturbations.Choose(r), + } + + // If this node is forced to be an archive node, retain all blocks and + // enable state sync snapshotting. 
+	if forceArchive {
+		node.RetainBlocks = 0
+		node.SnapshotInterval = 3
+	}
+
+	if node.Mode == "validator" {
+		misbehaveAt := startAt + 5 + int64(r.Intn(10))
+		if startAt == 0 {
+			misbehaveAt += initialHeight - 1
+		}
+		node.Misbehaviors = nodeMisbehaviors.Choose(r).(misbehaviorOption).atHeight(misbehaveAt)
+		if len(node.Misbehaviors) != 0 {
+			node.PrivvalProtocol = "file"
+		}
+	}
+
+	// If a node which does not persist state is also set to prune blocks
+	// (retain_blocks > 0), the combination is invalid, so randomly choose to
+	// either persist state or retain all blocks.
+	if node.PersistInterval != nil && *node.PersistInterval == 0 && node.RetainBlocks > 0 {
+		if r.Float64() > 0.5 {
+			node.RetainBlocks = 0
+		} else {
+			node.PersistInterval = ptrUint64(node.RetainBlocks)
+		}
+	}
+
+	// If either PersistInterval or SnapshotInterval is greater than RetainBlocks,
+	// expand the block retention time accordingly.
+	if node.RetainBlocks > 0 {
+		if node.PersistInterval != nil && node.RetainBlocks < *node.PersistInterval {
+			node.RetainBlocks = *node.PersistInterval
+		}
+		if node.RetainBlocks < node.SnapshotInterval {
+			node.RetainBlocks = node.SnapshotInterval
+		}
+	}
+
+	return &node
+}
+
+func ptrUint64(i uint64) *uint64 {
+	return &i
+}
+
+type misbehaviorOption struct {
+	misbehavior string
+}
+
+func (m misbehaviorOption) atHeight(height int64) map[string]string {
+	misbehaviorMap := make(map[string]string)
+	if m.misbehavior == "" {
+		return misbehaviorMap
+	}
+	misbehaviorMap[strconv.Itoa(int(height))] = m.misbehavior
+	return misbehaviorMap
+}
diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go
new file mode 100644
index 000000000..f17b4f3f4
--- /dev/null
+++ b/test/e2e/generator/main.go
@@ -0,0 +1,98 @@
+//nolint: gosec
+package main
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"os"
+	"path/filepath"
+
+	"github.com/spf13/cobra"
+
+	"github.com/tendermint/tendermint/libs/log"
+)
+
+const (
+	randomSeed int64 = 4827085738
+)
+
+var logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
+
+func main() {
+	NewCLI().Run()
+}
+
+// CLI is the Cobra-based command-line interface.
+type CLI struct {
+	root *cobra.Command
+}
+
+// NewCLI sets up the CLI.
+func NewCLI() *CLI {
+	cli := &CLI{}
+	cli.root = &cobra.Command{
+		Use:           "generator",
+		Short:         "End-to-end testnet generator",
+		SilenceUsage:  true,
+		SilenceErrors: true, // we'll output them ourselves in Run()
+		RunE: func(cmd *cobra.Command, args []string) error {
+			dir, err := cmd.Flags().GetString("dir")
+			if err != nil {
+				return err
+			}
+			groups, err := cmd.Flags().GetInt("groups")
+			if err != nil {
+				return err
+			}
+			return cli.generate(dir, groups)
+		},
+	}
+
+	cli.root.PersistentFlags().StringP("dir", "d", "", "Output directory for manifests")
+	_ = cli.root.MarkPersistentFlagRequired("dir")
+	cli.root.PersistentFlags().IntP("groups", "g", 0, "Number of groups")
+
+	return cli
+}
+
+// generate generates manifests in a directory.
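+// With groups > 0 the manifests are split into fixed-size groups: e.g. 48
+// manifests and 5 groups give groupSize = ceil(48/5) = 10, so groups 0-3
+// receive 10 manifests each and the last group the remaining 8.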
+func (cli *CLI) generate(dir string, groups int) error { + err := os.MkdirAll(dir, 0755) + if err != nil { + return err + } + + manifests, err := Generate(rand.New(rand.NewSource(randomSeed))) + if err != nil { + return err + } + if groups <= 0 { + for i, manifest := range manifests { + err = manifest.Save(filepath.Join(dir, fmt.Sprintf("gen-%04d.toml", i))) + if err != nil { + return err + } + } + } else { + groupSize := int(math.Ceil(float64(len(manifests)) / float64(groups))) + for g := 0; g < groups; g++ { + for i := 0; i < groupSize && g*groupSize+i < len(manifests); i++ { + manifest := manifests[g*groupSize+i] + err = manifest.Save(filepath.Join(dir, fmt.Sprintf("gen-group%02d-%04d.toml", g, i))) + if err != nil { + return err + } + } + } + } + return nil +} + +// Run runs the CLI. +func (cli *CLI) Run() { + if err := cli.root.Execute(); err != nil { + logger.Error(err.Error()) + os.Exit(1) + } +} diff --git a/test/e2e/generator/random.go b/test/e2e/generator/random.go new file mode 100644 index 000000000..ec59a01b2 --- /dev/null +++ b/test/e2e/generator/random.go @@ -0,0 +1,107 @@ +package main + +import ( + "math/rand" + "sort" +) + +// combinations takes input in the form of a map of item lists, and returns a +// list of all combinations of each item for each key. E.g.: +// +// {"foo": [1, 2, 3], "bar": [4, 5, 6]} +// +// Will return the following maps: +// +// {"foo": 1, "bar": 4} +// {"foo": 1, "bar": 5} +// {"foo": 1, "bar": 6} +// {"foo": 2, "bar": 4} +// {"foo": 2, "bar": 5} +// {"foo": 2, "bar": 6} +// {"foo": 3, "bar": 4} +// {"foo": 3, "bar": 5} +// {"foo": 3, "bar": 6} +func combinations(items map[string][]interface{}) []map[string]interface{} { + keys := []string{} + for key := range items { + keys = append(keys, key) + } + sort.Strings(keys) + return combiner(map[string]interface{}{}, keys, items) +} + +// combiner is a utility function for combinations. +func combiner(head map[string]interface{}, pending []string, items map[string][]interface{}) []map[string]interface{} { + if len(pending) == 0 { + return []map[string]interface{}{head} + } + key, pending := pending[0], pending[1:] + + result := []map[string]interface{}{} + for _, value := range items[key] { + path := map[string]interface{}{} + for k, v := range head { + path[k] = v + } + path[key] = value + result = append(result, combiner(path, pending, items)...) + } + return result +} + +// uniformChoice chooses a single random item from the argument list, uniformly weighted. +type uniformChoice []interface{} + +func (uc uniformChoice) Choose(r *rand.Rand) interface{} { + return uc[r.Intn(len(uc))] +} + +// weightedChoice chooses a single random key from a map of keys and weights. +type weightedChoice map[interface{}]uint + +func (wc weightedChoice) Choose(r *rand.Rand) interface{} { + total := 0 + choices := make([]interface{}, 0, len(wc)) + for choice, weight := range wc { + total += int(weight) + choices = append(choices, choice) + } + + rem := r.Intn(total) + for _, choice := range choices { + rem -= int(wc[choice]) + if rem <= 0 { + return choice + } + } + + return nil +} + +// probSetChoice picks a set of strings based on each string's probability (0-1). +type probSetChoice map[string]float64 + +func (pc probSetChoice) Choose(r *rand.Rand) []string { + choices := []string{} + for item, prob := range pc { + if r.Float64() <= prob { + choices = append(choices, item) + } + } + return choices +} + +// uniformSetChoice picks a set of strings with uniform probability, picking at least one. 
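+// Note that for inputs with more than one element the result is always a
+// strict subset: indexes[:1+r.Intn(len-1)] selects at most len-1 elements.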
+type uniformSetChoice []string + +func (usc uniformSetChoice) Choose(r *rand.Rand) []string { + choices := []string{} + indexes := r.Perm(len(usc)) + if len(indexes) > 1 { + indexes = indexes[:1+r.Intn(len(indexes)-1)] + } + for _, i := range indexes { + choices = append(choices, usc[i]) + } + return choices +} diff --git a/test/e2e/generator/random_test.go b/test/e2e/generator/random_test.go new file mode 100644 index 000000000..3fbb19ab5 --- /dev/null +++ b/test/e2e/generator/random_test.go @@ -0,0 +1,31 @@ +package main + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCombinations(t *testing.T) { + input := map[string][]interface{}{ + "bool": {false, true}, + "int": {1, 2, 3}, + "string": {"foo", "bar"}, + } + + c := combinations(input) + assert.Equal(t, []map[string]interface{}{ + {"bool": false, "int": 1, "string": "foo"}, + {"bool": false, "int": 1, "string": "bar"}, + {"bool": false, "int": 2, "string": "foo"}, + {"bool": false, "int": 2, "string": "bar"}, + {"bool": false, "int": 3, "string": "foo"}, + {"bool": false, "int": 3, "string": "bar"}, + {"bool": true, "int": 1, "string": "foo"}, + {"bool": true, "int": 1, "string": "bar"}, + {"bool": true, "int": 2, "string": "foo"}, + {"bool": true, "int": 2, "string": "bar"}, + {"bool": true, "int": 3, "string": "foo"}, + {"bool": true, "int": 3, "string": "bar"}, + }, c) +} diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml new file mode 100644 index 000000000..91ed89d63 --- /dev/null +++ b/test/e2e/networks/ci.toml @@ -0,0 +1,87 @@ +# This testnet is run by CI, and attempts to cover a broad range of +# functionality with a single network. + +initial_height = 1000 +initial_state = { initial01 = "a", initial02 = "b", initial03 = "c" } + +[validators] +validator01 = 100 + +[validator_update.0] +validator01 = 10 +validator02 = 20 +validator03 = 30 +validator04 = 40 + +[validator_update.1010] +validator05 = 50 + +# validator03 gets killed and validator05 has lots of perturbations, so weight them low. 
+[validator_update.1020]
+validator01 = 100
+validator02 = 100
+validator03 = 50
+validator04 = 100
+validator05 = 50
+
+[node.seed01]
+mode = "seed"
+seeds = ["seed02"]
+
+[node.seed02]
+mode = "seed"
+seeds = ["seed01"]
+
+[node.validator01]
+seeds = ["seed01"]
+snapshot_interval = 5
+perturb = ["disconnect"]
+misbehaviors = { 1018 = "double-prevote" }
+
+[node.validator02]
+seeds = ["seed02"]
+database = "boltdb"
+abci_protocol = "tcp"
+privval_protocol = "tcp"
+persist_interval = 0
+perturb = ["restart"]
+
+[node.validator03]
+seeds = ["seed01"]
+database = "badgerdb"
+abci_protocol = "grpc"
+privval_protocol = "unix"
+persist_interval = 3
+retain_blocks = 3
+perturb = ["kill"]
+
+[node.validator04]
+persistent_peers = ["validator01"]
+database = "rocksdb"
+abci_protocol = "builtin"
+perturb = ["pause"]
+
+[node.validator05]
+start_at = 1005 # Becomes part of the validator set at 1010
+seeds = ["seed02"]
+database = "cleveldb"
+fast_sync = "v0"
+abci_protocol = "grpc"
+privval_protocol = "tcp"
+perturb = ["kill", "pause", "disconnect", "restart"]
+
+[node.full01]
+start_at = 1010
+mode = "full"
+fast_sync = "v1"
+persistent_peers = ["validator01", "validator02", "validator03", "validator04", "validator05"]
+retain_blocks = 1
+perturb = ["restart"]
+
+[node.full02]
+start_at = 1015
+mode = "full"
+fast_sync = "v2"
+state_sync = true
+seeds = ["seed01"]
+perturb = ["restart"]
diff --git a/test/e2e/networks/simple.toml b/test/e2e/networks/simple.toml
new file mode 100644
index 000000000..37f711a91
--- /dev/null
+++ b/test/e2e/networks/simple.toml
@@ -0,0 +1,5 @@
+[node.validator01]
+[node.validator02]
+[node.validator03]
+[node.validator04]
+
diff --git a/test/e2e/networks/single.toml b/test/e2e/networks/single.toml
new file mode 100644
index 000000000..54c40b19e
--- /dev/null
+++ b/test/e2e/networks/single.toml
@@ -0,0 +1 @@
+[node.validator]
diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go
new file mode 100644
index 000000000..3fbf14558
--- /dev/null
+++ b/test/e2e/pkg/manifest.go
@@ -0,0 +1,151 @@
+package e2e
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/BurntSushi/toml"
+)
+
+// Manifest represents a TOML testnet manifest.
+type Manifest struct {
+	// IPv6 uses IPv6 networking instead of IPv4. Defaults to IPv4.
+	IPv6 bool `toml:"ipv6"`
+
+	// InitialHeight specifies the initial block height, set in genesis. Defaults to 1.
+	InitialHeight int64 `toml:"initial_height"`
+
+	// InitialState is an initial set of key/value pairs for the application,
+	// set in genesis. Defaults to nothing.
+	InitialState map[string]string `toml:"initial_state"`
+
+	// Validators is the initial validator set in genesis, given as node names
+	// and power:
+	//
+	// validators = { validator01 = 10, validator02 = 20, validator03 = 30 }
+	//
+	// Defaults to all nodes that have mode=validator at power 100. Explicitly
+	// specifying an empty set will start with no validators in genesis, and
+	// the application must return the validator set in InitChain via the
+	// setting validator_update.0 (see below).
+	Validators *map[string]int64 `toml:"validators"`
+
+	// ValidatorUpdates is a map of heights to validator names and their power,
+	// and will be returned by the ABCI application. For example, the following
+	// changes the power of validator01 and validator02 at height 1000:
+	//
+	// [validator_update.1000]
+	// validator01 = 20
+	// validator02 = 10
+	//
+	// Specifying height 0 returns the validator update during InitChain. The
+	// application returns the validator updates as-is, i.e. removing a
+	// validator must be done by returning it with power 0, and any validators
+	// not specified are not changed.
+	ValidatorUpdates map[string]map[string]int64 `toml:"validator_update"`
+
+	// Nodes specifies the network nodes. At least one node must be given.
+	Nodes map[string]*ManifestNode `toml:"node"`
+
+	// KeyType sets the curve that will be used by validators.
+	// Options are ed25519 and secp256k1.
+	KeyType string `toml:"key_type"`
+}
+
+// ManifestNode represents a node in a testnet manifest.
+type ManifestNode struct {
+	// Mode specifies the type of node: "validator", "full", or "seed". Defaults to
+	// "validator". Full nodes do not get a signing key (a dummy key is generated),
+	// and seed nodes run in seed mode with the PEX reactor enabled.
+	Mode string `toml:"mode"`
+
+	// Seeds is the list of node names to use as P2P seed nodes. Defaults to none.
+	Seeds []string `toml:"seeds"`
+
+	// PersistentPeers is a list of node names to maintain persistent P2P
+	// connections to. If neither seeds nor persistent peers are specified,
+	// this defaults to all other nodes in the network.
+	PersistentPeers []string `toml:"persistent_peers"`
+
+	// Database specifies the database backend: "goleveldb", "cleveldb",
+	// "rocksdb", "boltdb", or "badgerdb". Defaults to goleveldb.
+	Database string `toml:"database"`
+
+	// ABCIProtocol specifies the protocol used to communicate with the ABCI
+	// application: "unix", "tcp", "grpc", or "builtin". Defaults to unix.
+	// builtin will build a complete Tendermint node into the application and
+	// launch it instead of launching a separate Tendermint process.
+	ABCIProtocol string `toml:"abci_protocol"`
+
+	// PrivvalProtocol specifies the protocol used to sign consensus messages:
+	// "file", "unix", or "tcp". Defaults to "file". For unix and tcp, the ABCI
+	// application will launch a remote signer client in a separate goroutine.
+	// Only nodes with mode=validator will actually make use of this.
+	PrivvalProtocol string `toml:"privval_protocol"`
+
+	// StartAt specifies the block height at which the node will be started. The
+	// runner will wait for the network to reach at least this block height.
+	StartAt int64 `toml:"start_at"`
+
+	// FastSync specifies the fast sync mode: "" (disable), "v0", "v1", or "v2".
+	// Defaults to disabled.
+	FastSync string `toml:"fast_sync"`
+
+	// StateSync enables state sync. The runner automatically configures trusted
+	// block hashes and RPC servers. At least one node in the network must have
+	// SnapshotInterval set to non-zero, and the state syncing node must have
+	// StartAt set to an appropriate height where a snapshot is available.
+	StateSync bool `toml:"state_sync"`
+
+	// PersistInterval specifies the height interval at which the application
+	// will persist state to disk. Defaults to 1 (every height); setting this to
+	// 0 disables state persistence.
+	PersistInterval *uint64 `toml:"persist_interval"`
+
+	// SnapshotInterval specifies the height interval at which the application
+	// will take state sync snapshots. Defaults to 0 (disabled).
+	SnapshotInterval uint64 `toml:"snapshot_interval"`
+
+	// RetainBlocks specifies the number of recent blocks to retain. Defaults to
+	// 0, which retains all blocks. If non-zero, it must be greater than or equal
+	// to both PersistInterval and SnapshotInterval.
+ RetainBlocks uint64 `toml:"retain_blocks"` + + // Perturb lists perturbations to apply to the node after it has been + // started and synced with the network: + // + // disconnect: temporarily disconnects the node from the network + // kill: kills the node with SIGKILL then restarts it + // pause: temporarily pauses (freezes) the node + // restart: restarts the node, shutting it down with SIGTERM + Perturb []string `toml:"perturb"` + + // Misbehaviors sets how a validator behaves during consensus at a + // certain height. Multiple misbehaviors at different heights can be used + // + // An example of misbehaviors + // { 10 = "double-prevote", 20 = "double-prevote"} + // + // For more information, look at the readme in the maverick folder. + // A list of all behaviors can be found in ../maverick/consensus/behavior.go + Misbehaviors map[string]string `toml:"misbehaviors"` +} + +// Save saves the testnet manifest to a file. +func (m Manifest) Save(file string) error { + f, err := os.Create(file) + if err != nil { + return fmt.Errorf("failed to create manifest file %q: %w", file, err) + } + return toml.NewEncoder(f).Encode(m) +} + +// LoadManifest loads a testnet manifest from a file. +func LoadManifest(file string) (Manifest, error) { + manifest := Manifest{} + _, err := toml.DecodeFile(file, &manifest) + if err != nil { + return manifest, fmt.Errorf("failed to load testnet manifest %q: %w", file, err) + } + return manifest, nil +} diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go new file mode 100644 index 000000000..df2be5699 --- /dev/null +++ b/test/e2e/pkg/testnet.go @@ -0,0 +1,555 @@ +//nolint: gosec +package e2e + +import ( + "errors" + "fmt" + "io" + "math/rand" + "net" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" + mcs "github.com/tendermint/tendermint/test/maverick/consensus" + "github.com/tendermint/tendermint/types" +) + +const ( + randomSeed int64 = 2308084734268 + proxyPortFirst uint32 = 5701 + networkIPv4 = "10.186.73.0/24" + networkIPv6 = "fd80:b10c::/48" +) + +type Mode string +type Protocol string +type Perturbation string + +const ( + ModeValidator Mode = "validator" + ModeFull Mode = "full" + ModeSeed Mode = "seed" + + ProtocolBuiltin Protocol = "builtin" + ProtocolFile Protocol = "file" + ProtocolGRPC Protocol = "grpc" + ProtocolTCP Protocol = "tcp" + ProtocolUNIX Protocol = "unix" + + PerturbationDisconnect Perturbation = "disconnect" + PerturbationKill Perturbation = "kill" + PerturbationPause Perturbation = "pause" + PerturbationRestart Perturbation = "restart" +) + +// Testnet represents a single testnet. +type Testnet struct { + Name string + File string + Dir string + IP *net.IPNet + InitialHeight int64 + InitialState map[string]string + Validators map[*Node]int64 + ValidatorUpdates map[int64]map[*Node]int64 + Nodes []*Node + KeyType string +} + +// Node represents a Tendermint node in a testnet. 
+type Node struct { + Name string + Testnet *Testnet + Mode Mode + PrivvalKey crypto.PrivKey + NodeKey crypto.PrivKey + IP net.IP + ProxyPort uint32 + StartAt int64 + FastSync string + StateSync bool + Database string + ABCIProtocol Protocol + PrivvalProtocol Protocol + PersistInterval uint64 + SnapshotInterval uint64 + RetainBlocks uint64 + Seeds []*Node + PersistentPeers []*Node + Perturbations []Perturbation + Misbehaviors map[int64]string +} + +// LoadTestnet loads a testnet from a manifest file, using the filename to +// determine the testnet name and directory (from the basename of the file). +// The testnet generation must be deterministic, since it is generated +// separately by the runner and the test cases. For this reason, testnets use a +// random seed to generate e.g. keys. +func LoadTestnet(file string) (*Testnet, error) { + manifest, err := LoadManifest(file) + if err != nil { + return nil, err + } + dir := strings.TrimSuffix(file, filepath.Ext(file)) + + // Set up resource generators. These must be deterministic. + netAddress := networkIPv4 + if manifest.IPv6 { + netAddress = networkIPv6 + } + _, ipNet, err := net.ParseCIDR(netAddress) + if err != nil { + return nil, fmt.Errorf("invalid IP network address %q: %w", netAddress, err) + } + + ipGen := newIPGenerator(ipNet) + keyGen := newKeyGenerator(randomSeed) + proxyPortGen := newPortGenerator(proxyPortFirst) + + testnet := &Testnet{ + Name: filepath.Base(dir), + File: file, + Dir: dir, + IP: ipGen.Network(), + InitialHeight: 1, + InitialState: manifest.InitialState, + Validators: map[*Node]int64{}, + ValidatorUpdates: map[int64]map[*Node]int64{}, + Nodes: []*Node{}, + KeyType: "ed25519", + } + if len(manifest.KeyType) != 0 { + testnet.KeyType = manifest.KeyType + } + if manifest.InitialHeight > 0 { + testnet.InitialHeight = manifest.InitialHeight + } + + // Set up nodes, in alphabetical order (IPs and ports get same order). 
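+	// (Go randomizes map iteration order, so sorting the node names first is
+	// what makes IP and proxy port assignment deterministic between runs.)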
+ nodeNames := []string{} + for name := range manifest.Nodes { + nodeNames = append(nodeNames, name) + } + sort.Strings(nodeNames) + + for _, name := range nodeNames { + nodeManifest := manifest.Nodes[name] + node := &Node{ + Name: name, + Testnet: testnet, + PrivvalKey: keyGen.Generate(manifest.KeyType), + NodeKey: keyGen.Generate("ed25519"), + IP: ipGen.Next(), + ProxyPort: proxyPortGen.Next(), + Mode: ModeValidator, + Database: "goleveldb", + ABCIProtocol: ProtocolUNIX, + PrivvalProtocol: ProtocolFile, + StartAt: nodeManifest.StartAt, + FastSync: nodeManifest.FastSync, + StateSync: nodeManifest.StateSync, + PersistInterval: 1, + SnapshotInterval: nodeManifest.SnapshotInterval, + RetainBlocks: nodeManifest.RetainBlocks, + Perturbations: []Perturbation{}, + Misbehaviors: make(map[int64]string), + } + if node.StartAt == testnet.InitialHeight { + node.StartAt = 0 // normalize to 0 for initial nodes, since code expects this + } + if nodeManifest.Mode != "" { + node.Mode = Mode(nodeManifest.Mode) + } + if nodeManifest.Database != "" { + node.Database = nodeManifest.Database + } + if nodeManifest.ABCIProtocol != "" { + node.ABCIProtocol = Protocol(nodeManifest.ABCIProtocol) + } + if nodeManifest.PrivvalProtocol != "" { + node.PrivvalProtocol = Protocol(nodeManifest.PrivvalProtocol) + } + if nodeManifest.PersistInterval != nil { + node.PersistInterval = *nodeManifest.PersistInterval + } + for _, p := range nodeManifest.Perturb { + node.Perturbations = append(node.Perturbations, Perturbation(p)) + } + for heightString, misbehavior := range nodeManifest.Misbehaviors { + height, err := strconv.ParseInt(heightString, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to parse height %s to int64: %w", heightString, err) + } + node.Misbehaviors[height] = misbehavior + } + testnet.Nodes = append(testnet.Nodes, node) + } + + // We do a second pass to set up seeds and persistent peers, which allows graph cycles. + for _, node := range testnet.Nodes { + nodeManifest, ok := manifest.Nodes[node.Name] + if !ok { + return nil, fmt.Errorf("failed to look up manifest for node %q", node.Name) + } + for _, seedName := range nodeManifest.Seeds { + seed := testnet.LookupNode(seedName) + if seed == nil { + return nil, fmt.Errorf("unknown seed %q for node %q", seedName, node.Name) + } + node.Seeds = append(node.Seeds, seed) + } + for _, peerName := range nodeManifest.PersistentPeers { + peer := testnet.LookupNode(peerName) + if peer == nil { + return nil, fmt.Errorf("unknown persistent peer %q for node %q", peerName, node.Name) + } + node.PersistentPeers = append(node.PersistentPeers, peer) + } + + // If there are no seeds or persistent peers specified, default to persistent + // connections to all other nodes. + if len(node.PersistentPeers) == 0 && len(node.Seeds) == 0 { + for _, peer := range testnet.Nodes { + if peer.Name == node.Name { + continue + } + node.PersistentPeers = append(node.PersistentPeers, peer) + } + } + } + + // Set up genesis validators. If not specified explicitly, use all validator nodes. + if manifest.Validators != nil { + for validatorName, power := range *manifest.Validators { + validator := testnet.LookupNode(validatorName) + if validator == nil { + return nil, fmt.Errorf("unknown validator %q", validatorName) + } + testnet.Validators[validator] = power + } + } else { + for _, node := range testnet.Nodes { + if node.Mode == ModeValidator { + testnet.Validators[node] = 100 + } + } + } + + // Set up validator updates. 
+	for heightStr, validators := range manifest.ValidatorUpdates {
+		height, err := strconv.Atoi(heightStr)
+		if err != nil {
+			return nil, fmt.Errorf("invalid validator update height %q: %w", heightStr, err)
+		}
+		valUpdate := map[*Node]int64{}
+		for name, power := range validators {
+			node := testnet.LookupNode(name)
+			if node == nil {
+				return nil, fmt.Errorf("unknown validator %q for update at height %v", name, height)
+			}
+			valUpdate[node] = power
+		}
+		testnet.ValidatorUpdates[int64(height)] = valUpdate
+	}
+
+	return testnet, testnet.Validate()
+}
+
+// Validate validates a testnet.
+func (t Testnet) Validate() error {
+	if t.Name == "" {
+		return errors.New("network has no name")
+	}
+	if t.IP == nil {
+		return errors.New("network has no IP")
+	}
+	if len(t.Nodes) == 0 {
+		return errors.New("network has no nodes")
+	}
+	switch t.KeyType {
+	case "", types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1:
+	default:
+		return errors.New("unsupported KeyType")
+	}
+	for _, node := range t.Nodes {
+		if err := node.Validate(t); err != nil {
+			return fmt.Errorf("invalid node %q: %w", node.Name, err)
+		}
+	}
+	return nil
+}
+
+// Validate validates a node.
+func (n Node) Validate(testnet Testnet) error {
+	if n.Name == "" {
+		return errors.New("node has no name")
+	}
+	if n.IP == nil {
+		return errors.New("node has no IP address")
+	}
+	if !testnet.IP.Contains(n.IP) {
+		return fmt.Errorf("node IP %v is not in testnet network %v", n.IP, testnet.IP)
+	}
+	if n.ProxyPort > 0 {
+		if n.ProxyPort <= 1024 {
+			return fmt.Errorf("local port %v must be >1024", n.ProxyPort)
+		}
+		for _, peer := range testnet.Nodes {
+			if peer.Name != n.Name && peer.ProxyPort == n.ProxyPort {
+				return fmt.Errorf("peer %q also has local port %v", peer.Name, n.ProxyPort)
+			}
+		}
+	}
+	switch n.FastSync {
+	case "", "v0", "v1", "v2":
+	default:
+		return fmt.Errorf("invalid fast sync setting %q", n.FastSync)
+	}
+	switch n.Database {
+	case "goleveldb", "cleveldb", "boltdb", "rocksdb", "badgerdb":
+	default:
+		return fmt.Errorf("invalid database setting %q", n.Database)
+	}
+	switch n.ABCIProtocol {
+	case ProtocolBuiltin, ProtocolUNIX, ProtocolTCP, ProtocolGRPC:
+	default:
+		return fmt.Errorf("invalid ABCI protocol setting %q", n.ABCIProtocol)
+	}
+	switch n.PrivvalProtocol {
+	case ProtocolFile, ProtocolUNIX, ProtocolTCP:
+	default:
+		return fmt.Errorf("invalid privval protocol setting %q", n.PrivvalProtocol)
+	}
+
+	if n.StartAt > 0 && n.StartAt < n.Testnet.InitialHeight {
+		return fmt.Errorf("cannot start at height %v lower than initial height %v",
+			n.StartAt, n.Testnet.InitialHeight)
+	}
+	if n.StateSync && n.StartAt == 0 {
+		return errors.New("state synced nodes cannot start at the initial height")
+	}
+	if n.PersistInterval == 0 && n.RetainBlocks > 0 {
+		return errors.New("persist_interval=0 requires retain_blocks=0")
+	}
+	if n.PersistInterval > 1 && n.RetainBlocks > 0 && n.RetainBlocks < n.PersistInterval {
+		return errors.New("persist_interval must be less than or equal to retain_blocks")
+	}
+	if n.SnapshotInterval > 0 && n.RetainBlocks > 0 && n.RetainBlocks < n.SnapshotInterval {
+		return errors.New("snapshot_interval must be less than or equal to retain_blocks")
+	}
+
+	for _, perturbation := range n.Perturbations {
+		switch perturbation {
+		case PerturbationDisconnect, PerturbationKill, PerturbationPause, PerturbationRestart:
+		default:
+			return fmt.Errorf("invalid perturbation %q", perturbation)
+		}
+	}
+
+	if (n.PrivvalProtocol != "file" || n.Mode != "validator") && len(n.Misbehaviors) != 0 {
+		return errors.New("must be using \"file\" privval protocol to implement misbehaviors")
+	}
+
+	for height, misbehavior := range n.Misbehaviors {
+		if height < n.StartAt {
+			return fmt.Errorf("misbehavior height %d is below node start height %d",
+				height, n.StartAt)
+		}
+		if height < testnet.InitialHeight {
+			return fmt.Errorf("misbehavior height %d is below network initial height %d",
+				height, testnet.InitialHeight)
+		}
+		exists := false
+		for possibleBehaviors := range mcs.MisbehaviorList {
+			if possibleBehaviors == misbehavior {
+				exists = true
+			}
+		}
+		if !exists {
+			return fmt.Errorf("misbehavior %s does not exist", misbehavior)
+		}
+	}
+
+	return nil
+}
+
+// LookupNode looks up a node by name. For now, simply do a linear search.
+func (t Testnet) LookupNode(name string) *Node {
+	for _, node := range t.Nodes {
+		if node.Name == name {
+			return node
+		}
+	}
+	return nil
+}
+
+// ArchiveNodes returns a list of archive nodes that start at the initial height
+// and contain the entire blockchain history. They are used e.g. as light client
+// RPC servers.
+func (t Testnet) ArchiveNodes() []*Node {
+	nodes := []*Node{}
+	for _, node := range t.Nodes {
+		if node.Mode != ModeSeed && node.StartAt == 0 && node.RetainBlocks == 0 {
+			nodes = append(nodes, node)
+		}
+	}
+	return nodes
+}
+
+// RandomNode returns a random non-seed node.
+func (t Testnet) RandomNode() *Node {
+	for {
+		node := t.Nodes[rand.Intn(len(t.Nodes))]
+		if node.Mode != ModeSeed {
+			return node
+		}
+	}
+}
+
+// IPv6 returns true if the testnet is an IPv6 network.
+func (t Testnet) IPv6() bool {
+	return t.IP.IP.To4() == nil
+}
+
+// HasPerturbations returns whether the network has any perturbations.
+func (t Testnet) HasPerturbations() bool {
+	for _, node := range t.Nodes {
+		if len(node.Perturbations) > 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// LastMisbehaviorHeight returns the height of the last misbehavior.
+func (t Testnet) LastMisbehaviorHeight() int64 {
+	lastHeight := int64(0)
+	for _, node := range t.Nodes {
+		for height := range node.Misbehaviors {
+			if height > lastHeight {
+				lastHeight = height
+			}
+		}
+	}
+	return lastHeight
+}
+
+// AddressP2P returns a P2P endpoint address for the node.
+func (n Node) AddressP2P(withID bool) string {
+	ip := n.IP.String()
+	if n.IP.To4() == nil {
+		// IPv6 addresses must be wrapped in [] to avoid conflict with : port separator
+		ip = fmt.Sprintf("[%v]", ip)
+	}
+	addr := fmt.Sprintf("%v:26656", ip)
+	if withID {
+		addr = fmt.Sprintf("%x@%v", n.NodeKey.PubKey().Address().Bytes(), addr)
+	}
+	return addr
+}
+
+// AddressRPC returns an RPC endpoint address for the node.
+func (n Node) AddressRPC() string {
+	ip := n.IP.String()
+	if n.IP.To4() == nil {
+		// IPv6 addresses must be wrapped in [] to avoid conflict with : port separator
+		ip = fmt.Sprintf("[%v]", ip)
+	}
+	return fmt.Sprintf("%v:26657", ip)
+}
+
+// Client returns an RPC client for a node.
+func (n Node) Client() (*rpchttp.HTTP, error) {
+	return rpchttp.New(fmt.Sprintf("http://127.0.0.1:%v", n.ProxyPort), "/websocket")
+}
+
+// keyGenerator generates pseudorandom keys (Ed25519 or secp256k1) from a seed.
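+// Because the runner and the test cases both derive key material from the
+// same fixed seed, e.g. a fresh newKeyGenerator(randomSeed) produces the
+// same key sequence in every process, no key material needs to be exchanged.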
+type keyGenerator struct { + random *rand.Rand +} + +func newKeyGenerator(seed int64) *keyGenerator { + return &keyGenerator{ + random: rand.New(rand.NewSource(seed)), + } +} + +func (g *keyGenerator) Generate(keyType string) crypto.PrivKey { + seed := make([]byte, ed25519.SeedSize) + + _, err := io.ReadFull(g.random, seed) + if err != nil { + panic(err) // this shouldn't happen + } + switch keyType { + case "secp256k1": + return secp256k1.GenPrivKeySecp256k1(seed) + case "", "ed25519": + return ed25519.GenPrivKeyFromSecret(seed) + default: + panic("KeyType not supported") // should not make it this far + } +} + +// portGenerator generates local Docker proxy ports for each node. +type portGenerator struct { + nextPort uint32 +} + +func newPortGenerator(firstPort uint32) *portGenerator { + return &portGenerator{nextPort: firstPort} +} + +func (g *portGenerator) Next() uint32 { + port := g.nextPort + g.nextPort++ + if g.nextPort == 0 { + panic("port overflow") + } + return port +} + +// ipGenerator generates sequential IP addresses for each node, using a random +// network address. +type ipGenerator struct { + network *net.IPNet + nextIP net.IP +} + +func newIPGenerator(network *net.IPNet) *ipGenerator { + nextIP := make([]byte, len(network.IP)) + copy(nextIP, network.IP) + gen := &ipGenerator{network: network, nextIP: nextIP} + // Skip network and gateway addresses + gen.Next() + gen.Next() + return gen +} + +func (g *ipGenerator) Network() *net.IPNet { + n := &net.IPNet{ + IP: make([]byte, len(g.network.IP)), + Mask: make([]byte, len(g.network.Mask)), + } + copy(n.IP, g.network.IP) + copy(n.Mask, g.network.Mask) + return n +} + +func (g *ipGenerator) Next() net.IP { + ip := make([]byte, len(g.nextIP)) + copy(ip, g.nextIP) + for i := len(g.nextIP) - 1; i >= 0; i-- { + g.nextIP[i]++ + if g.nextIP[i] != 0 { + break + } + } + return ip +} diff --git a/test/e2e/run-multiple.sh b/test/e2e/run-multiple.sh new file mode 100755 index 000000000..5d6a20ef9 --- /dev/null +++ b/test/e2e/run-multiple.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# +# This is a convenience script that takes a list of testnet manifests +# as arguments and runs each one of them sequentially. If a testnet +# fails, the container logs are dumped to stdout along with the testnet +# manifest, but the remaining testnets are still run. +# +# This is mostly used to run generated networks in nightly CI jobs. +# + +set -euo pipefail + +if [[ $# == 0 ]]; then + echo "Usage: $0 [MANIFEST...]" >&2 + exit 1 +fi + +FAILED=() + +for MANIFEST in "$@"; do + START=$SECONDS + echo "==> Running testnet $MANIFEST..." + + if ! ./build/runner -f "$MANIFEST"; then + echo "==> Testnet $MANIFEST failed, dumping manifest..." + cat "$MANIFEST" + + echo "==> Dumping container logs for $MANIFEST..." + ./build/runner -f "$MANIFEST" logs + + echo "==> Cleaning up failed testnet $MANIFEST..." 
+        ./build/runner -f "$MANIFEST" cleanup
+
+        FAILED+=("$MANIFEST")
+    fi
+
+    echo "==> Completed testnet $MANIFEST in $(( SECONDS - START ))s"
+    echo ""
+done
+
+if [[ ${#FAILED[@]} -ne 0 ]]; then
+    echo "${#FAILED[@]} testnets failed:"
+    for MANIFEST in "${FAILED[@]}"; do
+        echo "- $MANIFEST"
+    done
+    exit 1
+else
+    echo "All testnets successful"
+fi
diff --git a/test/e2e/runner/cleanup.go b/test/e2e/runner/cleanup.go
new file mode 100644
index 000000000..d99ca54cf
--- /dev/null
+++ b/test/e2e/runner/cleanup.go
@@ -0,0 +1,83 @@
+package main
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
+)
+
+// Cleanup removes the Docker Compose containers and testnet directory.
+func Cleanup(testnet *e2e.Testnet) error {
+	err := cleanupDocker()
+	if err != nil {
+		return err
+	}
+	err = cleanupDir(testnet.Dir)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// cleanupDocker removes all E2E resources (with label e2e=True), regardless
+// of testnet.
+func cleanupDocker() error {
+	logger.Info("Removing Docker containers and networks")
+
+	// GNU xargs requires the -r flag to not run when input is empty, macOS
+	// does this by default. Ugly, but works.
+	xargsR := `$(if [[ $OSTYPE == "linux-gnu"* ]]; then echo -n "-r"; fi)`
+
+	err := exec("bash", "-c", fmt.Sprintf(
+		"docker container ls -qa --filter label=e2e | xargs %v docker container rm -f", xargsR))
+	if err != nil {
+		return err
+	}
+
+	err = exec("bash", "-c", fmt.Sprintf(
+		"docker network ls -q --filter label=e2e | xargs %v docker network rm", xargsR))
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// cleanupDir cleans up a testnet directory.
+func cleanupDir(dir string) error {
+	if dir == "" {
+		return errors.New("no directory set")
+	}
+
+	_, err := os.Stat(dir)
+	if os.IsNotExist(err) {
+		return nil
+	} else if err != nil {
+		return err
+	}
+
+	logger.Info(fmt.Sprintf("Removing testnet directory %q", dir))
+
+	// On Linux, some local files in the volume will be owned by root since Tendermint
+	// runs as root inside the container, so we need to clean them up from within a
+	// container running as root too.
+	absDir, err := filepath.Abs(dir)
+	if err != nil {
+		return err
+	}
+	err = execDocker("run", "--rm", "--entrypoint", "", "-v", fmt.Sprintf("%v:/network", absDir),
+		"tendermint/e2e-node", "sh", "-c", "rm -rf /network/*/")
+	if err != nil {
+		return err
+	}
+
+	err = os.RemoveAll(dir)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/test/e2e/runner/exec.go b/test/e2e/runner/exec.go
new file mode 100644
index 000000000..f790f7fc1
--- /dev/null
+++ b/test/e2e/runner/exec.go
@@ -0,0 +1,50 @@
+//nolint: gosec
+package main
+
+import (
+	"fmt"
+	"os"
+	osexec "os/exec"
+	"path/filepath"
+)
+
+// exec executes a shell command.
+func exec(args ...string) error {
+	cmd := osexec.Command(args[0], args[1:]...)
+	out, err := cmd.CombinedOutput()
+	switch err := err.(type) {
+	case nil:
+		return nil
+	case *osexec.ExitError:
+		return fmt.Errorf("failed to run %q:\n%v", args, string(out))
+	default:
+		return err
+	}
+}
+
+// execVerbose executes a shell command while displaying its output.
+func execVerbose(args ...string) error {
+	cmd := osexec.Command(args[0], args[1:]...)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	return cmd.Run()
+}
+
+// execCompose runs a Docker Compose command for a testnet.
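+// For example, execCompose(testnet.Dir, "up", "-d") runs
+// `docker-compose -f <testnet.Dir>/docker-compose.yml up -d`.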
+func execCompose(dir string, args ...string) error { + return exec(append( + []string{"docker-compose", "-f", filepath.Join(dir, "docker-compose.yml")}, + args...)...) +} + +// execComposeVerbose runs a Docker Compose command for a testnet and displays its output. +func execComposeVerbose(dir string, args ...string) error { + return execVerbose(append( + []string{"docker-compose", "-f", filepath.Join(dir, "docker-compose.yml")}, + args...)...) +} + +// execDocker runs a Docker command. +func execDocker(args ...string) error { + return exec(append([]string{"docker"}, args...)...) +} diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go new file mode 100644 index 000000000..495c573d3 --- /dev/null +++ b/test/e2e/runner/load.go @@ -0,0 +1,114 @@ +package main + +import ( + "context" + "crypto/rand" + "errors" + "fmt" + "math" + "time" + + rpchttp "github.com/tendermint/tendermint/rpc/client/http" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/types" +) + +// Load generates transactions against the network until the given +// context is cancelled. +func Load(ctx context.Context, testnet *e2e.Testnet) error { + // Since transactions are executed across all nodes in the network, we need + // to reduce transaction load for larger networks to avoid using too much + // CPU. This gives high-throughput small networks and low-throughput large ones. + // This also limits the number of TCP connections, since each worker has + // a connection to all nodes. + concurrency := 64 / len(testnet.Nodes) + if concurrency == 0 { + concurrency = 1 + } + initialTimeout := 1 * time.Minute + stallTimeout := 30 * time.Second + + chTx := make(chan types.Tx) + chSuccess := make(chan types.Tx) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Spawn job generator and processors. + logger.Info(fmt.Sprintf("Starting transaction load (%v workers)...", concurrency)) + started := time.Now() + + go loadGenerate(ctx, chTx) + + for w := 0; w < concurrency; w++ { + go loadProcess(ctx, testnet, chTx, chSuccess) + } + + // Monitor successful transactions, and abort on stalls. + success := 0 + timeout := initialTimeout + for { + select { + case <-chSuccess: + success++ + timeout = stallTimeout + case <-time.After(timeout): + return fmt.Errorf("unable to submit transactions for %v", timeout) + case <-ctx.Done(): + if success == 0 { + return errors.New("failed to submit any transactions") + } + logger.Info(fmt.Sprintf("Ending transaction load after %v txs (%.1f tx/s)...", + success, float64(success)/time.Since(started).Seconds())) + return nil + } + } +} + +// loadGenerate generates jobs until the context is cancelled +func loadGenerate(ctx context.Context, chTx chan<- types.Tx) { + for i := 0; i < math.MaxInt64; i++ { + // We keep generating the same 1000 keys over and over, with different values. + // This gives a reasonable load without putting too much data in the app. 
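+		// E.g. iteration 1234 maps to id 234 and key "load-EA" (234 in hex),
+		// overwriting the value written by iteration 234 instead of growing state.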
+ id := i % 1000 + + bz := make([]byte, 2048) // 4kb hex-encoded + _, err := rand.Read(bz) + if err != nil { + panic(fmt.Sprintf("Failed to read random bytes: %v", err)) + } + tx := types.Tx(fmt.Sprintf("load-%X=%x", id, bz)) + + select { + case chTx <- tx: + time.Sleep(10 * time.Millisecond) + case <-ctx.Done(): + close(chTx) + return + } + } +} + +// loadProcess processes transactions +func loadProcess(ctx context.Context, testnet *e2e.Testnet, chTx <-chan types.Tx, chSuccess chan<- types.Tx) { + // Each worker gets its own client to each node, which allows for some + // concurrency while still bounding it. + clients := map[string]*rpchttp.HTTP{} + + var err error + for tx := range chTx { + node := testnet.RandomNode() + client, ok := clients[node.Name] + if !ok { + client, err = node.Client() + if err != nil { + continue + } + clients[node.Name] = client + } + _, err = client.BroadcastTxCommit(ctx, tx) + if err != nil { + continue + } + chSuccess <- tx + } +} diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go new file mode 100644 index 000000000..d55fd95f2 --- /dev/null +++ b/test/e2e/runner/main.go @@ -0,0 +1,216 @@ +package main + +import ( + "context" + "fmt" + "os" + + "github.com/spf13/cobra" + + "github.com/tendermint/tendermint/libs/log" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" +) + +var ( + logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) +) + +func main() { + NewCLI().Run() +} + +// CLI is the Cobra-based command-line interface. +type CLI struct { + root *cobra.Command + testnet *e2e.Testnet + preserve bool +} + +// NewCLI sets up the CLI. +func NewCLI() *CLI { + cli := &CLI{} + cli.root = &cobra.Command{ + Use: "runner", + Short: "End-to-end test runner", + SilenceUsage: true, + SilenceErrors: true, // we'll output them ourselves in Run() + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + file, err := cmd.Flags().GetString("file") + if err != nil { + return err + } + testnet, err := e2e.LoadTestnet(file) + if err != nil { + return err + } + + cli.testnet = testnet + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + if err := Cleanup(cli.testnet); err != nil { + return err + } + if err := Setup(cli.testnet); err != nil { + return err + } + + chLoadResult := make(chan error) + ctx, loadCancel := context.WithCancel(context.Background()) + defer loadCancel() + go func() { + err := Load(ctx, cli.testnet) + if err != nil { + logger.Error(fmt.Sprintf("Transaction load failed: %v", err.Error())) + } + chLoadResult <- err + }() + + if err := Start(cli.testnet); err != nil { + return err + } + + if lastMisbehavior := cli.testnet.LastMisbehaviorHeight(); lastMisbehavior > 0 { + // wait for misbehaviors before starting perturbations. We do a separate + // wait for another 5 blocks, since the last misbehavior height may be + // in the past depending on network startup ordering. 
+ if err := WaitUntil(cli.testnet, lastMisbehavior); err != nil { + return err + } + } + if err := Wait(cli.testnet, 5); err != nil { // allow some txs to go through + return err + } + + if cli.testnet.HasPerturbations() { + if err := Perturb(cli.testnet); err != nil { + return err + } + if err := Wait(cli.testnet, 5); err != nil { // allow some txs to go through + return err + } + } + + loadCancel() + if err := <-chLoadResult; err != nil { + return err + } + if err := Wait(cli.testnet, 5); err != nil { // wait for network to settle before tests + return err + } + if err := Test(cli.testnet); err != nil { + return err + } + if !cli.preserve { + if err := Cleanup(cli.testnet); err != nil { + return err + } + } + return nil + }, + } + + cli.root.PersistentFlags().StringP("file", "f", "", "Testnet TOML manifest") + _ = cli.root.MarkPersistentFlagRequired("file") + + cli.root.Flags().BoolVarP(&cli.preserve, "preserve", "p", false, + "Preserves the running of the test net after tests are completed") + + cli.root.AddCommand(&cobra.Command{ + Use: "setup", + Short: "Generates the testnet directory and configuration", + RunE: func(cmd *cobra.Command, args []string) error { + return Setup(cli.testnet) + }, + }) + + cli.root.AddCommand(&cobra.Command{ + Use: "start", + Short: "Starts the Docker testnet, waiting for nodes to become available", + RunE: func(cmd *cobra.Command, args []string) error { + _, err := os.Stat(cli.testnet.Dir) + if os.IsNotExist(err) { + err = Setup(cli.testnet) + } + if err != nil { + return err + } + return Start(cli.testnet) + }, + }) + + cli.root.AddCommand(&cobra.Command{ + Use: "perturb", + Short: "Perturbs the Docker testnet, e.g. by restarting or disconnecting nodes", + RunE: func(cmd *cobra.Command, args []string) error { + return Perturb(cli.testnet) + }, + }) + + cli.root.AddCommand(&cobra.Command{ + Use: "wait", + Short: "Waits for a few blocks to be produced and all nodes to catch up", + RunE: func(cmd *cobra.Command, args []string) error { + return Wait(cli.testnet, 5) + }, + }) + + cli.root.AddCommand(&cobra.Command{ + Use: "stop", + Short: "Stops the Docker testnet", + RunE: func(cmd *cobra.Command, args []string) error { + logger.Info("Stopping testnet") + return execCompose(cli.testnet.Dir, "down") + }, + }) + + cli.root.AddCommand(&cobra.Command{ + Use: "load", + Short: "Generates transaction load until the command is cancelled", + RunE: func(cmd *cobra.Command, args []string) error { + return Load(context.Background(), cli.testnet) + }, + }) + + cli.root.AddCommand(&cobra.Command{ + Use: "test", + Short: "Runs test cases against a running testnet", + RunE: func(cmd *cobra.Command, args []string) error { + return Test(cli.testnet) + }, + }) + + cli.root.AddCommand(&cobra.Command{ + Use: "cleanup", + Short: "Removes the testnet directory", + RunE: func(cmd *cobra.Command, args []string) error { + return Cleanup(cli.testnet) + }, + }) + + cli.root.AddCommand(&cobra.Command{ + Use: "logs", + Short: "Shows the testnet logs", + RunE: func(cmd *cobra.Command, args []string) error { + return execComposeVerbose(cli.testnet.Dir, "logs") + }, + }) + + cli.root.AddCommand(&cobra.Command{ + Use: "tail", + Short: "Tails the testnet logs", + RunE: func(cmd *cobra.Command, args []string) error { + return execComposeVerbose(cli.testnet.Dir, "logs", "--follow") + }, + }) + + return cli +} + +// Run runs the CLI. 
+func (cli *CLI) Run() { + if err := cli.root.Execute(); err != nil { + logger.Error(err.Error()) + os.Exit(1) + } +} diff --git a/test/e2e/runner/perturb.go b/test/e2e/runner/perturb.go new file mode 100644 index 000000000..5194b70a6 --- /dev/null +++ b/test/e2e/runner/perturb.go @@ -0,0 +1,75 @@ +package main + +import ( + "fmt" + "time" + + rpctypes "github.com/tendermint/tendermint/rpc/core/types" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" +) + +// Perturbs a running testnet. +func Perturb(testnet *e2e.Testnet) error { + for _, node := range testnet.Nodes { + for _, perturbation := range node.Perturbations { + _, err := PerturbNode(node, perturbation) + if err != nil { + return err + } + time.Sleep(3 * time.Second) // give network some time to recover between each + } + } + return nil +} + +// PerturbNode perturbs a node with a given perturbation, returning its status +// after recovering. +func PerturbNode(node *e2e.Node, perturbation e2e.Perturbation) (*rpctypes.ResultStatus, error) { + testnet := node.Testnet + switch perturbation { + case e2e.PerturbationDisconnect: + logger.Info(fmt.Sprintf("Disconnecting node %v...", node.Name)) + if err := execDocker("network", "disconnect", testnet.Name+"_"+testnet.Name, node.Name); err != nil { + return nil, err + } + time.Sleep(10 * time.Second) + if err := execDocker("network", "connect", testnet.Name+"_"+testnet.Name, node.Name); err != nil { + return nil, err + } + + case e2e.PerturbationKill: + logger.Info(fmt.Sprintf("Killing node %v...", node.Name)) + if err := execCompose(testnet.Dir, "kill", "-s", "SIGKILL", node.Name); err != nil { + return nil, err + } + if err := execCompose(testnet.Dir, "start", node.Name); err != nil { + return nil, err + } + + case e2e.PerturbationPause: + logger.Info(fmt.Sprintf("Pausing node %v...", node.Name)) + if err := execCompose(testnet.Dir, "pause", node.Name); err != nil { + return nil, err + } + time.Sleep(10 * time.Second) + if err := execCompose(testnet.Dir, "unpause", node.Name); err != nil { + return nil, err + } + + case e2e.PerturbationRestart: + logger.Info(fmt.Sprintf("Restarting node %v...", node.Name)) + if err := execCompose(testnet.Dir, "restart", node.Name); err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("unexpected perturbation %q", perturbation) + } + + status, err := waitForNode(node, 0, 10*time.Second) + if err != nil { + return nil, err + } + logger.Info(fmt.Sprintf("Node %v recovered at height %v", node.Name, status.SyncInfo.LatestBlockHeight)) + return status, nil +} diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go new file mode 100644 index 000000000..c50ab6542 --- /dev/null +++ b/test/e2e/runner/rpc.go @@ -0,0 +1,107 @@ +package main + +import ( + "context" + "errors" + "fmt" + "time" + + rpchttp "github.com/tendermint/tendermint/rpc/client/http" + rpctypes "github.com/tendermint/tendermint/rpc/core/types" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/types" +) + +// waitForHeight waits for the network to reach a certain height (or above), +// returning the highest height seen. Errors if the network is not making +// progress at all. 
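+// Nodes are polled about once per second; if no node reports a height
+// increase for 20 seconds, the chain is considered stalled.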
+func waitForHeight(testnet *e2e.Testnet, height int64) (*types.Block, *types.BlockID, error) { + var ( + err error + maxResult *rpctypes.ResultBlock + clients = map[string]*rpchttp.HTTP{} + lastIncrease = time.Now() + ) + + for { + for _, node := range testnet.Nodes { + if node.Mode == e2e.ModeSeed { + continue + } + client, ok := clients[node.Name] + if !ok { + client, err = node.Client() + if err != nil { + continue + } + clients[node.Name] = client + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + result, err := client.Block(ctx, nil) + if err != nil { + continue + } + if result.Block != nil && (maxResult == nil || result.Block.Height >= maxResult.Block.Height) { + maxResult = result + lastIncrease = time.Now() + } + if maxResult != nil && maxResult.Block.Height >= height { + return maxResult.Block, &maxResult.BlockID, nil + } + } + + if len(clients) == 0 { + return nil, nil, errors.New("unable to connect to any network nodes") + } + if time.Since(lastIncrease) >= 20*time.Second { + if maxResult == nil { + return nil, nil, errors.New("chain stalled at unknown height") + } + return nil, nil, fmt.Errorf("chain stalled at height %v", maxResult.Block.Height) + } + time.Sleep(1 * time.Second) + } +} + +// waitForNode waits for a node to become available and catch up to the given block height. +func waitForNode(node *e2e.Node, height int64, timeout time.Duration) (*rpctypes.ResultStatus, error) { + client, err := node.Client() + if err != nil { + return nil, err + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + for { + status, err := client.Status(ctx) + switch { + case errors.Is(err, context.DeadlineExceeded): + return nil, fmt.Errorf("timed out waiting for %v to reach height %v", node.Name, height) + case errors.Is(err, context.Canceled): + return nil, err + case err == nil && status.SyncInfo.LatestBlockHeight >= height: + return status, nil + } + + time.Sleep(200 * time.Millisecond) + } +} + +// waitForAllNodes waits for all nodes to become available and catch up to the given block height. 
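+// Seed nodes are skipped, and the highest height seen across the remaining
+// nodes is returned.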
+func waitForAllNodes(testnet *e2e.Testnet, height int64, timeout time.Duration) (int64, error) {
+	lastHeight := int64(0)
+	deadline := time.Now().Add(timeout)
+	for _, node := range testnet.Nodes {
+		if node.Mode == e2e.ModeSeed {
+			continue
+		}
+		status, err := waitForNode(node, height, time.Until(deadline))
+		if err != nil {
+			return 0, err
+		}
+		if status.SyncInfo.LatestBlockHeight > lastHeight {
+			lastHeight = status.SyncInfo.LatestBlockHeight
+		}
+	}
+	return lastHeight, nil
+}
diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go
new file mode 100644
index 000000000..d9c3ab0cf
--- /dev/null
+++ b/test/e2e/runner/setup.go
@@ -0,0 +1,398 @@
+// nolint: gosec
+package main
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"text/template"
+	"time"
+
+	"github.com/BurntSushi/toml"
+
+	"github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/crypto/ed25519"
+	"github.com/tendermint/tendermint/p2p"
+	"github.com/tendermint/tendermint/privval"
+	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
+	"github.com/tendermint/tendermint/types"
+)
+
+const (
+	AppAddressTCP  = "tcp://127.0.0.1:30000"
+	AppAddressUNIX = "unix:///var/run/app.sock"
+
+	PrivvalAddressTCP     = "tcp://0.0.0.0:27559"
+	PrivvalAddressUNIX    = "unix:///var/run/privval.sock"
+	PrivvalKeyFile        = "config/priv_validator_key.json"
+	PrivvalStateFile      = "data/priv_validator_state.json"
+	PrivvalDummyKeyFile   = "config/dummy_validator_key.json"
+	PrivvalDummyStateFile = "data/dummy_validator_state.json"
+)
+
+// Setup sets up the testnet configuration.
+func Setup(testnet *e2e.Testnet) error {
+	logger.Info(fmt.Sprintf("Generating testnet files in %q", testnet.Dir))
+
+	err := os.MkdirAll(testnet.Dir, os.ModePerm)
+	if err != nil {
+		return err
+	}
+
+	compose, err := MakeDockerCompose(testnet)
+	if err != nil {
+		return err
+	}
+	err = ioutil.WriteFile(filepath.Join(testnet.Dir, "docker-compose.yml"), compose, 0644)
+	if err != nil {
+		return err
+	}
+
+	genesis, err := MakeGenesis(testnet)
+	if err != nil {
+		return err
+	}
+
+	for _, node := range testnet.Nodes {
+		nodeDir := filepath.Join(testnet.Dir, node.Name)
+		dirs := []string{
+			filepath.Join(nodeDir, "config"),
+			filepath.Join(nodeDir, "data"),
+			filepath.Join(nodeDir, "data", "app"),
+		}
+		for _, dir := range dirs {
+			err := os.MkdirAll(dir, 0755)
+			if err != nil {
+				return err
+			}
+		}
+
+		err = genesis.SaveAs(filepath.Join(nodeDir, "config", "genesis.json"))
+		if err != nil {
+			return err
+		}
+
+		cfg, err := MakeConfig(node)
+		if err != nil {
+			return err
+		}
+		config.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), cfg) // panics on failure
+
+		appCfg, err := MakeAppConfig(node)
+		if err != nil {
+			return err
+		}
+		err = ioutil.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0644)
+		if err != nil {
+			return err
+		}
+
+		err = (&p2p.NodeKey{PrivKey: node.NodeKey}).SaveAs(filepath.Join(nodeDir, "config", "node_key.json"))
+		if err != nil {
+			return err
+		}
+
+		(privval.NewFilePV(node.PrivvalKey,
+			filepath.Join(nodeDir, PrivvalKeyFile),
+			filepath.Join(nodeDir, PrivvalStateFile),
+		)).Save()
+
+		// Set up a dummy validator. Tendermint requires a file PV even when not used, so we
+		// give it a dummy such that it will fail if it actually tries to use it.
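+		// The dummy key is freshly generated and does not belong to the validator
+		// set, so any signature it produces would be rejected.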
+ (privval.NewFilePV(ed25519.GenPrivKey(), + filepath.Join(nodeDir, PrivvalDummyKeyFile), + filepath.Join(nodeDir, PrivvalDummyStateFile), + )).Save() + } + + return nil +} + +// MakeDockerCompose generates a Docker Compose config for a testnet. +func MakeDockerCompose(testnet *e2e.Testnet) ([]byte, error) { + // Must use version 2 Docker Compose format, to support IPv6. + tmpl, err := template.New("docker-compose").Funcs(template.FuncMap{ + "misbehaviorsToString": func(misbehaviors map[int64]string) string { + str := "" + for height, misbehavior := range misbehaviors { + // after the first behavior set, a comma must be prepended + if str != "" { + str += "," + } + heightString := strconv.Itoa(int(height)) + str += misbehavior + "," + heightString + } + return str + }, + }).Parse(`version: '2.4' + +networks: + {{ .Name }}: + labels: + e2e: true + driver: bridge +{{- if .IPv6 }} + enable_ipv6: true +{{- end }} + ipam: + driver: default + config: + - subnet: {{ .IP }} + +services: +{{- range .Nodes }} + {{ .Name }}: + labels: + e2e: true + container_name: {{ .Name }} + image: tendermint/e2e-node +{{- if eq .ABCIProtocol "builtin" }} + entrypoint: /usr/bin/entrypoint-builtin +{{- else if .Misbehaviors }} + entrypoint: /usr/bin/entrypoint-maverick + command: ["node", "--misbehaviors", "{{ misbehaviorsToString .Misbehaviors }}"] +{{- end }} + init: true + ports: + - 26656 + - {{ if .ProxyPort }}{{ .ProxyPort }}:{{ end }}26657 + volumes: + - ./{{ .Name }}:/tendermint + networks: + {{ $.Name }}: + ipv{{ if $.IPv6 }}6{{ else }}4{{ end}}_address: {{ .IP }} + +{{end}}`) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = tmpl.Execute(&buf, testnet) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MakeGenesis generates a genesis document. +func MakeGenesis(testnet *e2e.Testnet) (types.GenesisDoc, error) { + genesis := types.GenesisDoc{ + GenesisTime: time.Now(), + ChainID: testnet.Name, + ConsensusParams: types.DefaultConsensusParams(), + InitialHeight: testnet.InitialHeight, + } + switch testnet.KeyType { + case "", types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1: + genesis.ConsensusParams.Validator.PubKeyTypes = + append(genesis.ConsensusParams.Validator.PubKeyTypes, types.ABCIPubKeyTypeSecp256k1) + default: + return genesis, errors.New("unsupported KeyType") + } + for validator, power := range testnet.Validators { + genesis.Validators = append(genesis.Validators, types.GenesisValidator{ + Name: validator.Name, + Address: validator.PrivvalKey.PubKey().Address(), + PubKey: validator.PrivvalKey.PubKey(), + Power: power, + }) + } + // The validator set will be sorted internally by Tendermint ranked by power, + // but we sort it here as well so that all genesis files are identical. + sort.Slice(genesis.Validators, func(i, j int) bool { + return strings.Compare(genesis.Validators[i].Name, genesis.Validators[j].Name) == -1 + }) + if len(testnet.InitialState) > 0 { + appState, err := json.Marshal(testnet.InitialState) + if err != nil { + return genesis, err + } + genesis.AppState = appState + } + return genesis, genesis.ValidateAndComplete() +} + +// MakeConfig generates a Tendermint config for a node. 
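+// The result is written out as the node's config.toml by Setup.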
+func MakeConfig(node *e2e.Node) (*config.Config, error) { + cfg := config.DefaultConfig() + cfg.Moniker = node.Name + cfg.ProxyApp = AppAddressTCP + cfg.RPC.ListenAddress = "tcp://0.0.0.0:26657" + cfg.P2P.ExternalAddress = fmt.Sprintf("tcp://%v", node.AddressP2P(false)) + cfg.P2P.AddrBookStrict = false + cfg.DBBackend = node.Database + cfg.StateSync.DiscoveryTime = 5 * time.Second + + switch node.ABCIProtocol { + case e2e.ProtocolUNIX: + cfg.ProxyApp = AppAddressUNIX + case e2e.ProtocolTCP: + cfg.ProxyApp = AppAddressTCP + case e2e.ProtocolGRPC: + cfg.ProxyApp = AppAddressTCP + cfg.ABCI = "grpc" + case e2e.ProtocolBuiltin: + cfg.ProxyApp = "" + cfg.ABCI = "" + default: + return nil, fmt.Errorf("unexpected ABCI protocol setting %q", node.ABCIProtocol) + } + + // Tendermint errors if it does not have a privval key set up, regardless of whether + // it's actually needed (e.g. for remote KMS or non-validators). We set up a dummy + // key here by default, and use the real key for actual validators that should use + // the file privval. + cfg.PrivValidatorListenAddr = "" + cfg.PrivValidatorKey = PrivvalDummyKeyFile + cfg.PrivValidatorState = PrivvalDummyStateFile + + switch node.Mode { + case e2e.ModeValidator: + switch node.PrivvalProtocol { + case e2e.ProtocolFile: + cfg.PrivValidatorKey = PrivvalKeyFile + cfg.PrivValidatorState = PrivvalStateFile + case e2e.ProtocolUNIX: + cfg.PrivValidatorListenAddr = PrivvalAddressUNIX + case e2e.ProtocolTCP: + cfg.PrivValidatorListenAddr = PrivvalAddressTCP + default: + return nil, fmt.Errorf("invalid privval protocol setting %q", node.PrivvalProtocol) + } + case e2e.ModeSeed: + cfg.P2P.SeedMode = true + cfg.P2P.PexReactor = true + case e2e.ModeFull: + // Don't need to do anything, since we're using a dummy privval key by default. + default: + return nil, fmt.Errorf("unexpected mode %q", node.Mode) + } + + if node.FastSync == "" { + cfg.FastSyncMode = false + } else { + cfg.FastSync.Version = node.FastSync + } + + if node.StateSync { + cfg.StateSync.Enable = true + cfg.StateSync.RPCServers = []string{} + for _, peer := range node.Testnet.ArchiveNodes() { + if peer.Name == node.Name { + continue + } + cfg.StateSync.RPCServers = append(cfg.StateSync.RPCServers, peer.AddressRPC()) + } + if len(cfg.StateSync.RPCServers) < 2 { + return nil, errors.New("unable to find 2 suitable state sync RPC servers") + } + } + + cfg.P2P.Seeds = "" + for _, seed := range node.Seeds { + if len(cfg.P2P.Seeds) > 0 { + cfg.P2P.Seeds += "," + } + cfg.P2P.Seeds += seed.AddressP2P(true) + } + cfg.P2P.PersistentPeers = "" + for _, peer := range node.PersistentPeers { + if len(cfg.P2P.PersistentPeers) > 0 { + cfg.P2P.PersistentPeers += "," + } + cfg.P2P.PersistentPeers += peer.AddressP2P(true) + } + return cfg, nil +} + +// MakeAppConfig generates an ABCI application config for a node. 
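+// The config is TOML-encoded and written out as the node's app.toml by Setup.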
+func MakeAppConfig(node *e2e.Node) ([]byte, error) {
+	cfg := map[string]interface{}{
+		"chain_id":          node.Testnet.Name,
+		"dir":               "data/app",
+		"listen":            AppAddressUNIX,
+		"protocol":          "socket",
+		"persist_interval":  node.PersistInterval,
+		"snapshot_interval": node.SnapshotInterval,
+		"retain_blocks":     node.RetainBlocks,
+		"key_type":          node.PrivvalKey.Type(),
+	}
+	switch node.ABCIProtocol {
+	case e2e.ProtocolUNIX:
+		cfg["listen"] = AppAddressUNIX
+	case e2e.ProtocolTCP:
+		cfg["listen"] = AppAddressTCP
+	case e2e.ProtocolGRPC:
+		cfg["listen"] = AppAddressTCP
+		cfg["protocol"] = "grpc"
+	case e2e.ProtocolBuiltin:
+		delete(cfg, "listen")
+		cfg["protocol"] = "builtin"
+	default:
+		return nil, fmt.Errorf("unexpected ABCI protocol setting %q", node.ABCIProtocol)
+	}
+	if node.Mode == e2e.ModeValidator {
+		switch node.PrivvalProtocol {
+		case e2e.ProtocolFile: // the node uses its file PV directly; no app-side privval server
+		case e2e.ProtocolTCP:
+			cfg["privval_server"] = PrivvalAddressTCP
+			cfg["privval_key"] = PrivvalKeyFile
+			cfg["privval_state"] = PrivvalStateFile
+		case e2e.ProtocolUNIX:
+			cfg["privval_server"] = PrivvalAddressUNIX
+			cfg["privval_key"] = PrivvalKeyFile
+			cfg["privval_state"] = PrivvalStateFile
+		default:
+			return nil, fmt.Errorf("unexpected privval protocol setting %q", node.PrivvalProtocol)
+		}
+	}
+
+	misbehaviors := make(map[string]string)
+	for height, misbehavior := range node.Misbehaviors {
+		misbehaviors[strconv.Itoa(int(height))] = misbehavior
+	}
+	cfg["misbehaviors"] = misbehaviors
+
+	if len(node.Testnet.ValidatorUpdates) > 0 {
+		validatorUpdates := map[string]map[string]int64{}
+		for height, validators := range node.Testnet.ValidatorUpdates {
+			updateVals := map[string]int64{}
+			for node, power := range validators {
+				updateVals[base64.StdEncoding.EncodeToString(node.PrivvalKey.PubKey().Bytes())] = power
+			}
+			validatorUpdates[fmt.Sprintf("%v", height)] = updateVals
+		}
+		cfg["validator_update"] = validatorUpdates
+	}
+
+	var buf bytes.Buffer
+	err := toml.NewEncoder(&buf).Encode(cfg)
+	if err != nil {
+		return nil, fmt.Errorf("failed to generate app config: %w", err)
+	}
+	return buf.Bytes(), nil
+}
+
+// UpdateConfigStateSync updates the state sync config for a node.
+func UpdateConfigStateSync(node *e2e.Node, height int64, hash []byte) error {
+	cfgPath := filepath.Join(node.Testnet.Dir, node.Name, "config", "config.toml")
+
+	// FIXME Apparently there's no function to simply load a config file without
+	// involving the entire Viper apparatus, so we'll just resort to regexps.
+	bz, err := ioutil.ReadFile(cfgPath)
+	if err != nil {
+		return err
+	}
+	bz = regexp.MustCompile(`(?m)^trust_height =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust_height = %v`, height)))
+	bz = regexp.MustCompile(`(?m)^trust_hash =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust_hash = "%X"`, hash)))
+	return ioutil.WriteFile(cfgPath, bz, 0644)
+}
diff --git a/test/e2e/runner/start.go b/test/e2e/runner/start.go
new file mode 100644
index 000000000..53acdd821
--- /dev/null
+++ b/test/e2e/runner/start.go
@@ -0,0 +1,87 @@
+package main
+
+import (
+	"fmt"
+	"sort"
+	"time"
+
+	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
+)
+
+func Start(testnet *e2e.Testnet) error {
+
+	// Nodes are already sorted by name. Sort them by mode then startAt,
+	// which gives the overall order startAt, mode, name.
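+	// Note that testnet.Nodes is sorted in place, since the slice is not copied.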
+ nodeQueue := testnet.Nodes + sort.SliceStable(nodeQueue, func(i, j int) bool { + a, b := nodeQueue[i], nodeQueue[j] + switch { + case a.Mode == b.Mode: + return false + case a.Mode == e2e.ModeSeed: + return true + case a.Mode == e2e.ModeValidator && b.Mode == e2e.ModeFull: + return true + } + return false + }) + sort.SliceStable(nodeQueue, func(i, j int) bool { + return nodeQueue[i].StartAt < nodeQueue[j].StartAt + }) + if len(nodeQueue) == 0 { + return fmt.Errorf("no nodes in testnet") + } + if nodeQueue[0].StartAt > 0 { + return fmt.Errorf("no initial nodes in testnet") + } + + // Start initial nodes (StartAt: 0) + logger.Info("Starting initial network nodes...") + for len(nodeQueue) > 0 && nodeQueue[0].StartAt == 0 { + node := nodeQueue[0] + nodeQueue = nodeQueue[1:] + if err := execCompose(testnet.Dir, "up", "-d", node.Name); err != nil { + return err + } + if _, err := waitForNode(node, 0, 15*time.Second); err != nil { + return err + } + logger.Info(fmt.Sprintf("Node %v up on http://127.0.0.1:%v", node.Name, node.ProxyPort)) + } + + // Wait for initial height + logger.Info(fmt.Sprintf("Waiting for initial height %v...", testnet.InitialHeight)) + block, blockID, err := waitForHeight(testnet, testnet.InitialHeight) + if err != nil { + return err + } + + // Update any state sync nodes with a trusted height and hash + for _, node := range nodeQueue { + if node.StateSync { + err = UpdateConfigStateSync(node, block.Height, blockID.Hash.Bytes()) + if err != nil { + return err + } + } + } + + // Start up remaining nodes + for _, node := range nodeQueue { + logger.Info(fmt.Sprintf("Starting node %v at height %v...", node.Name, node.StartAt)) + if _, _, err := waitForHeight(testnet, node.StartAt); err != nil { + return err + } + if err := execCompose(testnet.Dir, "up", "-d", node.Name); err != nil { + return err + } + status, err := waitForNode(node, node.StartAt, 1*time.Minute) + if err != nil { + return err + } + logger.Info(fmt.Sprintf("Node %v up on http://127.0.0.1:%v at height %v", + node.Name, node.ProxyPort, status.SyncInfo.LatestBlockHeight)) + } + + return nil +} diff --git a/test/e2e/runner/test.go b/test/e2e/runner/test.go new file mode 100644 index 000000000..834ce6f2d --- /dev/null +++ b/test/e2e/runner/test.go @@ -0,0 +1,19 @@ +package main + +import ( + "os" + + e2e "github.com/tendermint/tendermint/test/e2e/pkg" +) + +// Test runs test cases under tests/ +func Test(testnet *e2e.Testnet) error { + logger.Info("Running tests in ./tests/...") + + err := os.Setenv("E2E_MANIFEST", testnet.File) + if err != nil { + return err + } + + return execVerbose("go", "test", "-count", "1", "./tests/...") +} diff --git a/test/e2e/runner/wait.go b/test/e2e/runner/wait.go new file mode 100644 index 000000000..8e9030856 --- /dev/null +++ b/test/e2e/runner/wait.go @@ -0,0 +1,28 @@ +package main + +import ( + "fmt" + "time" + + e2e "github.com/tendermint/tendermint/test/e2e/pkg" +) + +// Wait waits for a number of blocks to be produced, and for all nodes to catch +// up with it. +func Wait(testnet *e2e.Testnet, blocks int64) error { + block, _, err := waitForHeight(testnet, 0) + if err != nil { + return err + } + return WaitUntil(testnet, block.Height+blocks) +} + +// WaitUntil waits until a given height has been reached. 
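+// It errors if any node fails to reach the height within the timeout.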
+func WaitUntil(testnet *e2e.Testnet, height int64) error { + logger.Info(fmt.Sprintf("Waiting for all nodes to reach height %v...", height)) + _, err := waitForAllNodes(testnet, height, 20*time.Second) + if err != nil { + return err + } + return nil +} diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go new file mode 100644 index 000000000..82e788ebd --- /dev/null +++ b/test/e2e/tests/app_test.go @@ -0,0 +1,92 @@ +package e2e_test + +import ( + "fmt" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/types" +) + +// Tests that any initial state given in genesis has made it into the app. +func TestApp_InitialState(t *testing.T) { + testNode(t, func(t *testing.T, node e2e.Node) { + if node.Mode == e2e.ModeSeed { + return + } + if len(node.Testnet.InitialState) == 0 { + return + } + + client, err := node.Client() + require.NoError(t, err) + for k, v := range node.Testnet.InitialState { + resp, err := client.ABCIQuery(ctx, "", []byte(k)) + require.NoError(t, err) + assert.Equal(t, k, string(resp.Response.Key)) + assert.Equal(t, v, string(resp.Response.Value)) + } + }) +} + +// Tests that the app hash (as reported by the app) matches the last +// block and the node sync status. +func TestApp_Hash(t *testing.T) { + testNode(t, func(t *testing.T, node e2e.Node) { + if node.Mode == e2e.ModeSeed { + return + } + + client, err := node.Client() + require.NoError(t, err) + info, err := client.ABCIInfo(ctx) + require.NoError(t, err) + require.NotEmpty(t, info.Response.LastBlockAppHash, "expected app to return app hash") + + block, err := client.Block(ctx, nil) + require.NoError(t, err) + require.EqualValues(t, info.Response.LastBlockAppHash, block.Block.AppHash, + "app hash does not match last block's app hash") + + status, err := client.Status(ctx) + require.NoError(t, err) + require.EqualValues(t, info.Response.LastBlockAppHash, status.SyncInfo.LatestAppHash, + "app hash does not match node status") + }) +} + +// Tests that we can set a value and retrieve it. +func TestApp_Tx(t *testing.T) { + testNode(t, func(t *testing.T, node e2e.Node) { + if node.Mode == e2e.ModeSeed { + return + } + + client, err := node.Client() + require.NoError(t, err) + + // Generate a random value, to prevent duplicate tx errors when + // manually running the test multiple times for a testnet. + r := rand.New(rand.NewSource(time.Now().UnixNano())) + bz := make([]byte, 32) + _, err = r.Read(bz) + require.NoError(t, err) + + key := fmt.Sprintf("testapp-tx-%v", node.Name) + value := fmt.Sprintf("%x", bz) + tx := types.Tx(fmt.Sprintf("%v=%v", key, value)) + + _, err = client.BroadcastTxCommit(ctx, tx) + require.NoError(t, err) + + resp, err := client.ABCIQuery(ctx, "", []byte(key)) + require.NoError(t, err) + assert.Equal(t, key, string(resp.Response.Key)) + assert.Equal(t, value, string(resp.Response.Value)) + }) +} diff --git a/test/e2e/tests/block_test.go b/test/e2e/tests/block_test.go new file mode 100644 index 000000000..369b49d61 --- /dev/null +++ b/test/e2e/tests/block_test.go @@ -0,0 +1,91 @@ +package e2e_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + e2e "github.com/tendermint/tendermint/test/e2e/pkg" +) + +// Tests that block headers are identical across nodes where present. 
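+// The reference chain is fetched once from the freshest archive node, and each
+// node's own blocks are compared against it for the heights it has.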
+func TestBlock_Header(t *testing.T) { + blocks := fetchBlockChain(t) + testNode(t, func(t *testing.T, node e2e.Node) { + if node.Mode == e2e.ModeSeed { + return + } + + client, err := node.Client() + require.NoError(t, err) + status, err := client.Status(ctx) + require.NoError(t, err) + + first := status.SyncInfo.EarliestBlockHeight + last := status.SyncInfo.LatestBlockHeight + if node.RetainBlocks > 0 { + first++ // avoid race conditions with block pruning + } + + for _, block := range blocks { + if block.Header.Height < first { + continue + } + if block.Header.Height > last { + break + } + resp, err := client.Block(ctx, &block.Header.Height) + require.NoError(t, err) + require.Equal(t, block, resp.Block, + "block mismatch for height %v", block.Header.Height) + } + }) +} + +// Tests that the node contains the expected block range. +func TestBlock_Range(t *testing.T) { + testNode(t, func(t *testing.T, node e2e.Node) { + if node.Mode == e2e.ModeSeed { + return + } + + client, err := node.Client() + require.NoError(t, err) + status, err := client.Status(ctx) + require.NoError(t, err) + + first := status.SyncInfo.EarliestBlockHeight + last := status.SyncInfo.LatestBlockHeight + + switch { + case node.StateSync: + assert.Greater(t, first, node.Testnet.InitialHeight, + "state synced nodes should not contain network's initial height") + + case node.RetainBlocks > 0 && int64(node.RetainBlocks) < (last-node.Testnet.InitialHeight+1): + // Delta handles race conditions in reading first/last heights. + assert.InDelta(t, node.RetainBlocks, last-first+1, 1, + "node not pruning expected blocks") + + default: + assert.Equal(t, node.Testnet.InitialHeight, first, + "node's first block should be network's initial height") + } + + for h := first; h <= last; h++ { + resp, err := client.Block(ctx, &(h)) + if err != nil && node.RetainBlocks > 0 && h == first { + // Ignore errors in first block if node is pruning blocks due to race conditions. + continue + } + require.NoError(t, err) + assert.Equal(t, h, resp.Block.Height) + } + + for h := node.Testnet.InitialHeight; h < first; h++ { + _, err := client.Block(ctx, &(h)) + require.Error(t, err) + } + }) +} diff --git a/test/e2e/tests/e2e_test.go b/test/e2e/tests/e2e_test.go new file mode 100644 index 000000000..15c747b5b --- /dev/null +++ b/test/e2e/tests/e2e_test.go @@ -0,0 +1,135 @@ +package e2e_test + +import ( + "context" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/stretchr/testify/require" + + rpchttp "github.com/tendermint/tendermint/rpc/client/http" + rpctypes "github.com/tendermint/tendermint/rpc/core/types" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/types" +) + +func init() { + // This can be used to manually specify a testnet manifest and/or node to + // run tests against. The testnet must have been started by the runner first. + // os.Setenv("E2E_MANIFEST", "networks/ci.toml") + // os.Setenv("E2E_NODE", "validator01") +} + +var ( + ctx = context.Background() + testnetCache = map[string]e2e.Testnet{} + testnetCacheMtx = sync.Mutex{} + blocksCache = map[string][]*types.Block{} + blocksCacheMtx = sync.Mutex{} +) + +// testNode runs tests for testnet nodes. The callback function is given a +// single node to test, running as a subtest in parallel with other subtests. +// +// The testnet manifest must be given as the envvar E2E_MANIFEST. If not set, +// these tests are skipped so that they're not picked up during normal unit +// test runs. 
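+// A relative manifest path is resolved against the parent directory.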
If E2E_NODE is also set, only the specified node is tested, +// otherwise all nodes are tested. +func testNode(t *testing.T, testFunc func(*testing.T, e2e.Node)) { + t.Helper() + + testnet := loadTestnet(t) + nodes := testnet.Nodes + + if name := os.Getenv("E2E_NODE"); name != "" { + node := testnet.LookupNode(name) + require.NotNil(t, node, "node %q not found in testnet %q", name, testnet.Name) + nodes = []*e2e.Node{node} + } + + for _, node := range nodes { + node := *node + t.Run(node.Name, func(t *testing.T) { + t.Parallel() + testFunc(t, node) + }) + } +} + +// loadTestnet loads the testnet based on the E2E_MANIFEST envvar. +func loadTestnet(t *testing.T) e2e.Testnet { + t.Helper() + + manifest := os.Getenv("E2E_MANIFEST") + if manifest == "" { + t.Skip("E2E_MANIFEST not set, not an end-to-end test run") + } + if !filepath.IsAbs(manifest) { + manifest = filepath.Join("..", manifest) + } + + testnetCacheMtx.Lock() + defer testnetCacheMtx.Unlock() + if testnet, ok := testnetCache[manifest]; ok { + return testnet + } + + testnet, err := e2e.LoadTestnet(manifest) + require.NoError(t, err) + testnetCache[manifest] = *testnet + return *testnet +} + +// fetchBlockChain fetches a complete, up-to-date block history from +// the freshest testnet archive node. +func fetchBlockChain(t *testing.T) []*types.Block { + t.Helper() + + testnet := loadTestnet(t) + + // Find the freshest archive node + var ( + client *rpchttp.HTTP + status *rpctypes.ResultStatus + ) + for _, node := range testnet.ArchiveNodes() { + c, err := node.Client() + require.NoError(t, err) + s, err := c.Status(ctx) + require.NoError(t, err) + if status == nil || s.SyncInfo.LatestBlockHeight > status.SyncInfo.LatestBlockHeight { + client = c + status = s + } + } + require.NotNil(t, client, "couldn't find an archive node") + + // Fetch blocks. Look for existing block history in the block cache, and + // extend it with any new blocks that have been produced. 
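+	// The cache is keyed by testnet name and shared between parallel subtests,
+	// hence the mutex.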
+ blocksCacheMtx.Lock() + defer blocksCacheMtx.Unlock() + + from := status.SyncInfo.EarliestBlockHeight + to := status.SyncInfo.LatestBlockHeight + blocks, ok := blocksCache[testnet.Name] + if !ok { + blocks = make([]*types.Block, 0, to-from+1) + } + if len(blocks) > 0 { + from = blocks[len(blocks)-1].Height + 1 + } + + for h := from; h <= to; h++ { + resp, err := client.Block(ctx, &(h)) + require.NoError(t, err) + require.NotNil(t, resp.Block) + require.Equal(t, h, resp.Block.Height, "unexpected block height %v", resp.Block.Height) + blocks = append(blocks, resp.Block) + } + require.NotEmpty(t, blocks, "blockchain does not contain any blocks") + blocksCache[testnet.Name] = blocks + + return blocks +} diff --git a/test/e2e/tests/evidence_test.go b/test/e2e/tests/evidence_test.go new file mode 100644 index 000000000..ea24b51e5 --- /dev/null +++ b/test/e2e/tests/evidence_test.go @@ -0,0 +1,57 @@ +package e2e_test + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/types" +) + +// assert that all nodes that have blocks at the height of a misbehavior has evidence +// for that misbehavior +func TestEvidence_Misbehavior(t *testing.T) { + blocks := fetchBlockChain(t) + testNode(t, func(t *testing.T, node e2e.Node) { + seenEvidence := make(map[int64]struct{}) + for _, block := range blocks { + // Find any evidence blaming this node in this block + var nodeEvidence types.Evidence + for _, evidence := range block.Evidence.Evidence { + switch evidence := evidence.(type) { + case *types.DuplicateVoteEvidence: + if bytes.Equal(evidence.VoteA.ValidatorAddress, node.PrivvalKey.PubKey().Address()) { + nodeEvidence = evidence + } + default: + t.Fatalf("unexpected evidence type %T", evidence) + } + } + if nodeEvidence == nil { + continue // no evidence for the node at this height + } + + // Check that evidence was as expected + misbehavior, ok := node.Misbehaviors[nodeEvidence.Height()] + require.True(t, ok, "found unexpected evidence %v in height %v", + nodeEvidence, block.Height) + + switch misbehavior { + case "double-prevote": + require.IsType(t, &types.DuplicateVoteEvidence{}, nodeEvidence, "unexpected evidence type") + default: + t.Fatalf("unknown misbehavior %v", misbehavior) + } + + seenEvidence[nodeEvidence.Height()] = struct{}{} + } + // see if there is any evidence that we were expecting but didn't see + for height, misbehavior := range node.Misbehaviors { + _, ok := seenEvidence[height] + require.True(t, ok, "expected evidence for %v misbehavior at height %v by node but was never found", + misbehavior, height) + } + }) +} diff --git a/test/e2e/tests/net_test.go b/test/e2e/tests/net_test.go new file mode 100644 index 000000000..1ca43fa05 --- /dev/null +++ b/test/e2e/tests/net_test.go @@ -0,0 +1,46 @@ +package e2e_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + e2e "github.com/tendermint/tendermint/test/e2e/pkg" +) + +// Tests that all nodes have peered with each other, regardless of discovery method. +func TestNet_Peers(t *testing.T) { + // FIXME Skip test since nodes aren't always able to fully mesh + t.SkipNow() + + testNode(t, func(t *testing.T, node e2e.Node) { + // Seed nodes shouldn't necessarily mesh with the entire network. 
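+		// They are therefore excluded from the full-mesh check below.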
+ if node.Mode == e2e.ModeSeed { + return + } + + client, err := node.Client() + require.NoError(t, err) + netInfo, err := client.NetInfo(ctx) + require.NoError(t, err) + + require.Equal(t, len(node.Testnet.Nodes)-1, netInfo.NPeers, + "node is not fully meshed with peers") + + seen := map[string]bool{} + for _, n := range node.Testnet.Nodes { + seen[n.Name] = (n.Name == node.Name) // we've clearly seen ourself + } + for _, peerInfo := range netInfo.Peers { + peer := node.Testnet.LookupNode(peerInfo.NodeInfo.Moniker) + require.NotNil(t, peer, "unknown node %v", peerInfo.NodeInfo.Moniker) + require.Equal(t, peer.IP.String(), peerInfo.RemoteIP, + "unexpected IP address for peer %v", peer.Name) + seen[peerInfo.NodeInfo.Moniker] = true + } + + for name := range seen { + require.True(t, seen[name], "node %v not peered with %v", node.Name, name) + } + }) +} diff --git a/test/e2e/tests/validator_test.go b/test/e2e/tests/validator_test.go new file mode 100644 index 000000000..8a36bb55d --- /dev/null +++ b/test/e2e/tests/validator_test.go @@ -0,0 +1,166 @@ +package e2e_test + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + e2e "github.com/tendermint/tendermint/test/e2e/pkg" + "github.com/tendermint/tendermint/types" +) + +// Tests that validator sets are available and correct according to +// scheduled validator updates. +func TestValidator_Sets(t *testing.T) { + testNode(t, func(t *testing.T, node e2e.Node) { + if node.Mode == e2e.ModeSeed { + return + } + + client, err := node.Client() + require.NoError(t, err) + status, err := client.Status(ctx) + require.NoError(t, err) + + first := status.SyncInfo.EarliestBlockHeight + last := status.SyncInfo.LatestBlockHeight + + // skip first block if node is pruning blocks, to avoid race conditions + if node.RetainBlocks > 0 { + first++ + } + + valSchedule := newValidatorSchedule(*node.Testnet) + valSchedule.Increment(first - node.Testnet.InitialHeight) + + for h := first; h <= last; h++ { + validators := []*types.Validator{} + perPage := 100 + for page := 1; ; page++ { + resp, err := client.Validators(ctx, &(h), &(page), &perPage) + require.NoError(t, err) + validators = append(validators, resp.Validators...) + if len(validators) == resp.Total { + break + } + } + require.Equal(t, valSchedule.Set.Validators, validators, + "incorrect validator set at height %v", h) + valSchedule.Increment(1) + } + }) +} + +// Tests that a validator proposes blocks when it's supposed to. It tolerates some +// missed blocks, e.g. due to testnet perturbations. +func TestValidator_Propose(t *testing.T) { + blocks := fetchBlockChain(t) + testNode(t, func(t *testing.T, node e2e.Node) { + if node.Mode != e2e.ModeValidator { + return + } + address := node.PrivvalKey.PubKey().Address() + valSchedule := newValidatorSchedule(*node.Testnet) + + expectCount := 0 + proposeCount := 0 + for _, block := range blocks { + if bytes.Equal(valSchedule.Set.Proposer.Address, address) { + expectCount++ + if bytes.Equal(block.ProposerAddress, address) { + proposeCount++ + } + } + valSchedule.Increment(1) + } + + require.False(t, proposeCount == 0 && expectCount > 0, + "node did not propose any blocks (expected %v)", expectCount) + require.Less(t, expectCount-proposeCount, 5, + "validator missed proposing too many blocks (proposed %v out of %v)", proposeCount, expectCount) + }) +} + +// Tests that a validator signs blocks when it's supposed to. It tolerates some +// missed blocks, e.g. due to testnet perturbations. 
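+// Signatures are taken from each block's LastCommit, so the first block in
+// the fetched chain is skipped.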
+func TestValidator_Sign(t *testing.T) {
+	blocks := fetchBlockChain(t)
+	testNode(t, func(t *testing.T, node e2e.Node) {
+		if node.Mode != e2e.ModeValidator {
+			return
+		}
+		address := node.PrivvalKey.PubKey().Address()
+		valSchedule := newValidatorSchedule(*node.Testnet)
+
+		expectCount := 0
+		signCount := 0
+		for _, block := range blocks[1:] { // Skip first block, since it has no signatures
+			signed := false
+			for _, sig := range block.LastCommit.Signatures {
+				if bytes.Equal(sig.ValidatorAddress, address) {
+					signed = true
+					break
+				}
+			}
+			if valSchedule.Set.HasAddress(address) {
+				expectCount++
+				if signed {
+					signCount++
+				}
+			} else {
+				require.False(t, signed, "unexpected signature for block %v", block.LastCommit.Height)
+			}
+			valSchedule.Increment(1)
+		}
+
+		require.False(t, signCount == 0 && expectCount > 0,
+			"node did not sign any blocks (expected %v)", expectCount)
+		require.Less(t, float64(expectCount-signCount)/float64(expectCount), 0.5,
+			"validator missed signing too many blocks (signed %v out of %v)", signCount, expectCount)
+	})
+}
+
+// validatorSchedule is a validator set iterator, which takes into account
+// validator set updates.
+type validatorSchedule struct {
+	Set     *types.ValidatorSet
+	height  int64
+	updates map[int64]map[*e2e.Node]int64
+}
+
+func newValidatorSchedule(testnet e2e.Testnet) *validatorSchedule {
+	valMap := testnet.Validators                  // genesis validators
+	if v, ok := testnet.ValidatorUpdates[0]; ok { // InitChain validators
+		valMap = v
+	}
+	return &validatorSchedule{
+		height:  testnet.InitialHeight,
+		Set:     types.NewValidatorSet(makeVals(valMap)),
+		updates: testnet.ValidatorUpdates,
+	}
+}
+
+func (s *validatorSchedule) Increment(heights int64) {
+	for i := int64(0); i < heights; i++ {
+		s.height++
+		if s.height > 2 {
+			// validator set updates are offset by 2, since they only take effect
+			// two blocks after they're returned.
+			if update, ok := s.updates[s.height-2]; ok {
+				if err := s.Set.UpdateWithChangeSet(makeVals(update)); err != nil {
+					panic(err)
+				}
+			}
+		}
+		s.Set.IncrementProposerPriority(1)
+	}
+}
+
+func makeVals(valMap map[*e2e.Node]int64) []*types.Validator {
+	vals := make([]*types.Validator, 0, len(valMap))
+	for node, power := range valMap {
+		vals = append(vals, types.NewValidator(node.PrivvalKey.PubKey(), power))
+	}
+	return vals
+}
diff --git a/test/maverick/README.md b/test/maverick/README.md
new file mode 100644
index 000000000..308275536
--- /dev/null
+++ b/test/maverick/README.md
@@ -0,0 +1,51 @@
+# Maverick
+
+![](https://assets.rollingstone.com/assets/2015/article/tom-cruise-to-fight-drones-in-top-gun-sequel-20150629/201166/large_rect/1435581755/1401x788-Top-Gun-3.jpg)
+
+A Byzantine node used to test Tendermint consensus against a variety of misbehaviors. It is designed to make it easy to write new misbehaviors and examine how a Tendermint network reacts to them. It can also be used for fuzz testing with different network arrangements.
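+
+As a quick illustration (a minimal sketch that uses the `Misbehavior` struct,
+the `DefaultMisbehavior` helper, and the vote-signing functions described in
+the sections below), a hypothetical misbehavior that always prevotes nil
+might look like this:
+
+```go
+// NilPrevoteMisbehavior is a hypothetical example: it builds on the default
+// behavior, but prevotes nil regardless of the proposal.
+func NilPrevoteMisbehavior() Misbehavior {
+	b := DefaultMisbehavior()
+	b.Name = "nil-prevote"
+	b.EnterPrevote = func(cs *State, height int64, round int32) {
+		cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
+	}
+	return b
+}
+```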
+
+## Misbehaviors
+
+A misbehavior gives control over the node's consensus behavior at the stages highlighted by the struct below:
+
+```go
+type Misbehavior struct {
+	Name string
+
+	EnterPropose func(cs *State, height int64, round int32)
+
+	EnterPrevote func(cs *State, height int64, round int32)
+
+	EnterPrecommit func(cs *State, height int64, round int32)
+
+	ReceivePrevote func(cs *State, prevote *types.Vote)
+
+	ReceivePrecommit func(cs *State, precommit *types.Vote)
+
+	ReceiveProposal func(cs *State, proposal *types.Proposal) error
+}
+```
+
+At each of these events, the node can exhibit a different misbehavior. To create a new misbehavior, define a function that builds on the existing default misbehavior and overrides one or more of these functions. Then register it in the `MisbehaviorList` map so the node recognizes it, like so:
+
+```go
+var MisbehaviorList = map[string]Misbehavior{
+	"double-prevote": DoublePrevoteMisbehavior(),
+}
+```
+
+## Setup
+
+The maverick node takes most of its functionality from the existing Tendermint CLI. To build it, run the following in the directory of this README:
+
+```bash
+go build
+```
+
+Use `maverick init` to initialize a single node and `maverick node` to run it. The node runs normally unless the `--misbehaviors` flag is given, as follows:
+
+```bash
+maverick node --proxy_app persistent_kvstore --misbehaviors double-prevote,10
+```
+
+This causes the node to prevote twice (once for the proposal and once for nil) in every round at height 10. To add more misbehaviors at different heights, append the next misbehavior and height after the first (with comma separation).
diff --git a/test/maverick/consensus/metrics.go b/test/maverick/consensus/metrics.go
new file mode 100644
index 000000000..bbd823a3f
--- /dev/null
+++ b/test/maverick/consensus/metrics.go
@@ -0,0 +1,220 @@
+package consensus
+
+import (
+	"github.com/go-kit/kit/metrics"
+	"github.com/go-kit/kit/metrics/discard"
+
+	prometheus "github.com/go-kit/kit/metrics/prometheus"
+	stdprometheus "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	// MetricsSubsystem is a subsystem shared by all metrics exposed by this
+	// package.
+	MetricsSubsystem = "consensus"
+)
+
+// Metrics contains metrics exposed by this package.
+type Metrics struct {
+	// Height of the chain.
+	Height metrics.Gauge
+
+	// ValidatorLastSignedHeight of a validator.
+	ValidatorLastSignedHeight metrics.Gauge
+
+	// Number of rounds.
+	Rounds metrics.Gauge
+
+	// Number of validators.
+	Validators metrics.Gauge
+	// Total power of all validators.
+	ValidatorsPower metrics.Gauge
+	// Power of a validator.
+	ValidatorPower metrics.Gauge
+	// Number of blocks missed by a validator.
+	ValidatorMissedBlocks metrics.Gauge
+	// Number of validators who did not sign.
+	MissingValidators metrics.Gauge
+	// Total power of the missing validators.
+	MissingValidatorsPower metrics.Gauge
+	// Number of validators who tried to double sign.
+	ByzantineValidators metrics.Gauge
+	// Total power of the byzantine validators.
+	ByzantineValidatorsPower metrics.Gauge
+
+	// Time between this and the last block.
+	BlockIntervalSeconds metrics.Histogram
+
+	// Number of transactions.
+	NumTxs metrics.Gauge
+	// Size of the block.
+	BlockSizeBytes metrics.Gauge
+	// Total number of transactions.
+	TotalTxs metrics.Gauge
+	// The latest block height.
+	CommittedHeight metrics.Gauge
+	// Whether or not a node is fast syncing. 1 if yes, 0 if no.
+	FastSyncing metrics.Gauge
+	// Whether or not a node is state syncing. 1 if yes, 0 if no.
+ StateSyncing metrics.Gauge + + // Number of blockparts transmitted by peer. + BlockParts metrics.Counter +} + +// PrometheusMetrics returns Metrics build using Prometheus client library. +// Optionally, labels can be provided along with their values ("foo", +// "fooValue"). +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "height", + Help: "Height of the chain.", + }, labels).With(labelsAndValues...), + Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "rounds", + Help: "Number of rounds.", + }, labels).With(labelsAndValues...), + + Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validators", + Help: "Number of validators.", + }, labels).With(labelsAndValues...), + ValidatorLastSignedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validator_last_signed_height", + Help: "Last signed height for a validator", + }, append(labels, "validator_address")).With(labelsAndValues...), + ValidatorMissedBlocks: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validator_missed_blocks", + Help: "Total missed blocks for a validator", + }, append(labels, "validator_address")).With(labelsAndValues...), + ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validators_power", + Help: "Total power of all validators.", + }, labels).With(labelsAndValues...), + ValidatorPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "validator_power", + Help: "Power of a validator", + }, append(labels, "validator_address")).With(labelsAndValues...), + MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "missing_validators", + Help: "Number of validators who did not sign.", + }, labels).With(labelsAndValues...), + MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "missing_validators_power", + Help: "Total power of the missing validators.", + }, labels).With(labelsAndValues...), + ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "byzantine_validators", + Help: "Number of validators who tried to double sign.", + }, labels).With(labelsAndValues...), + ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "byzantine_validators_power", + Help: "Total power of the byzantine validators.", + }, labels).With(labelsAndValues...), + BlockIntervalSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_interval_seconds", + Help: "Time between this and the last block.", + }, labels).With(labelsAndValues...), + NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + 
Name: "num_txs", + Help: "Number of transactions.", + }, labels).With(labelsAndValues...), + BlockSizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_size_bytes", + Help: "Size of the block.", + }, labels).With(labelsAndValues...), + TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "total_txs", + Help: "Total number of transactions.", + }, labels).With(labelsAndValues...), + CommittedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "latest_block_height", + Help: "The latest block height.", + }, labels).With(labelsAndValues...), + FastSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "fast_syncing", + Help: "Whether or not a node is fast syncing. 1 if yes, 0 if no.", + }, labels).With(labelsAndValues...), + StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "state_syncing", + Help: "Whether or not a node is state syncing. 1 if yes, 0 if no.", + }, labels).With(labelsAndValues...), + BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_parts", + Help: "Number of blockparts transmitted by peer.", + }, append(labels, "peer_id")).With(labelsAndValues...), + } +} + +// NopMetrics returns no-op Metrics. +func NopMetrics() *Metrics { + return &Metrics{ + Height: discard.NewGauge(), + + ValidatorLastSignedHeight: discard.NewGauge(), + + Rounds: discard.NewGauge(), + + Validators: discard.NewGauge(), + ValidatorsPower: discard.NewGauge(), + ValidatorPower: discard.NewGauge(), + ValidatorMissedBlocks: discard.NewGauge(), + MissingValidators: discard.NewGauge(), + MissingValidatorsPower: discard.NewGauge(), + ByzantineValidators: discard.NewGauge(), + ByzantineValidatorsPower: discard.NewGauge(), + + BlockIntervalSeconds: discard.NewHistogram(), + + NumTxs: discard.NewGauge(), + BlockSizeBytes: discard.NewGauge(), + TotalTxs: discard.NewGauge(), + CommittedHeight: discard.NewGauge(), + FastSyncing: discard.NewGauge(), + StateSyncing: discard.NewGauge(), + BlockParts: discard.NewCounter(), + } +} diff --git a/test/maverick/consensus/misbehavior.go b/test/maverick/consensus/misbehavior.go new file mode 100644 index 000000000..75d2bd278 --- /dev/null +++ b/test/maverick/consensus/misbehavior.go @@ -0,0 +1,398 @@ +package consensus + +import ( + "fmt" + + cstypes "github.com/tendermint/tendermint/consensus/types" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" +) + +// MisbehaviorList encompasses a list of all possible behaviors +var MisbehaviorList = map[string]Misbehavior{ + "double-prevote": DoublePrevoteMisbehavior(), +} + +type Misbehavior struct { + Name string + + EnterPropose func(cs *State, height int64, round int32) + + EnterPrevote func(cs *State, height int64, round int32) + + EnterPrecommit func(cs *State, height int64, round int32) + + ReceivePrevote func(cs *State, prevote *types.Vote) + + ReceivePrecommit func(cs *State, precommit *types.Vote) + + ReceiveProposal func(cs *State, proposal *types.Proposal) error +} + +// BEHAVIORS + +func DefaultMisbehavior() Misbehavior { + return Misbehavior{ + Name: "default", + EnterPropose: defaultEnterPropose, + EnterPrevote: defaultEnterPrevote, + 
EnterPrecommit:   defaultEnterPrecommit,
+		ReceivePrevote:   defaultReceivePrevote,
+		ReceivePrecommit: defaultReceivePrecommit,
+		ReceiveProposal:  defaultReceiveProposal,
+	}
+}
+
+// DoublePrevoteMisbehavior will make a node prevote both nil and a block in the same
+// height and round.
+func DoublePrevoteMisbehavior() Misbehavior {
+	b := DefaultMisbehavior()
+	b.Name = "double-prevote"
+	b.EnterPrevote = func(cs *State, height int64, round int32) {
+
+		// If a block is locked, prevote that.
+		if cs.LockedBlock != nil {
+			cs.Logger.Info("enterPrevote: Already locked on a block, prevoting locked block")
+			cs.signAddVote(tmproto.PrevoteType, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header())
+			return
+		}
+
+		// If ProposalBlock is nil, prevote nil.
+		if cs.ProposalBlock == nil {
+			cs.Logger.Info("enterPrevote: ProposalBlock is nil")
+			cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
+			return
+		}
+
+		// Validate proposal block
+		err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock)
+		if err != nil {
+			// ProposalBlock is invalid, prevote nil.
+			cs.Logger.Error("enterPrevote: ProposalBlock is invalid", "err", err)
+			cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
+			return
+		}
+
+		if cs.sw == nil {
+			cs.Logger.Error("nil switch")
+			return
+		}
+
+		prevote, err := cs.signVote(tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
+		if err != nil {
+			cs.Logger.Error("enterPrevote: Unable to sign block", "err", err)
+		}
+
+		nilPrevote, err := cs.signVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
+		if err != nil {
+			cs.Logger.Error("enterPrevote: Unable to sign block", "err", err)
+		}
+
+		// add our own vote
+		cs.sendInternalMessage(msgInfo{&VoteMessage{prevote}, ""})
+
+		cs.Logger.Info("Sending conflicting votes")
+		peers := cs.sw.Peers().List()
+		// there must be at least two other peers connected, else this behavior works normally
+		for idx, peer := range peers {
+			if idx%2 == 0 { // sign the proposal block
+				peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote}))
+			} else { // sign a nil block
+				peer.Send(VoteChannel, MustEncode(&VoteMessage{nilPrevote}))
+			}
+		}
+	}
+	return b
+}
+
+// DEFAULTS
+
+func defaultEnterPropose(cs *State, height int64, round int32) {
+	logger := cs.Logger.With("height", height, "round", round)
+	// If we don't get the proposal and all block parts quickly enough, enterPrevote
+	cs.scheduleTimeout(cs.config.Propose(round), height, round, cstypes.RoundStepPropose)
+
+	// Nothing more to do if we're not a validator
+	if cs.privValidator == nil {
+		logger.Debug("This node is not a validator")
+		return
+	}
+	logger.Debug("This node is a validator")
+
+	pubKey, err := cs.privValidator.GetPubKey()
+	if err != nil {
+		// If this node is a validator & proposer in the current round, it will
+		// miss the opportunity to create a block.
+		logger.Error("Error on retrieval of pubkey", "err", err)
+		return
+	}
+	address := pubKey.Address()
+
+	// if not a validator, we're done
+	if !cs.Validators.HasAddress(address) {
+		logger.Debug("This node is not a validator", "addr", address, "vals", cs.Validators)
+		return
+	}
+
+	if cs.isProposer(address) {
+		logger.Info("enterPropose: Our turn to propose",
+			"proposer",
+			address,
+			"privValidator",
+			cs.privValidator)
+		cs.decideProposal(height, round)
+	} else {
+		logger.Info("enterPropose: Not our turn to propose",
+			"proposer",
+			cs.Validators.GetProposer().Address,
+			"privValidator",
+			cs.privValidator)
+	}
+}
+
+func defaultEnterPrevote(cs *State, height int64, round int32) {
+	logger := cs.Logger.With("height", height, "round", round)
+
+	// If a block is locked, prevote that.
+	if cs.LockedBlock != nil {
+		logger.Info("enterPrevote: Already locked on a block, prevoting locked block")
+		cs.signAddVote(tmproto.PrevoteType, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header())
+		return
+	}
+
+	// If ProposalBlock is nil, prevote nil.
+	if cs.ProposalBlock == nil {
+		logger.Info("enterPrevote: ProposalBlock is nil")
+		cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
+		return
+	}
+
+	// Validate proposal block
+	err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock)
+	if err != nil {
+		// ProposalBlock is invalid, prevote nil.
+		logger.Error("enterPrevote: ProposalBlock is invalid", "err", err)
+		cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
+		return
+	}
+
+	// Prevote cs.ProposalBlock
+	// NOTE: the proposal signature is validated when it is received,
+	// and the proposal block parts are validated as they are received (against the merkle hash in the proposal)
+	logger.Info("enterPrevote: ProposalBlock is valid")
+	cs.signAddVote(tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
+}
+
+func defaultEnterPrecommit(cs *State, height int64, round int32) {
+	logger := cs.Logger.With("height", height, "round", round)
+
+	// check for a polka
+	blockID, ok := cs.Votes.Prevotes(round).TwoThirdsMajority()
+
+	// If we don't have a polka, we must precommit nil.
+	if !ok {
+		if cs.LockedBlock != nil {
+			logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
+		} else {
+			logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
+		}
+		cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{})
+		return
+	}
+
+	// At this point +2/3 prevoted for a particular block or nil.
+	_ = cs.eventBus.PublishEventPolka(cs.RoundStateEvent())
+
+	// the latest POLRound should be this round.
+	polRound, _ := cs.Votes.POLInfo()
+	if polRound < round {
+		panic(fmt.Sprintf("This POLRound should be %v but got %v", round, polRound))
+	}
+
+	// +2/3 prevoted nil. Unlock and precommit nil.
+	if len(blockID.Hash) == 0 {
+		if cs.LockedBlock == nil {
+			logger.Info("enterPrecommit: +2/3 prevoted for nil.")
+		} else {
+			logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking")
+			cs.LockedRound = -1
+			cs.LockedBlock = nil
+			cs.LockedBlockParts = nil
+			_ = cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
+		}
+		cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{})
+		return
+	}
+
+	// At this point, +2/3 prevoted for a particular block.
+
+	// If we're already locked on that block, precommit it, and update the LockedRound
+	if cs.LockedBlock.HashesTo(blockID.Hash) {
+		logger.Info("enterPrecommit: +2/3 prevoted locked block.
Relocking") + cs.LockedRound = round + _ = cs.eventBus.PublishEventRelock(cs.RoundStateEvent()) + cs.signAddVote(tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) + return + } + + // If +2/3 prevoted for proposal block, stage and precommit it + if cs.ProposalBlock.HashesTo(blockID.Hash) { + logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash) + // Validate the block. + if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil { + panic(fmt.Sprintf("enterPrecommit: +2/3 prevoted for an invalid block: %v", err)) + } + cs.LockedRound = round + cs.LockedBlock = cs.ProposalBlock + cs.LockedBlockParts = cs.ProposalBlockParts + _ = cs.eventBus.PublishEventLock(cs.RoundStateEvent()) + cs.signAddVote(tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) + return + } + + // There was a polka in this round for a block we don't have. + // Fetch that block, unlock, and precommit nil. + // The +2/3 prevotes for this round is the POL for our unlock. + logger.Info("enterPrecommit: +2/3 prevotes for a block we don't have. Voting nil", "blockID", blockID) + cs.LockedRound = -1 + cs.LockedBlock = nil + cs.LockedBlockParts = nil + if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) { + cs.ProposalBlock = nil + cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) + } + _ = cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) + cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) +} + +func defaultReceivePrevote(cs *State, vote *types.Vote) { + height := cs.Height + prevotes := cs.Votes.Prevotes(vote.Round) + + // If +2/3 prevotes for a block or nil for *any* round: + if blockID, ok := prevotes.TwoThirdsMajority(); ok { + + // There was a polka! + // If we're locked but this is a recent polka, unlock. + // If it matches our ProposalBlock, update the ValidBlock + + // Unlock if `cs.LockedRound < vote.Round <= cs.Round` + // NOTE: If vote.Round > cs.Round, we'll deal with it when we get to vote.Round + if (cs.LockedBlock != nil) && + (cs.LockedRound < vote.Round) && + (vote.Round <= cs.Round) && + !cs.LockedBlock.HashesTo(blockID.Hash) { + + cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round) + cs.LockedRound = -1 + cs.LockedBlock = nil + cs.LockedBlockParts = nil + _ = cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) + } + + // Update Valid* if we can. + // NOTE: our proposal block may be nil or not what received a polka.. + if len(blockID.Hash) != 0 && (cs.ValidRound < vote.Round) && (vote.Round == cs.Round) { + + if cs.ProposalBlock.HashesTo(blockID.Hash) { + cs.Logger.Info( + "Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round) + cs.ValidRound = vote.Round + cs.ValidBlock = cs.ProposalBlock + cs.ValidBlockParts = cs.ProposalBlockParts + } else { + cs.Logger.Info( + "Valid block we don't know about. Set ProposalBlock=nil", + "proposal", cs.ProposalBlock.Hash(), "blockID", blockID.Hash) + // We're getting the wrong block. 
+ cs.ProposalBlock = nil + } + if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) { + cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) + } + cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState) + _ = cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()) + } + } + + // If +2/3 prevotes for *anything* for future round: + switch { + case cs.Round < vote.Round && prevotes.HasTwoThirdsAny(): + // Round-skip if there is any 2/3+ of votes ahead of us + cs.enterNewRound(height, vote.Round) + case cs.Round == vote.Round && cstypes.RoundStepPrevote <= cs.Step: // current round + blockID, ok := prevotes.TwoThirdsMajority() + if ok && (cs.isProposalComplete() || len(blockID.Hash) == 0) { + cs.enterPrecommit(height, vote.Round) + } else if prevotes.HasTwoThirdsAny() { + cs.enterPrevoteWait(height, vote.Round) + } + case cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round: + // If the proposal is now complete, enter prevote of cs.Round. + if cs.isProposalComplete() { + cs.enterPrevote(height, cs.Round) + } + } + +} + +func defaultReceivePrecommit(cs *State, vote *types.Vote) { + height := cs.Height + precommits := cs.Votes.Precommits(vote.Round) + cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort()) + + blockID, ok := precommits.TwoThirdsMajority() + if ok { + // Executed as TwoThirdsMajority could be from a higher round + cs.enterNewRound(height, vote.Round) + cs.enterPrecommit(height, vote.Round) + if len(blockID.Hash) != 0 { + cs.enterCommit(height, vote.Round) + if cs.config.SkipTimeoutCommit && precommits.HasAll() { + cs.enterNewRound(cs.Height, 0) + } + } else { + cs.enterPrecommitWait(height, vote.Round) + } + } else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() { + cs.enterNewRound(height, vote.Round) + cs.enterPrecommitWait(height, vote.Round) + } +} + +func defaultReceiveProposal(cs *State, proposal *types.Proposal) error { + // Already have one + // TODO: possibly catch double proposals + if cs.Proposal != nil { + return nil + } + + // Does not apply + if proposal.Height != cs.Height || proposal.Round != cs.Round { + return nil + } + + // Verify POLRound, which must be -1 or in range [0, proposal.Round). + if proposal.POLRound < -1 || + (proposal.POLRound >= 0 && proposal.POLRound >= proposal.Round) { + return ErrInvalidProposalPOLRound + } + + p := proposal.ToProto() + // Verify signature + if !cs.Validators.GetProposer().PubKey.VerifySignature( + types.ProposalSignBytes(cs.state.ChainID, p), proposal.Signature) { + return ErrInvalidProposalSignature + } + + proposal.Signature = p.Signature + cs.Proposal = proposal + // We don't update cs.ProposalBlockParts if it is already set. + // This happens if we're already in cstypes.RoundStepCommit or if there is a valid block in the current round. + // TODO: We can check if Proposal is for a different block as this is a sign of misbehavior! 
+ if cs.ProposalBlockParts == nil { + cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartSetHeader) + } + cs.Logger.Info("Received proposal", "proposal", proposal) + return nil +} diff --git a/test/maverick/consensus/msgs.go b/test/maverick/consensus/msgs.go new file mode 100644 index 000000000..4de96b5f4 --- /dev/null +++ b/test/maverick/consensus/msgs.go @@ -0,0 +1,377 @@ +package consensus + +import ( + "errors" + "fmt" + + "github.com/gogo/protobuf/proto" + + cstypes "github.com/tendermint/tendermint/consensus/types" + "github.com/tendermint/tendermint/libs/bits" + tmmath "github.com/tendermint/tendermint/libs/math" + "github.com/tendermint/tendermint/p2p" + tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" +) + +// MsgToProto takes a consensus message type and returns the proto defined consensus message +func MsgToProto(msg Message) (*tmcons.Message, error) { + if msg == nil { + return nil, errors.New("consensus: message is nil") + } + var pb tmcons.Message + + switch msg := msg.(type) { + case *NewRoundStepMessage: + pb = tmcons.Message{ + Sum: &tmcons.Message_NewRoundStep{ + NewRoundStep: &tmcons.NewRoundStep{ + Height: msg.Height, + Round: msg.Round, + Step: uint32(msg.Step), + SecondsSinceStartTime: msg.SecondsSinceStartTime, + LastCommitRound: msg.LastCommitRound, + }, + }, + } + case *NewValidBlockMessage: + pbPartSetHeader := msg.BlockPartSetHeader.ToProto() + pbBits := msg.BlockParts.ToProto() + pb = tmcons.Message{ + Sum: &tmcons.Message_NewValidBlock{ + NewValidBlock: &tmcons.NewValidBlock{ + Height: msg.Height, + Round: msg.Round, + BlockPartSetHeader: pbPartSetHeader, + BlockParts: pbBits, + IsCommit: msg.IsCommit, + }, + }, + } + case *ProposalMessage: + pbP := msg.Proposal.ToProto() + pb = tmcons.Message{ + Sum: &tmcons.Message_Proposal{ + Proposal: &tmcons.Proposal{ + Proposal: *pbP, + }, + }, + } + case *ProposalPOLMessage: + pbBits := msg.ProposalPOL.ToProto() + pb = tmcons.Message{ + Sum: &tmcons.Message_ProposalPol{ + ProposalPol: &tmcons.ProposalPOL{ + Height: msg.Height, + ProposalPolRound: msg.ProposalPOLRound, + ProposalPol: *pbBits, + }, + }, + } + case *BlockPartMessage: + parts, err := msg.Part.ToProto() + if err != nil { + return nil, fmt.Errorf("msg to proto error: %w", err) + } + pb = tmcons.Message{ + Sum: &tmcons.Message_BlockPart{ + BlockPart: &tmcons.BlockPart{ + Height: msg.Height, + Round: msg.Round, + Part: *parts, + }, + }, + } + case *VoteMessage: + vote := msg.Vote.ToProto() + pb = tmcons.Message{ + Sum: &tmcons.Message_Vote{ + Vote: &tmcons.Vote{ + Vote: vote, + }, + }, + } + case *HasVoteMessage: + pb = tmcons.Message{ + Sum: &tmcons.Message_HasVote{ + HasVote: &tmcons.HasVote{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + Index: msg.Index, + }, + }, + } + case *VoteSetMaj23Message: + bi := msg.BlockID.ToProto() + pb = tmcons.Message{ + Sum: &tmcons.Message_VoteSetMaj23{ + VoteSetMaj23: &tmcons.VoteSetMaj23{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + BlockID: bi, + }, + }, + } + case *VoteSetBitsMessage: + bi := msg.BlockID.ToProto() + bits := msg.Votes.ToProto() + + vsb := &tmcons.Message_VoteSetBits{ + VoteSetBits: &tmcons.VoteSetBits{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + BlockID: bi, + }, + } + + if bits != nil { + vsb.VoteSetBits.Votes = *bits + } + + pb = tmcons.Message{ + Sum: vsb, + } + + default: + return nil, 
fmt.Errorf("consensus: message not recognized: %T", msg) + } + + return &pb, nil +} + +// MsgFromProto takes a consensus proto message and returns the native go type +func MsgFromProto(msg *tmcons.Message) (Message, error) { + if msg == nil { + return nil, errors.New("consensus: nil message") + } + var pb Message + + switch msg := msg.Sum.(type) { + case *tmcons.Message_NewRoundStep: + rs, err := tmmath.SafeConvertUint8(int64(msg.NewRoundStep.Step)) + // deny message based on possible overflow + if err != nil { + return nil, fmt.Errorf("denying message due to possible overflow: %w", err) + } + pb = &NewRoundStepMessage{ + Height: msg.NewRoundStep.Height, + Round: msg.NewRoundStep.Round, + Step: cstypes.RoundStepType(rs), + SecondsSinceStartTime: msg.NewRoundStep.SecondsSinceStartTime, + LastCommitRound: msg.NewRoundStep.LastCommitRound, + } + case *tmcons.Message_NewValidBlock: + pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader) + if err != nil { + return nil, fmt.Errorf("parts to proto error: %w", err) + } + + pbBits := new(bits.BitArray) + pbBits.FromProto(msg.NewValidBlock.BlockParts) + + pb = &NewValidBlockMessage{ + Height: msg.NewValidBlock.Height, + Round: msg.NewValidBlock.Round, + BlockPartSetHeader: *pbPartSetHeader, + BlockParts: pbBits, + IsCommit: msg.NewValidBlock.IsCommit, + } + case *tmcons.Message_Proposal: + pbP, err := types.ProposalFromProto(&msg.Proposal.Proposal) + if err != nil { + return nil, fmt.Errorf("proposal msg to proto error: %w", err) + } + + pb = &ProposalMessage{ + Proposal: pbP, + } + case *tmcons.Message_ProposalPol: + pbBits := new(bits.BitArray) + pbBits.FromProto(&msg.ProposalPol.ProposalPol) + pb = &ProposalPOLMessage{ + Height: msg.ProposalPol.Height, + ProposalPOLRound: msg.ProposalPol.ProposalPolRound, + ProposalPOL: pbBits, + } + case *tmcons.Message_BlockPart: + parts, err := types.PartFromProto(&msg.BlockPart.Part) + if err != nil { + return nil, fmt.Errorf("blockpart msg to proto error: %w", err) + } + pb = &BlockPartMessage{ + Height: msg.BlockPart.Height, + Round: msg.BlockPart.Round, + Part: parts, + } + case *tmcons.Message_Vote: + vote, err := types.VoteFromProto(msg.Vote.Vote) + if err != nil { + return nil, fmt.Errorf("vote msg to proto error: %w", err) + } + + pb = &VoteMessage{ + Vote: vote, + } + case *tmcons.Message_HasVote: + pb = &HasVoteMessage{ + Height: msg.HasVote.Height, + Round: msg.HasVote.Round, + Type: msg.HasVote.Type, + Index: msg.HasVote.Index, + } + case *tmcons.Message_VoteSetMaj23: + bi, err := types.BlockIDFromProto(&msg.VoteSetMaj23.BlockID) + if err != nil { + return nil, fmt.Errorf("voteSetMaj23 msg to proto error: %w", err) + } + pb = &VoteSetMaj23Message{ + Height: msg.VoteSetMaj23.Height, + Round: msg.VoteSetMaj23.Round, + Type: msg.VoteSetMaj23.Type, + BlockID: *bi, + } + case *tmcons.Message_VoteSetBits: + bi, err := types.BlockIDFromProto(&msg.VoteSetBits.BlockID) + if err != nil { + return nil, fmt.Errorf("voteSetBits msg to proto error: %w", err) + } + bits := new(bits.BitArray) + bits.FromProto(&msg.VoteSetBits.Votes) + + pb = &VoteSetBitsMessage{ + Height: msg.VoteSetBits.Height, + Round: msg.VoteSetBits.Round, + Type: msg.VoteSetBits.Type, + BlockID: *bi, + Votes: bits, + } + default: + return nil, fmt.Errorf("consensus: message not recognized: %T", msg) + } + + if err := pb.ValidateBasic(); err != nil { + return nil, err + } + + return pb, nil +} + +// MustEncode takes the reactors msg, makes it proto and marshals it +// this mimics `MustMarshalBinaryBare` in 
that it panics on error
+func MustEncode(msg Message) []byte {
+	pb, err := MsgToProto(msg)
+	if err != nil {
+		panic(err)
+	}
+	enc, err := proto.Marshal(pb)
+	if err != nil {
+		panic(err)
+	}
+	return enc
+}
+
+// WALToProto takes a WAL message and returns a proto walMessage and error
+func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) {
+	var pb tmcons.WALMessage
+
+	switch msg := msg.(type) {
+	case types.EventDataRoundState:
+		pb = tmcons.WALMessage{
+			Sum: &tmcons.WALMessage_EventDataRoundState{
+				EventDataRoundState: &tmproto.EventDataRoundState{
+					Height: msg.Height,
+					Round:  msg.Round,
+					Step:   msg.Step,
+				},
+			},
+		}
+	case msgInfo:
+		consMsg, err := MsgToProto(msg.Msg)
+		if err != nil {
+			return nil, err
+		}
+		pb = tmcons.WALMessage{
+			Sum: &tmcons.WALMessage_MsgInfo{
+				MsgInfo: &tmcons.MsgInfo{
+					Msg:    *consMsg,
+					PeerID: string(msg.PeerID),
+				},
+			},
+		}
+	case timeoutInfo:
+		pb = tmcons.WALMessage{
+			Sum: &tmcons.WALMessage_TimeoutInfo{
+				TimeoutInfo: &tmcons.TimeoutInfo{
+					Duration: msg.Duration,
+					Height:   msg.Height,
+					Round:    msg.Round,
+					Step:     uint32(msg.Step),
+				},
+			},
+		}
+	case EndHeightMessage:
+		pb = tmcons.WALMessage{
+			Sum: &tmcons.WALMessage_EndHeight{
+				EndHeight: &tmcons.EndHeight{
+					Height: msg.Height,
+				},
+			},
+		}
+	default:
+		return nil, fmt.Errorf("to proto: wal message not recognized: %T", msg)
+	}
+
+	return &pb, nil
+}
+
+// WALFromProto takes a proto wal message and returns a consensus walMessage and error
+func WALFromProto(msg *tmcons.WALMessage) (WALMessage, error) {
+	if msg == nil {
+		return nil, errors.New("nil WAL message")
+	}
+	var pb WALMessage
+
+	switch msg := msg.Sum.(type) {
+	case *tmcons.WALMessage_EventDataRoundState:
+		pb = types.EventDataRoundState{
+			Height: msg.EventDataRoundState.Height,
+			Round:  msg.EventDataRoundState.Round,
+			Step:   msg.EventDataRoundState.Step,
+		}
+	case *tmcons.WALMessage_MsgInfo:
+		walMsg, err := MsgFromProto(&msg.MsgInfo.Msg)
+		if err != nil {
+			return nil, fmt.Errorf("msgInfo from proto error: %w", err)
+		}
+		pb = msgInfo{
+			Msg:    walMsg,
+			PeerID: p2p.ID(msg.MsgInfo.PeerID),
+		}
+
+	case *tmcons.WALMessage_TimeoutInfo:
+		tis, err := tmmath.SafeConvertUint8(int64(msg.TimeoutInfo.Step))
+		// deny message based on possible overflow
+		if err != nil {
+			return nil, fmt.Errorf("denying message due to possible overflow: %w", err)
+		}
+		pb = timeoutInfo{
+			Duration: msg.TimeoutInfo.Duration,
+			Height:   msg.TimeoutInfo.Height,
+			Round:    msg.TimeoutInfo.Round,
+			Step:     cstypes.RoundStepType(tis),
+		}
+		return pb, nil
+	case *tmcons.WALMessage_EndHeight:
+		pb := EndHeightMessage{
+			Height: msg.EndHeight.Height,
+		}
+		return pb, nil
+	default:
+		return nil, fmt.Errorf("from proto: wal message not recognized: %T", msg)
+	}
+	return pb, nil
+}
diff --git a/test/maverick/consensus/reactor.go b/test/maverick/consensus/reactor.go
new file mode 100644
index 000000000..c82656115
--- /dev/null
+++ b/test/maverick/consensus/reactor.go
@@ -0,0 +1,1720 @@
+package consensus
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+	"time"
+
+	"github.com/gogo/protobuf/proto"
+
+	cstypes "github.com/tendermint/tendermint/consensus/types"
+	"github.com/tendermint/tendermint/libs/bits"
+	tmevents "github.com/tendermint/tendermint/libs/events"
+	tmjson "github.com/tendermint/tendermint/libs/json"
+	"github.com/tendermint/tendermint/libs/log"
+	tmsync "github.com/tendermint/tendermint/libs/sync"
+	"github.com/tendermint/tendermint/p2p"
+	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
+	tmproto 
"github.com/tendermint/tendermint/proto/tendermint/types" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" +) + +const ( + StateChannel = byte(0x20) + DataChannel = byte(0x21) + VoteChannel = byte(0x22) + VoteSetBitsChannel = byte(0x23) + + maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes. + + blocksToContributeToBecomeGoodPeer = 10000 + votesToContributeToBecomeGoodPeer = 10000 +) + +//----------------------------------------------------------------------------- + +// Reactor defines a reactor for the consensus service. +type Reactor struct { + p2p.BaseReactor // BaseService + p2p.Switch + + conS *State + + mtx tmsync.RWMutex + waitSync bool + eventBus *types.EventBus + + Metrics *Metrics +} + +type ReactorOption func(*Reactor) + +// NewReactor returns a new Reactor with the given +// consensusState. +func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) *Reactor { + conR := &Reactor{ + conS: consensusState, + waitSync: waitSync, + Metrics: NopMetrics(), + } + conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR) + + for _, option := range options { + option(conR) + } + + return conR +} + +// OnStart implements BaseService by subscribing to events, which later will be +// broadcasted to other peers and starting state if we're not in fast sync. +func (conR *Reactor) OnStart() error { + conR.Logger.Info("Reactor ", "waitSync", conR.WaitSync()) + + // start routine that computes peer statistics for evaluating peer quality + go conR.peerStatsRoutine() + + conR.subscribeToBroadcastEvents() + + if !conR.WaitSync() { + conR.conS.SetSwitch(conR.Switch) + err := conR.conS.Start() + if err != nil { + return err + } + } + + return nil +} + +// OnStop implements BaseService by unsubscribing from events and stopping +// state. +func (conR *Reactor) OnStop() { + conR.unsubscribeFromBroadcastEvents() + if err := conR.conS.Stop(); err != nil { + conR.Logger.Error("Error stopping consensus state", "err", err) + } + if !conR.WaitSync() { + conR.conS.Wait() + } +} + +// SwitchToConsensus switches from fast_sync mode to consensus mode. +// It resets the state, turns off fast_sync, and starts the consensus state-machine +func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { + conR.Logger.Info("SwitchToConsensus") + + // We have no votes, so reconstruct LastCommit from SeenCommit. + if state.LastBlockHeight > 0 { + conR.conS.reconstructLastCommit(state) + } + + // NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a + // NewRoundStepMessage. 
+ conR.conS.updateToState(state) + + conR.mtx.Lock() + conR.waitSync = false + conR.mtx.Unlock() + conR.Metrics.FastSyncing.Set(0) + conR.Metrics.StateSyncing.Set(0) + + if skipWAL { + conR.conS.doWALCatchup = false + } + conR.conS.SetSwitch(conR.Switch) + err := conR.conS.Start() + if err != nil { + panic(fmt.Sprintf(`Failed to start consensus state: %v + +conS: +%+v + +conR: +%+v`, err, conR.conS, conR)) + } +} + +// GetChannels implements Reactor +func (conR *Reactor) GetChannels() []*p2p.ChannelDescriptor { + // TODO optimize + return []*p2p.ChannelDescriptor{ + { + ID: StateChannel, + Priority: 5, + SendQueueCapacity: 100, + RecvMessageCapacity: maxMsgSize, + }, + { + ID: DataChannel, // maybe split between gossiping current block and catchup stuff + // once we gossip the whole block there's nothing left to send until next height or round + Priority: 10, + SendQueueCapacity: 100, + RecvBufferCapacity: 50 * 4096, + RecvMessageCapacity: maxMsgSize, + }, + { + ID: VoteChannel, + Priority: 5, + SendQueueCapacity: 100, + RecvBufferCapacity: 100 * 100, + RecvMessageCapacity: maxMsgSize, + }, + { + ID: VoteSetBitsChannel, + Priority: 1, + SendQueueCapacity: 2, + RecvBufferCapacity: 1024, + RecvMessageCapacity: maxMsgSize, + }, + } +} + +// InitPeer implements Reactor by creating a state for the peer. +func (conR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer { + peerState := NewPeerState(peer).SetLogger(conR.Logger) + peer.Set(types.PeerStateKey, peerState) + return peer +} + +// AddPeer implements Reactor by spawning multiple gossiping goroutines for the +// peer. +func (conR *Reactor) AddPeer(peer p2p.Peer) { + if !conR.IsRunning() { + return + } + + peerState, ok := peer.Get(types.PeerStateKey).(*PeerState) + if !ok { + panic(fmt.Sprintf("peer %v has no state", peer)) + } + // Begin routines for this peer. + go conR.gossipDataRoutine(peer, peerState) + go conR.gossipVotesRoutine(peer, peerState) + go conR.queryMaj23Routine(peer, peerState) + + // Send our state to peer. + // If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). + if !conR.WaitSync() { + conR.sendNewRoundStepMessage(peer) + } +} + +// RemovePeer is a noop. +func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { + if !conR.IsRunning() { + return + } + // TODO + // ps, ok := peer.Get(PeerStateKey).(*PeerState) + // if !ok { + // panic(fmt.Sprintf("Peer %v has no state", peer)) + // } + // ps.Disconnect() +} + +// Receive implements Reactor +// NOTE: We process these messages even when we're fast_syncing. +// Messages affect either a peer state or the consensus state. 
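+// Roughly: StateChannel messages update the sender's PeerState, while
+// DataChannel and VoteChannel messages are also queued on peerMsgQueue for the
+// consensus state machine (see the switch in the body below).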
+// Peer state updates can happen in parallel, but processing of +// proposals, block parts, and votes are ordered by the receiveRoutine +// NOTE: blocks on consensus state for proposals, block parts, and votes +func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { + if !conR.IsRunning() { + conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes) + return + } + + msg, err := decodeMsg(msgBytes) + if err != nil { + conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) + conR.Switch.StopPeerForError(src, err) + return + } + + if err = msg.ValidateBasic(); err != nil { + conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) + conR.Switch.StopPeerForError(src, err) + return + } + + conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) + + // Get peer states + ps, ok := src.Get(types.PeerStateKey).(*PeerState) + if !ok { + panic(fmt.Sprintf("Peer %v has no state", src)) + } + + switch chID { + case StateChannel: + switch msg := msg.(type) { + case *NewRoundStepMessage: + conR.conS.mtx.Lock() + initialHeight := conR.conS.state.InitialHeight + conR.conS.mtx.Unlock() + if err = msg.ValidateHeight(initialHeight); err != nil { + conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) + conR.Switch.StopPeerForError(src, err) + return + } + ps.ApplyNewRoundStepMessage(msg) + case *NewValidBlockMessage: + ps.ApplyNewValidBlockMessage(msg) + case *HasVoteMessage: + ps.ApplyHasVoteMessage(msg) + case *VoteSetMaj23Message: + cs := conR.conS + cs.mtx.Lock() + height, votes := cs.Height, cs.Votes + cs.mtx.Unlock() + if height != msg.Height { + return + } + // Peer claims to have a maj23 for some BlockID at H,R,S, + err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID) + if err != nil { + conR.Switch.StopPeerForError(src, err) + return + } + // Respond with a VoteSetBitsMessage showing which votes we have. + // (and consequently shows which we don't have) + var ourVotes *bits.BitArray + switch msg.Type { + case tmproto.PrevoteType: + ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID) + case tmproto.PrecommitType: + ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) + default: + panic("Bad VoteSetBitsMessage field Type. 
Forgot to add a check in ValidateBasic?") + } + src.TrySend(VoteSetBitsChannel, MustEncode(&VoteSetBitsMessage{ + Height: msg.Height, + Round: msg.Round, + Type: msg.Type, + BlockID: msg.BlockID, + Votes: ourVotes, + })) + default: + conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) + } + + case DataChannel: + if conR.WaitSync() { + conR.Logger.Info("Ignoring message received during sync", "msg", msg) + return + } + switch msg := msg.(type) { + case *ProposalMessage: + ps.SetHasProposal(msg.Proposal) + conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} + case *ProposalPOLMessage: + ps.ApplyProposalPOLMessage(msg) + case *BlockPartMessage: + ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index)) + conR.Metrics.BlockParts.With("peer_id", string(src.ID())).Add(1) + conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} + default: + conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) + } + + case VoteChannel: + if conR.WaitSync() { + conR.Logger.Info("Ignoring message received during sync", "msg", msg) + return + } + switch msg := msg.(type) { + case *VoteMessage: + cs := conR.conS + cs.mtx.RLock() + height, valSize, lastCommitSize := cs.Height, cs.Validators.Size(), cs.LastCommit.Size() + cs.mtx.RUnlock() + ps.EnsureVoteBitArrays(height, valSize) + ps.EnsureVoteBitArrays(height-1, lastCommitSize) + ps.SetHasVote(msg.Vote) + + cs.peerMsgQueue <- msgInfo{msg, src.ID()} + + default: + // don't punish (leave room for soft upgrades) + conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) + } + + case VoteSetBitsChannel: + if conR.WaitSync() { + conR.Logger.Info("Ignoring message received during sync", "msg", msg) + return + } + switch msg := msg.(type) { + case *VoteSetBitsMessage: + cs := conR.conS + cs.mtx.Lock() + height, votes := cs.Height, cs.Votes + cs.mtx.Unlock() + + if height == msg.Height { + var ourVotes *bits.BitArray + switch msg.Type { + case tmproto.PrevoteType: + ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID) + case tmproto.PrecommitType: + ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) + default: + panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?") + } + ps.ApplyVoteSetBitsMessage(msg, ourVotes) + } else { + ps.ApplyVoteSetBitsMessage(msg, nil) + } + default: + // don't punish (leave room for soft upgrades) + conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) + } + + default: + conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID)) + } +} + +// SetEventBus sets event bus. +func (conR *Reactor) SetEventBus(b *types.EventBus) { + conR.eventBus = b + conR.conS.SetEventBus(b) +} + +// WaitSync returns whether the consensus reactor is waiting for state/fast sync. +func (conR *Reactor) WaitSync() bool { + conR.mtx.RLock() + defer conR.mtx.RUnlock() + return conR.waitSync +} + +//-------------------------------------- + +// subscribeToBroadcastEvents subscribes for new round steps and votes +// using internal pubsub defined on state to broadcast +// them to peers upon receiving. 
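+// The listeners added below cover types.EventNewRoundStep, types.EventValidBlock
+// and types.EventVote; each one re-encodes the event as a consensus message and
+// broadcasts it on the StateChannel, e.g.:
+//
+//	evsw.FireEvent(types.EventVote, vote)
+//	  -> broadcastHasVoteMessage(vote)
+//	  -> Switch.Broadcast(StateChannel, MustEncode(&HasVoteMessage{...}))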
+func (conR *Reactor) subscribeToBroadcastEvents() { + const subscriber = "consensus-reactor" + if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep, + func(data tmevents.EventData) { + conR.broadcastNewRoundStepMessage(data.(*cstypes.RoundState)) + }); err != nil { + conR.Logger.Error("Error adding listener for events", "err", err) + } + + if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventValidBlock, + func(data tmevents.EventData) { + conR.broadcastNewValidBlockMessage(data.(*cstypes.RoundState)) + }); err != nil { + conR.Logger.Error("Error adding listener for events", "err", err) + } + + if err := conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote, + func(data tmevents.EventData) { + conR.broadcastHasVoteMessage(data.(*types.Vote)) + }); err != nil { + conR.Logger.Error("Error adding listener for events", "err", err) + } + +} + +func (conR *Reactor) unsubscribeFromBroadcastEvents() { + const subscriber = "consensus-reactor" + conR.conS.evsw.RemoveListener(subscriber) +} + +func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) { + nrsMsg := makeRoundStepMessage(rs) + conR.Switch.Broadcast(StateChannel, MustEncode(nrsMsg)) +} + +func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { + csMsg := &NewValidBlockMessage{ + Height: rs.Height, + Round: rs.Round, + BlockPartSetHeader: rs.ProposalBlockParts.Header(), + BlockParts: rs.ProposalBlockParts.BitArray(), + IsCommit: rs.Step == cstypes.RoundStepCommit, + } + conR.Switch.Broadcast(StateChannel, MustEncode(csMsg)) +} + +// Broadcasts HasVoteMessage to peers that care. +func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) { + msg := &HasVoteMessage{ + Height: vote.Height, + Round: vote.Round, + Type: vote.Type, + Index: vote.ValidatorIndex, + } + conR.Switch.Broadcast(StateChannel, MustEncode(msg)) + /* + // TODO: Make this broadcast more selective. + for _, peer := range conR.Switch.Peers().List() { + ps, ok := peer.Get(PeerStateKey).(*PeerState) + if !ok { + panic(fmt.Sprintf("Peer %v has no state", peer)) + } + prs := ps.GetRoundState() + if prs.Height == vote.Height { + // TODO: Also filter on round? + peer.TrySend(StateChannel, struct{ ConsensusMessage }{msg}) + } else { + // Height doesn't match + // TODO: check a field, maybe CatchupCommitRound? + // TODO: But that requires changing the struct field comment. + } + } + */ +} + +func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) { + nrsMsg = &NewRoundStepMessage{ + Height: rs.Height, + Round: rs.Round, + Step: rs.Step, + SecondsSinceStartTime: int64(time.Since(rs.StartTime).Seconds()), + LastCommitRound: rs.LastCommit.GetRound(), + } + return +} + +func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) { + rs := conR.conS.GetRoundState() + nrsMsg := makeRoundStepMessage(rs) + peer.Send(StateChannel, MustEncode(nrsMsg)) +} + +func (conR *Reactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) { + logger := conR.Logger.With("peer", peer) + +OUTER_LOOP: + for { + // Manage disconnects from self or peer. + if !peer.IsRunning() || !conR.IsRunning() { + logger.Info("Stopping gossipDataRoutine for peer") + return + } + rs := conR.conS.GetRoundState() + prs := ps.GetRoundState() + + // Send proposal Block parts? 
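+		// A worked example of the pick below, assuming a five-part block:
+		//
+		//	our parts:        11011
+		//	peer's parts:     10010
+		//	ours.Sub(peer's): 01001 -> PickRandom returns index 1 or 4
+		//
+		// i.e. Sub keeps the parts we have that the peer is still missing.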
+		if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) {
+			if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok {
+				part := rs.ProposalBlockParts.GetPart(index)
+				msg := &BlockPartMessage{
+					Height: rs.Height, // This tells peer that this part applies to us.
+					Round:  rs.Round,  // This tells peer that this part applies to us.
+					Part:   part,
+				}
+				logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round)
+				if peer.Send(DataChannel, MustEncode(msg)) {
+					ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
+				}
+				continue OUTER_LOOP
+			}
+		}
+
+		// If the peer is on a previous height that we have, help catch up.
+		if (0 < prs.Height) && (prs.Height < rs.Height) && (prs.Height >= conR.conS.blockStore.Base()) {
+			heightLogger := logger.With("height", prs.Height)
+
+			// if we never received the commit message from the peer, the block parts won't be initialized
+			if prs.ProposalBlockParts == nil {
+				blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
+				if blockMeta == nil {
+					heightLogger.Error("Failed to load block meta",
+						"blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height())
+					time.Sleep(conR.conS.config.PeerGossipSleepDuration)
+				} else {
+					ps.InitProposalBlockParts(blockMeta.BlockID.PartSetHeader)
+				}
+				// continue the loop since prs is a copy and not affected by this initialization
+				continue OUTER_LOOP
+			}
+			conR.gossipDataForCatchup(heightLogger, rs, prs, ps, peer)
+			continue OUTER_LOOP
+		}
+
+		// If height and round don't match, sleep.
+		if (rs.Height != prs.Height) || (rs.Round != prs.Round) {
+			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
+			continue OUTER_LOOP
+		}
+
+		// By here, height and round match.
+		// Proposal block parts were already matched and sent if any were wanted.
+		// (These can match on hash so the round doesn't matter)
+		// Now consider sending other things, like the Proposal itself.
+
+		// Send Proposal && ProposalPOL BitArray?
+		if rs.Proposal != nil && !prs.Proposal {
+			// Proposal: share the proposal metadata with peer.
+			{
+				msg := &ProposalMessage{Proposal: rs.Proposal}
+				logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
+				if peer.Send(DataChannel, MustEncode(msg)) {
+					// NOTE[ZM]: A peer might have received a different proposal msg, in which case this Proposal msg will be rejected!
+					ps.SetHasProposal(rs.Proposal)
+				}
+			}
+			// ProposalPOL: lets peer know which POL votes we have so far.
+			// Peer must receive ProposalMessage first.
+			// rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
+			// so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
+			if 0 <= rs.Proposal.POLRound {
+				msg := &ProposalPOLMessage{
+					Height:           rs.Height,
+					ProposalPOLRound: rs.Proposal.POLRound,
+					ProposalPOL:      rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(),
+				}
+				logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round)
+				peer.Send(DataChannel, MustEncode(msg))
+			}
+			continue OUTER_LOOP
+		}
+
+		// Nothing to do. Sleep.
+ time.Sleep(conR.conS.config.PeerGossipSleepDuration) + continue OUTER_LOOP + } +} + +func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundState, + prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer) { + + if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok { + // Ensure that the peer's PartSetHeader is correct + blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) + if blockMeta == nil { + logger.Error("Failed to load block meta", "ourHeight", rs.Height, + "blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) + return + } else if !blockMeta.BlockID.PartSetHeader.Equals(prs.ProposalBlockPartSetHeader) { + logger.Info("Peer ProposalBlockPartSetHeader mismatch, sleeping", + "blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) + return + } + // Load the part + part := conR.conS.blockStore.LoadBlockPart(prs.Height, index) + if part == nil { + logger.Error("Could not load part", "index", index, + "blockPartSetHeader", blockMeta.BlockID.PartSetHeader, "peerBlockPartSetHeader", prs.ProposalBlockPartSetHeader) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) + return + } + // Send the part + msg := &BlockPartMessage{ + Height: prs.Height, // Not our height, so it doesn't matter. + Round: prs.Round, // Not our height, so it doesn't matter. + Part: part, + } + logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index) + if peer.Send(DataChannel, MustEncode(msg)) { + ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) + } else { + logger.Debug("Sending block part for catchup failed") + } + return + } + time.Sleep(conR.conS.config.PeerGossipSleepDuration) +} + +func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) { + logger := conR.Logger.With("peer", peer) + + // Simple hack to throttle logs upon sleep. + var sleeping = 0 + +OUTER_LOOP: + for { + // Manage disconnects from self or peer. + if !peer.IsRunning() || !conR.IsRunning() { + logger.Info("Stopping gossipVotesRoutine for peer") + return + } + rs := conR.conS.GetRoundState() + prs := ps.GetRoundState() + + switch sleeping { + case 1: // First sleep + sleeping = 2 + case 2: // No more sleep + sleeping = 0 + } + + // If height matches, then send LastCommit, Prevotes, Precommits. + if rs.Height == prs.Height { + heightLogger := logger.With("height", prs.Height) + if conR.gossipVotesForHeight(heightLogger, rs, prs, ps) { + continue OUTER_LOOP + } + } + + // Special catchup logic. + // If peer is lagging by height 1, send LastCommit. + if prs.Height != 0 && rs.Height == prs.Height+1 { + if ps.PickSendVote(rs.LastCommit) { + logger.Debug("Picked rs.LastCommit to send", "height", prs.Height) + continue OUTER_LOOP + } + } + + // Catchup logic + // If peer is lagging by more than 1, send Commit. + if prs.Height != 0 && rs.Height >= prs.Height+2 && prs.Height >= conR.conS.blockStore.Base() { + // Load the block commit for prs.Height, + // which contains precommit signatures for prs.Height. + if commit := conR.conS.blockStore.LoadBlockCommit(prs.Height); commit != nil { + if ps.PickSendVote(commit) { + logger.Debug("Picked Catchup commit to send", "height", prs.Height) + continue OUTER_LOOP + } + } + } + + if sleeping == 0 { + // We sent nothing. Sleep... 
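+			// The sleeping counter only throttles this log line: 0 means we
+			// were sending, 1 means we just went idle (log the line once), and
+			// the switch at the top of the loop cycles 1 -> 2 so continued
+			// idle iterations stay quiet until a send resets the cycle.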
+ sleeping = 1 + logger.Debug("No votes to send, sleeping", "rs.Height", rs.Height, "prs.Height", prs.Height, + "localPV", rs.Votes.Prevotes(rs.Round).BitArray(), "peerPV", prs.Prevotes, + "localPC", rs.Votes.Precommits(rs.Round).BitArray(), "peerPC", prs.Precommits) + } else if sleeping == 2 { + // Continued sleep... + sleeping = 1 + } + + time.Sleep(conR.conS.config.PeerGossipSleepDuration) + continue OUTER_LOOP + } +} + +func (conR *Reactor) gossipVotesForHeight( + logger log.Logger, + rs *cstypes.RoundState, + prs *cstypes.PeerRoundState, + ps *PeerState, +) bool { + + // If there are lastCommits to send... + if prs.Step == cstypes.RoundStepNewHeight { + if ps.PickSendVote(rs.LastCommit) { + logger.Debug("Picked rs.LastCommit to send") + return true + } + } + // If there are POL prevotes to send... + if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 { + if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { + if ps.PickSendVote(polPrevotes) { + logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send", + "round", prs.ProposalPOLRound) + return true + } + } + } + // If there are prevotes to send... + if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round { + if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) { + logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round) + return true + } + } + // If there are precommits to send... + if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round { + if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) { + logger.Debug("Picked rs.Precommits(prs.Round) to send", "round", prs.Round) + return true + } + } + // If there are prevotes to send...Needed because of validBlock mechanism + if prs.Round != -1 && prs.Round <= rs.Round { + if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) { + logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round) + return true + } + } + // If there are POLPrevotes to send... + if prs.ProposalPOLRound != -1 { + if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { + if ps.PickSendVote(polPrevotes) { + logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send", + "round", prs.ProposalPOLRound) + return true + } + } + } + + return false +} + +// NOTE: `queryMaj23Routine` has a simple crude design since it only comes +// into play for liveness when there's a signature DDoS attack happening. +func (conR *Reactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) { + logger := conR.Logger.With("peer", peer) + +OUTER_LOOP: + for { + // Manage disconnects from self or peer. 
+ if !peer.IsRunning() || !conR.IsRunning() { + logger.Info("Stopping queryMaj23Routine for peer") + return + } + + // Maybe send Height/Round/Prevotes + { + rs := conR.conS.GetRoundState() + prs := ps.GetRoundState() + if rs.Height == prs.Height { + if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { + peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ + Height: prs.Height, + Round: prs.Round, + Type: tmproto.PrevoteType, + BlockID: maj23, + })) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + } + } + } + + // Maybe send Height/Round/Precommits + { + rs := conR.conS.GetRoundState() + prs := ps.GetRoundState() + if rs.Height == prs.Height { + if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { + peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ + Height: prs.Height, + Round: prs.Round, + Type: tmproto.PrecommitType, + BlockID: maj23, + })) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + } + } + } + + // Maybe send Height/Round/ProposalPOL + { + rs := conR.conS.GetRoundState() + prs := ps.GetRoundState() + if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { + if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { + peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ + Height: prs.Height, + Round: prs.ProposalPOLRound, + Type: tmproto.PrevoteType, + BlockID: maj23, + })) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + } + } + } + + // Little point sending LastCommitRound/LastCommit, + // These are fleeting and non-blocking. + + // Maybe send Height/CatchupCommitRound/CatchupCommit. + { + prs := ps.GetRoundState() + if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() && + prs.Height >= conR.conS.blockStore.Base() { + if commit := conR.conS.LoadCommit(prs.Height); commit != nil { + peer.TrySend(StateChannel, MustEncode(&VoteSetMaj23Message{ + Height: prs.Height, + Round: commit.Round, + Type: tmproto.PrecommitType, + BlockID: commit.BlockID, + })) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + } + } + } + + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + + continue OUTER_LOOP + } +} + +func (conR *Reactor) peerStatsRoutine() { + for { + if !conR.IsRunning() { + conR.Logger.Info("Stopping peerStatsRoutine") + return + } + + select { + case msg := <-conR.conS.statsMsgQueue: + // Get peer + peer := conR.Switch.Peers().Get(msg.PeerID) + if peer == nil { + conR.Logger.Debug("Attempt to update stats for non-existent peer", + "peer", msg.PeerID) + continue + } + // Get peer state + ps, ok := peer.Get(types.PeerStateKey).(*PeerState) + if !ok { + panic(fmt.Sprintf("Peer %v has no state", peer)) + } + switch msg.Msg.(type) { + case *VoteMessage: + if numVotes := ps.RecordVote(); numVotes%votesToContributeToBecomeGoodPeer == 0 { + conR.Switch.MarkPeerAsGood(peer) + } + case *BlockPartMessage: + if numParts := ps.RecordBlockPart(); numParts%blocksToContributeToBecomeGoodPeer == 0 { + conR.Switch.MarkPeerAsGood(peer) + } + } + case <-conR.conS.Quit(): + return + + case <-conR.Quit(): + return + } + } +} + +// String returns a string representation of the Reactor. +// NOTE: For now, it is just a hard-coded string to avoid accessing unprotected shared variables. +// TODO: improve! 
+func (conR *Reactor) String() string {
+	// better not to access shared variables
+	return "ConsensusReactor" // conR.StringIndented("")
+}
+
+// StringIndented returns an indented string representation of the Reactor
+func (conR *Reactor) StringIndented(indent string) string {
+	s := "ConsensusReactor{\n"
+	s += indent + "  " + conR.conS.StringIndented(indent+"  ") + "\n"
+	for _, peer := range conR.Switch.Peers().List() {
+		ps, ok := peer.Get(types.PeerStateKey).(*PeerState)
+		if !ok {
+			panic(fmt.Sprintf("Peer %v has no state", peer))
+		}
+		s += indent + "  " + ps.StringIndented(indent+"  ") + "\n"
+	}
+	s += indent + "}"
+	return s
+}
+
+// ReactorMetrics sets the metrics
+func ReactorMetrics(metrics *Metrics) ReactorOption {
+	return func(conR *Reactor) { conR.Metrics = metrics }
+}
+
+//-----------------------------------------------------------------------------
+
+var (
+	ErrPeerStateHeightRegression = errors.New("error peer state height regression")
+	ErrPeerStateInvalidStartTime = errors.New("error peer state invalid startTime")
+)
+
+// PeerState contains the known state of a peer, including its connection and
+// threadsafe access to its PeerRoundState.
+// NOTE: THIS GETS DUMPED WITH rpc/core/consensus.go.
+// Be mindful of what you Expose.
+type PeerState struct {
+	peer   p2p.Peer
+	logger log.Logger
+
+	mtx   sync.Mutex             // NOTE: Modify below using setters, never directly.
+	PRS   cstypes.PeerRoundState `json:"round_state"` // Exposed.
+	Stats *peerStateStats        `json:"stats"`       // Exposed.
+}
+
+// peerStateStats holds internal statistics for a peer.
+type peerStateStats struct {
+	Votes      int `json:"votes"`
+	BlockParts int `json:"block_parts"`
+}
+
+func (pss peerStateStats) String() string {
+	return fmt.Sprintf("peerStateStats{votes: %d, blockParts: %d}",
+		pss.Votes, pss.BlockParts)
+}
+
+// NewPeerState returns a new PeerState for the given Peer
+func NewPeerState(peer p2p.Peer) *PeerState {
+	return &PeerState{
+		peer:   peer,
+		logger: log.NewNopLogger(),
+		PRS: cstypes.PeerRoundState{
+			Round:              -1,
+			ProposalPOLRound:   -1,
+			LastCommitRound:    -1,
+			CatchupCommitRound: -1,
+		},
+		Stats: &peerStateStats{},
+	}
+}
+
+// SetLogger sets a logger on the peer state. It returns the peer state
+// itself.
+func (ps *PeerState) SetLogger(logger log.Logger) *PeerState {
+	ps.logger = logger
+	return ps
+}
+
+// GetRoundState returns a shallow copy of the PeerRoundState.
+// There's no point in mutating it since it won't change PeerState.
+func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
+	ps.mtx.Lock()
+	defer ps.mtx.Unlock()
+
+	prs := ps.PRS // copy
+	return &prs
+}
+
+// ToJSON returns a JSON representation of the PeerState.
+func (ps *PeerState) ToJSON() ([]byte, error) {
+	ps.mtx.Lock()
+	defer ps.mtx.Unlock()
+
+	return tmjson.Marshal(ps)
+}
+
+// GetHeight returns an atomic snapshot of the PeerRoundState's height,
+// used by the mempool to ensure peers are caught up before broadcasting new txs
+func (ps *PeerState) GetHeight() int64 {
+	ps.mtx.Lock()
+	defer ps.mtx.Unlock()
+	return ps.PRS.Height
+}
+
+// SetHasProposal sets the given proposal as known for the peer.
+func (ps *PeerState) SetHasProposal(proposal *types.Proposal) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.Height != proposal.Height || ps.PRS.Round != proposal.Round { + return + } + + if ps.PRS.Proposal { + return + } + + ps.PRS.Proposal = true + + // ps.PRS.ProposalBlockParts is set due to NewValidBlockMessage + if ps.PRS.ProposalBlockParts != nil { + return + } + + ps.PRS.ProposalBlockPartSetHeader = proposal.BlockID.PartSetHeader + ps.PRS.ProposalBlockParts = bits.NewBitArray(int(proposal.BlockID.PartSetHeader.Total)) + ps.PRS.ProposalPOLRound = proposal.POLRound + ps.PRS.ProposalPOL = nil // Nil until ProposalPOLMessage received. +} + +// InitProposalBlockParts initializes the peer's proposal block parts header and bit array. +func (ps *PeerState) InitProposalBlockParts(partSetHeader types.PartSetHeader) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.ProposalBlockParts != nil { + return + } + + ps.PRS.ProposalBlockPartSetHeader = partSetHeader + ps.PRS.ProposalBlockParts = bits.NewBitArray(int(partSetHeader.Total)) +} + +// SetHasProposalBlockPart sets the given block part index as known for the peer. +func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index int) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.Height != height || ps.PRS.Round != round { + return + } + + ps.PRS.ProposalBlockParts.SetIndex(index, true) +} + +// PickSendVote picks a vote and sends it to the peer. +// Returns true if vote was sent. +func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool { + if vote, ok := ps.PickVoteToSend(votes); ok { + msg := &VoteMessage{vote} + ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote) + if ps.peer.Send(VoteChannel, MustEncode(msg)) { + ps.SetHasVote(vote) + return true + } + return false + } + return false +} + +// PickVoteToSend picks a vote to send to the peer. +// Returns true if a vote was picked. +// NOTE: `votes` must be the correct Size() for the Height(). +func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote, ok bool) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if votes.Size() == 0 { + return nil, false + } + + height, round, votesType, size := + votes.GetHeight(), votes.GetRound(), tmproto.SignedMsgType(votes.Type()), votes.Size() + + // Lazily set data using 'votes'. 
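+	// If the reader is a commit, make sure a CatchupCommit bit array exists
+	// for its round; ensureVoteBitArrays then sizes the prevote/precommit
+	// arrays to the validator set so the BitArray().Sub(...) pick below is
+	// well-defined.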
+ if votes.IsCommit() { + ps.ensureCatchupCommitRound(height, round, size) + } + ps.ensureVoteBitArrays(height, size) + + psVotes := ps.getVoteBitArray(height, round, votesType) + if psVotes == nil { + return nil, false // Not something worth sending + } + if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok { + return votes.GetByIndex(int32(index)), true + } + return nil, false +} + +func (ps *PeerState) getVoteBitArray(height int64, round int32, votesType tmproto.SignedMsgType) *bits.BitArray { + if !types.IsVoteTypeValid(votesType) { + return nil + } + + if ps.PRS.Height == height { + if ps.PRS.Round == round { + switch votesType { + case tmproto.PrevoteType: + return ps.PRS.Prevotes + case tmproto.PrecommitType: + return ps.PRS.Precommits + } + } + if ps.PRS.CatchupCommitRound == round { + switch votesType { + case tmproto.PrevoteType: + return nil + case tmproto.PrecommitType: + return ps.PRS.CatchupCommit + } + } + if ps.PRS.ProposalPOLRound == round { + switch votesType { + case tmproto.PrevoteType: + return ps.PRS.ProposalPOL + case tmproto.PrecommitType: + return nil + } + } + return nil + } + if ps.PRS.Height == height+1 { + if ps.PRS.LastCommitRound == round { + switch votesType { + case tmproto.PrevoteType: + return nil + case tmproto.PrecommitType: + return ps.PRS.LastCommit + } + } + return nil + } + return nil +} + +// 'round': A round for which we have a +2/3 commit. +func (ps *PeerState) ensureCatchupCommitRound(height int64, round int32, numValidators int) { + if ps.PRS.Height != height { + return + } + /* + NOTE: This is wrong, 'round' could change. + e.g. if orig round is not the same as block LastCommit round. + if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round { + panic(fmt.Sprintf( + "Conflicting CatchupCommitRound. Height: %v, + Orig: %v, + New: %v", + height, + ps.CatchupCommitRound, + round)) + } + */ + if ps.PRS.CatchupCommitRound == round { + return // Nothing to do! + } + ps.PRS.CatchupCommitRound = round + if round == ps.PRS.Round { + ps.PRS.CatchupCommit = ps.PRS.Precommits + } else { + ps.PRS.CatchupCommit = bits.NewBitArray(numValidators) + } +} + +// EnsureVoteBitArrays ensures the bit-arrays have been allocated for tracking +// what votes this peer has received. +// NOTE: It's important to make sure that numValidators actually matches +// what the node sees as the number of validators for height. +func (ps *PeerState) EnsureVoteBitArrays(height int64, numValidators int) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + ps.ensureVoteBitArrays(height, numValidators) +} + +func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) { + if ps.PRS.Height == height { + if ps.PRS.Prevotes == nil { + ps.PRS.Prevotes = bits.NewBitArray(numValidators) + } + if ps.PRS.Precommits == nil { + ps.PRS.Precommits = bits.NewBitArray(numValidators) + } + if ps.PRS.CatchupCommit == nil { + ps.PRS.CatchupCommit = bits.NewBitArray(numValidators) + } + if ps.PRS.ProposalPOL == nil { + ps.PRS.ProposalPOL = bits.NewBitArray(numValidators) + } + } else if ps.PRS.Height == height+1 { + if ps.PRS.LastCommit == nil { + ps.PRS.LastCommit = bits.NewBitArray(numValidators) + } + } +} + +// RecordVote increments internal votes related statistics for this peer. +// It returns the total number of added votes. +func (ps *PeerState) RecordVote() int { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + ps.Stats.Votes++ + + return ps.Stats.Votes +} + +// VotesSent returns the number of blocks for which peer has been sending us +// votes. 
+func (ps *PeerState) VotesSent() int { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + return ps.Stats.Votes +} + +// RecordBlockPart increments internal block part related statistics for this peer. +// It returns the total number of added block parts. +func (ps *PeerState) RecordBlockPart() int { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + ps.Stats.BlockParts++ + return ps.Stats.BlockParts +} + +// BlockPartsSent returns the number of useful block parts the peer has sent us. +func (ps *PeerState) BlockPartsSent() int { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + return ps.Stats.BlockParts +} + +// SetHasVote sets the given vote as known by the peer +func (ps *PeerState) SetHasVote(vote *types.Vote) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex) +} + +func (ps *PeerState) setHasVote(height int64, round int32, voteType tmproto.SignedMsgType, index int32) { + logger := ps.logger.With( + "peerH/R", + fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round), + "H/R", + fmt.Sprintf("%d/%d", height, round)) + logger.Debug("setHasVote", "type", voteType, "index", index) + + // NOTE: some may be nil BitArrays -> no side effects. + psVotes := ps.getVoteBitArray(height, round, voteType) + if psVotes != nil { + psVotes.SetIndex(int(index), true) + } +} + +// ApplyNewRoundStepMessage updates the peer state for the new round. +func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + // Ignore duplicates or decreases + if CompareHRS(msg.Height, msg.Round, msg.Step, ps.PRS.Height, ps.PRS.Round, ps.PRS.Step) <= 0 { + return + } + + // Just remember these values. + psHeight := ps.PRS.Height + psRound := ps.PRS.Round + psCatchupCommitRound := ps.PRS.CatchupCommitRound + psCatchupCommit := ps.PRS.CatchupCommit + + startTime := tmtime.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second) + ps.PRS.Height = msg.Height + ps.PRS.Round = msg.Round + ps.PRS.Step = msg.Step + ps.PRS.StartTime = startTime + if psHeight != msg.Height || psRound != msg.Round { + ps.PRS.Proposal = false + ps.PRS.ProposalBlockPartSetHeader = types.PartSetHeader{} + ps.PRS.ProposalBlockParts = nil + ps.PRS.ProposalPOLRound = -1 + ps.PRS.ProposalPOL = nil + // We'll update the BitArray capacity later. + ps.PRS.Prevotes = nil + ps.PRS.Precommits = nil + } + if psHeight == msg.Height && psRound != msg.Round && msg.Round == psCatchupCommitRound { + // Peer caught up to CatchupCommitRound. + // Preserve psCatchupCommit! + // NOTE: We prefer to use prs.Precommits if + // pr.Round matches pr.CatchupCommitRound. + ps.PRS.Precommits = psCatchupCommit + } + if psHeight != msg.Height { + // Shift Precommits to LastCommit. + if psHeight+1 == msg.Height && psRound == msg.LastCommitRound { + ps.PRS.LastCommitRound = msg.LastCommitRound + ps.PRS.LastCommit = ps.PRS.Precommits + } else { + ps.PRS.LastCommitRound = msg.LastCommitRound + ps.PRS.LastCommit = nil + } + // We'll update the BitArray capacity later. + ps.PRS.CatchupCommitRound = -1 + ps.PRS.CatchupCommit = nil + } +} + +// ApplyNewValidBlockMessage updates the peer state for the new valid block. 
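+// A message for a different round is applied only when msg.IsCommit is set,
+// since a commit's block parts remain relevant for the rest of the height.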
+func (ps *PeerState) ApplyNewValidBlockMessage(msg *NewValidBlockMessage) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.Height != msg.Height { + return + } + + if ps.PRS.Round != msg.Round && !msg.IsCommit { + return + } + + ps.PRS.ProposalBlockPartSetHeader = msg.BlockPartSetHeader + ps.PRS.ProposalBlockParts = msg.BlockParts +} + +// ApplyProposalPOLMessage updates the peer state for the new proposal POL. +func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.Height != msg.Height { + return + } + if ps.PRS.ProposalPOLRound != msg.ProposalPOLRound { + return + } + + // TODO: Merge onto existing ps.PRS.ProposalPOL? + // We might have sent some prevotes in the meantime. + ps.PRS.ProposalPOL = msg.ProposalPOL +} + +// ApplyHasVoteMessage updates the peer state for the new vote. +func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.PRS.Height != msg.Height { + return + } + + ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index) +} + +// ApplyVoteSetBitsMessage updates the peer state for the bit-array of votes +// it claims to have for the corresponding BlockID. +// `ourVotes` is a BitArray of votes we have for msg.BlockID +// NOTE: if ourVotes is nil (e.g. msg.Height < rs.Height), +// we conservatively overwrite ps's votes w/ msg.Votes. +func (ps *PeerState) ApplyVoteSetBitsMessage(msg *VoteSetBitsMessage, ourVotes *bits.BitArray) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + votes := ps.getVoteBitArray(msg.Height, msg.Round, msg.Type) + if votes != nil { + if ourVotes == nil { + votes.Update(msg.Votes) + } else { + otherVotes := votes.Sub(ourVotes) + hasVotes := otherVotes.Or(msg.Votes) + votes.Update(hasVotes) + } + } +} + +// String returns a string representation of the PeerState +func (ps *PeerState) String() string { + return ps.StringIndented("") +} + +// StringIndented returns a string representation of the PeerState +func (ps *PeerState) StringIndented(indent string) string { + ps.mtx.Lock() + defer ps.mtx.Unlock() + return fmt.Sprintf(`PeerState{ +%s Key %v +%s RoundState %v +%s Stats %v +%s}`, + indent, ps.peer.ID(), + indent, ps.PRS.StringIndented(indent+" "), + indent, ps.Stats, + indent) +} + +//----------------------------------------------------------------------------- +// Messages + +// Message is a message that can be sent and received on the Reactor +type Message interface { + ValidateBasic() error +} + +// func init() { +// tmjson.RegisterType(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage") +// tmjson.RegisterType(&NewValidBlockMessage{}, "tendermint/NewValidBlockMessage") +// tmjson.RegisterType(&ProposalMessage{}, "tendermint/Proposal") +// tmjson.RegisterType(&ProposalPOLMessage{}, "tendermint/ProposalPOL") +// tmjson.RegisterType(&BlockPartMessage{}, "tendermint/BlockPart") +// tmjson.RegisterType(&VoteMessage{}, "tendermint/Vote") +// tmjson.RegisterType(&HasVoteMessage{}, "tendermint/HasVote") +// tmjson.RegisterType(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23") +// tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits") +// } + +func decodeMsg(bz []byte) (msg Message, err error) { + pb := &tmcons.Message{} + if err = proto.Unmarshal(bz, pb); err != nil { + return msg, err + } + + return MsgFromProto(pb) +} + +//------------------------------------- + +// NewRoundStepMessage is sent for every step taken in the ConsensusState. 
+// For every height/round/step transition +type NewRoundStepMessage struct { + Height int64 + Round int32 + Step cstypes.RoundStepType + SecondsSinceStartTime int64 + LastCommitRound int32 +} + +// ValidateBasic performs basic validation. +func (m *NewRoundStepMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + if m.Round < 0 { + return errors.New("negative Round") + } + if !m.Step.IsValid() { + return errors.New("invalid Step") + } + + // NOTE: SecondsSinceStartTime may be negative + + // LastCommitRound will be -1 for the initial height, but we don't know what height this is + // since it can be specified in genesis. The reactor will have to validate this via + // ValidateHeight(). + if m.LastCommitRound < -1 { + return errors.New("invalid LastCommitRound (cannot be < -1)") + } + + return nil +} + +// ValidateHeight validates the height given the chain's initial height. +func (m *NewRoundStepMessage) ValidateHeight(initialHeight int64) error { + if m.Height < initialHeight { + return fmt.Errorf("invalid Height %v (lower than initial height %v)", + m.Height, initialHeight) + } + if m.Height == initialHeight && m.LastCommitRound != -1 { + return fmt.Errorf("invalid LastCommitRound %v (must be -1 for initial height %v)", + m.LastCommitRound, initialHeight) + } + if m.Height > initialHeight && m.LastCommitRound < 0 { + return fmt.Errorf("LastCommitRound can only be negative for initial height %v", // nolint + initialHeight) + } + return nil +} + +// String returns a string representation. +func (m *NewRoundStepMessage) String() string { + return fmt.Sprintf("[NewRoundStep H:%v R:%v S:%v LCR:%v]", + m.Height, m.Round, m.Step, m.LastCommitRound) +} + +//------------------------------------- + +// NewValidBlockMessage is sent when a validator observes a valid block B in some round r, +// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. +// In case the block is also committed, then IsCommit flag is set to true. +type NewValidBlockMessage struct { + Height int64 + Round int32 + BlockPartSetHeader types.PartSetHeader + BlockParts *bits.BitArray + IsCommit bool +} + +// ValidateBasic performs basic validation. +func (m *NewValidBlockMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + if m.Round < 0 { + return errors.New("negative Round") + } + if err := m.BlockPartSetHeader.ValidateBasic(); err != nil { + return fmt.Errorf("wrong BlockPartSetHeader: %v", err) + } + if m.BlockParts.Size() == 0 { + return errors.New("empty blockParts") + } + if m.BlockParts.Size() != int(m.BlockPartSetHeader.Total) { + return fmt.Errorf("blockParts bit array size %d not equal to BlockPartSetHeader.Total %d", + m.BlockParts.Size(), + m.BlockPartSetHeader.Total) + } + if m.BlockParts.Size() > int(types.MaxBlockPartsCount) { + return fmt.Errorf("blockParts bit array is too big: %d, max: %d", m.BlockParts.Size(), types.MaxBlockPartsCount) + } + return nil +} + +// String returns a string representation. +func (m *NewValidBlockMessage) String() string { + return fmt.Sprintf("[ValidBlockMessage H:%v R:%v BP:%v BA:%v IsCommit:%v]", + m.Height, m.Round, m.BlockPartSetHeader, m.BlockParts, m.IsCommit) +} + +//------------------------------------- + +// ProposalMessage is sent when a new block is proposed. +type ProposalMessage struct { + Proposal *types.Proposal +} + +// ValidateBasic performs basic validation. 
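+// For ProposalMessage this simply defers to the embedded Proposal's own
+// checks (height, round, POLRound, and BlockID validity); note that it
+// assumes m.Proposal is non-nil, since a nil Proposal would panic here.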
+func (m *ProposalMessage) ValidateBasic() error { + return m.Proposal.ValidateBasic() +} + +// String returns a string representation. +func (m *ProposalMessage) String() string { + return fmt.Sprintf("[Proposal %v]", m.Proposal) +} + +//------------------------------------- + +// ProposalPOLMessage is sent when a previous proposal is re-proposed. +type ProposalPOLMessage struct { + Height int64 + ProposalPOLRound int32 + ProposalPOL *bits.BitArray +} + +// ValidateBasic performs basic validation. +func (m *ProposalPOLMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + if m.ProposalPOLRound < 0 { + return errors.New("negative ProposalPOLRound") + } + if m.ProposalPOL.Size() == 0 { + return errors.New("empty ProposalPOL bit array") + } + if m.ProposalPOL.Size() > types.MaxVotesCount { + return fmt.Errorf("proposalPOL bit array is too big: %d, max: %d", m.ProposalPOL.Size(), types.MaxVotesCount) + } + return nil +} + +// String returns a string representation. +func (m *ProposalPOLMessage) String() string { + return fmt.Sprintf("[ProposalPOL H:%v POLR:%v POL:%v]", m.Height, m.ProposalPOLRound, m.ProposalPOL) +} + +//------------------------------------- + +// BlockPartMessage is sent when gossiping a piece of the proposed block. +type BlockPartMessage struct { + Height int64 + Round int32 + Part *types.Part +} + +// ValidateBasic performs basic validation. +func (m *BlockPartMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + if m.Round < 0 { + return errors.New("negative Round") + } + if err := m.Part.ValidateBasic(); err != nil { + return fmt.Errorf("wrong Part: %v", err) + } + return nil +} + +// String returns a string representation. +func (m *BlockPartMessage) String() string { + return fmt.Sprintf("[BlockPart H:%v R:%v P:%v]", m.Height, m.Round, m.Part) +} + +//------------------------------------- + +// VoteMessage is sent when voting for a proposal (or lack thereof). +type VoteMessage struct { + Vote *types.Vote +} + +// ValidateBasic performs basic validation. +func (m *VoteMessage) ValidateBasic() error { + return m.Vote.ValidateBasic() +} + +// String returns a string representation. +func (m *VoteMessage) String() string { + return fmt.Sprintf("[Vote %v]", m.Vote) +} + +//------------------------------------- + +// HasVoteMessage is sent to indicate that a particular vote has been received. +type HasVoteMessage struct { + Height int64 + Round int32 + Type tmproto.SignedMsgType + Index int32 +} + +// ValidateBasic performs basic validation. +func (m *HasVoteMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + if m.Round < 0 { + return errors.New("negative Round") + } + if !types.IsVoteTypeValid(m.Type) { + return errors.New("invalid Type") + } + if m.Index < 0 { + return errors.New("negative Index") + } + return nil +} + +// String returns a string representation. +func (m *HasVoteMessage) String() string { + return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v}]", m.Index, m.Height, m.Round, m.Type) +} + +//------------------------------------- + +// VoteSetMaj23Message is sent to indicate that a given BlockID has seen +2/3 votes. +type VoteSetMaj23Message struct { + Height int64 + Round int32 + Type tmproto.SignedMsgType + BlockID types.BlockID +} + +// ValidateBasic performs basic validation.
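+// For VoteSetMaj23Message this checks the height/round bounds, the vote type,
+// and that the claimed BlockID is well-formed. For example (values
+// illustrative), {Height: 10, Round: 0, Type: PrecommitType, BlockID: b}
+// announces that 2/3+ precommits for block b were seen at height 10, round 0.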
+func (m *VoteSetMaj23Message) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + if m.Round < 0 { + return errors.New("negative Round") + } + if !types.IsVoteTypeValid(m.Type) { + return errors.New("invalid Type") + } + if err := m.BlockID.ValidateBasic(); err != nil { + return fmt.Errorf("wrong BlockID: %v", err) + } + return nil +} + +// String returns a string representation. +func (m *VoteSetMaj23Message) String() string { + return fmt.Sprintf("[VSM23 %v/%02d/%v %v]", m.Height, m.Round, m.Type, m.BlockID) +} + +//------------------------------------- + +// VoteSetBitsMessage is sent to communicate the bit-array of votes seen for the BlockID. +type VoteSetBitsMessage struct { + Height int64 + Round int32 + Type tmproto.SignedMsgType + BlockID types.BlockID + Votes *bits.BitArray +} + +// ValidateBasic performs basic validation. +func (m *VoteSetBitsMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("negative Height") + } + if !types.IsVoteTypeValid(m.Type) { + return errors.New("invalid Type") + } + if err := m.BlockID.ValidateBasic(); err != nil { + return fmt.Errorf("wrong BlockID: %v", err) + } + // NOTE: Votes.Size() can be zero if the node does not have any + if m.Votes.Size() > types.MaxVotesCount { + return fmt.Errorf("votes bit array is too big: %d, max: %d", m.Votes.Size(), types.MaxVotesCount) + } + return nil +} + +// String returns a string representation. +func (m *VoteSetBitsMessage) String() string { + return fmt.Sprintf("[VSB %v/%02d/%v %v %v]", m.Height, m.Round, m.Type, m.BlockID, m.Votes) +} + +//------------------------------------- diff --git a/test/maverick/consensus/replay.go b/test/maverick/consensus/replay.go new file mode 100644 index 000000000..bfec9e96d --- /dev/null +++ b/test/maverick/consensus/replay.go @@ -0,0 +1,533 @@ +package consensus + +import ( + "bytes" + "fmt" + "hash/crc32" + "io" + "reflect" + "time" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" +) + +var crc32c = crc32.MakeTable(crc32.Castagnoli) + +// Functionality to replay blocks and messages on recovery from a crash. +// There are two general failure scenarios: +// +// 1. failure during consensus +// 2. failure while applying the block +// +// The former is handled by the WAL, the latter by the proxyApp Handshake on +// restart, which ultimately hands off the work to the WAL. + +//----------------------------------------- +// 1. Recover from failure during consensus +// (by replaying messages from the WAL) +//----------------------------------------- + +// Unmarshal and apply a single message to the consensus state as if it were +// received in receiveRoutine. Lines that start with "#" are ignored. +// NOTE: receiveRoutine should not be running. +func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscription) error { + // Skip meta messages which exist for demarcating boundaries. 
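+	// A WAL is, roughly, a stream of consensus messages punctuated by height
+	// markers, e.g. (sketch):
+	//
+	//	...msgs for height 1...
+	//	EndHeightMessage{1}   // rendered in the file as "#ENDHEIGHT: 1"
+	//	...msgs for height 2...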
+ if _, ok := msg.Msg.(EndHeightMessage); ok { + return nil + } + + // for logging + switch m := msg.Msg.(type) { + case types.EventDataRoundState: + cs.Logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step) + // these are playback checks + ticker := time.After(time.Second * 2) + if newStepSub != nil { + select { + case stepMsg := <-newStepSub.Out(): + m2 := stepMsg.Data().(types.EventDataRoundState) + if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step { + return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m) + } + case <-newStepSub.Cancelled(): + return fmt.Errorf("failed to read off newStepSub.Out(). newStepSub was cancelled") + case <-ticker: + return fmt.Errorf("failed to read off newStepSub.Out()") + } + } + case msgInfo: + peerID := m.PeerID + if peerID == "" { + peerID = "local" + } + switch msg := m.Msg.(type) { + case *ProposalMessage: + p := msg.Proposal + cs.Logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header", + p.BlockID.PartSetHeader, "pol", p.POLRound, "peer", peerID) + case *BlockPartMessage: + cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID) + case *VoteMessage: + v := msg.Vote + cs.Logger.Info("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type, + "blockID", v.BlockID, "peer", peerID) + } + + cs.handleMsg(m) + case timeoutInfo: + cs.Logger.Info("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration) + cs.handleTimeout(m, cs.RoundState) + default: + return fmt.Errorf("replay: Unknown TimedWALMessage type: %v", reflect.TypeOf(msg.Msg)) + } + return nil +} + +// Replay only those messages since the last block. `timeoutRoutine` should +// run concurrently to read off tickChan. +func (cs *State) catchupReplay(csHeight int64) error { + + // Set replayMode to true so we don't log signing errors. + cs.replayMode = true + defer func() { cs.replayMode = false }() + + // Ensure that #ENDHEIGHT for this height doesn't exist. + // NOTE: This is just a sanity check. As far as we know things work fine + // without it, and Handshake could reuse State if it weren't for + // this check (since we can crash after writing #ENDHEIGHT). + // + // Ignore data corruption errors since this is a sanity check. + gr, found, err := cs.wal.SearchForEndHeight(csHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true}) + if err != nil { + return err + } + if gr != nil { + if err := gr.Close(); err != nil { + return err + } + } + if found { + return fmt.Errorf("wal should not contain #ENDHEIGHT %d", csHeight) + } + + // Search for last height marker. + // + // Ignore data corruption errors in previous heights because we only care about last height + if csHeight < cs.state.InitialHeight { + return fmt.Errorf("cannot replay height %v, below initial height %v", csHeight, cs.state.InitialHeight) + } + endHeight := csHeight - 1 + if csHeight == cs.state.InitialHeight { + endHeight = 0 + } + gr, found, err = cs.wal.SearchForEndHeight(endHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true}) + if err == io.EOF { + cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", endHeight) + } else if err != nil { + return err + } + if !found { + return fmt.Errorf("cannot replay height %d. 
WAL does not contain #ENDHEIGHT for %d", csHeight, endHeight) + } + defer gr.Close() + + cs.Logger.Info("Catchup by replaying consensus messages", "height", csHeight) + + var msg *TimedWALMessage + dec := WALDecoder{gr} + +LOOP: + for { + msg, err = dec.Decode() + switch { + case err == io.EOF: + break LOOP + case IsDataCorruptionError(err): + cs.Logger.Error("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight) + return err + case err != nil: + return err + } + + // NOTE: since the priv key is set when the msgs are received + // it will attempt to eg double sign but we can just ignore it + // since the votes will be replayed and we'll get to the next step + if err := cs.readReplayMessage(msg, nil); err != nil { + return err + } + } + cs.Logger.Info("Replay: Done") + return nil +} + +//-------------------------------------------------------------------------------- + +// Parses marker lines of the form: +// #ENDHEIGHT: 12345 +/* +func makeHeightSearchFunc(height int64) auto.SearchFunc { + return func(line string) (int, error) { + line = strings.TrimRight(line, "\n") + parts := strings.Split(line, " ") + if len(parts) != 2 { + return -1, errors.New("line did not have 2 parts") + } + i, err := strconv.Atoi(parts[1]) + if err != nil { + return -1, errors.New("failed to parse INFO: " + err.Error()) + } + if height < i { + return 1, nil + } else if height == i { + return 0, nil + } else { + return -1, nil + } + } +}*/ + +//--------------------------------------------------- +// 2. Recover from failure while applying the block. +// (by handshaking with the app to figure out where +// we were last, and using the WAL to recover there.) +//--------------------------------------------------- + +type Handshaker struct { + stateStore sm.Store + initialState sm.State + store sm.BlockStore + eventBus types.BlockEventPublisher + genDoc *types.GenesisDoc + logger log.Logger + + nBlocks int // number of blocks applied to the state +} + +func NewHandshaker(stateStore sm.Store, state sm.State, + store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker { + + return &Handshaker{ + stateStore: stateStore, + initialState: state, + store: store, + eventBus: types.NopEventBus{}, + genDoc: genDoc, + logger: log.NewNopLogger(), + nBlocks: 0, + } +} + +func (h *Handshaker) SetLogger(l log.Logger) { + h.logger = l +} + +// SetEventBus - sets the event bus for publishing block related events. +// If not called, it defaults to types.NopEventBus. +func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) { + h.eventBus = eventBus +} + +// NBlocks returns the number of blocks applied to the state. +func (h *Handshaker) NBlocks() int { + return h.nBlocks +} + +// TODO: retry the handshake/replay if it fails ? +func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { + + // Handshake is done via ABCI Info on the query conn. + res, err := proxyApp.Query().InfoSync(proxy.RequestInfo) + if err != nil { + return fmt.Errorf("error calling Info: %v", err) + } + + blockHeight := res.LastBlockHeight + if blockHeight < 0 { + return fmt.Errorf("got a negative last block height (%d) from the app", blockHeight) + } + appHash := res.LastBlockAppHash + + h.logger.Info("ABCI Handshake App Info", + "height", blockHeight, + "hash", fmt.Sprintf("%X", appHash), + "software-version", res.Version, + "protocol-version", res.AppVersion, + ) + + // Only set the version if there is no existing state. 
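+	// (i.e. we are at genesis, so the app's AppVersion from Info is
+	// authoritative; a hypothetical app reporting AppVersion=1 would have
+	// that value recorded into state.Version.Consensus.App below)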
+ if h.initialState.LastBlockHeight == 0 { + h.initialState.Version.Consensus.App = res.AppVersion + } + + // Replay blocks up to the latest in the blockstore. + _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) + if err != nil { + return fmt.Errorf("error on replay: %v", err) + } + + h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", + "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash)) + + // TODO: (on restart) replay mempool + + return nil +} + +// ReplayBlocks replays all blocks since appBlockHeight and ensures the result +// matches the current state. +// Returns the final AppHash or an error. +func (h *Handshaker) ReplayBlocks( + state sm.State, + appHash []byte, + appBlockHeight int64, + proxyApp proxy.AppConns, +) ([]byte, error) { + storeBlockBase := h.store.Base() + storeBlockHeight := h.store.Height() + stateBlockHeight := state.LastBlockHeight + h.logger.Info( + "ABCI Replay Blocks", + "appHeight", + appBlockHeight, + "storeHeight", + storeBlockHeight, + "stateHeight", + stateBlockHeight) + + // If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain. + if appBlockHeight == 0 { + validators := make([]*types.Validator, len(h.genDoc.Validators)) + for i, val := range h.genDoc.Validators { + validators[i] = types.NewValidator(val.PubKey, val.Power) + } + validatorSet := types.NewValidatorSet(validators) + nextVals := types.TM2PB.ValidatorUpdates(validatorSet) + csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams) + req := abci.RequestInitChain{ + Time: h.genDoc.GenesisTime, + ChainId: h.genDoc.ChainID, + InitialHeight: h.genDoc.InitialHeight, + ConsensusParams: csParams, + Validators: nextVals, + AppStateBytes: h.genDoc.AppState, + } + res, err := proxyApp.Consensus().InitChainSync(req) + if err != nil { + return nil, err + } + + appHash = res.AppHash + + if stateBlockHeight == 0 { // we only update state when we are in initial state + // If the app did not return an app hash, we keep the one set from the genesis doc in + // the state. We don't set appHash since we don't want the genesis doc app hash + // recorded in the genesis block. We should probably just remove GenesisDoc.AppHash. + if len(res.AppHash) > 0 { + state.AppHash = res.AppHash + } + // If the app returned validators or consensus params, update the state. + if len(res.Validators) > 0 { + vals, err := types.PB2TM.ValidatorUpdates(res.Validators) + if err != nil { + return nil, err + } + state.Validators = types.NewValidatorSet(vals) + state.NextValidators = types.NewValidatorSet(vals).CopyIncrementProposerPriority(1) + } else if len(h.genDoc.Validators) == 0 { + // If validator set is not set in genesis and still empty after InitChain, exit. + return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain") + } + + if res.ConsensusParams != nil { + state.ConsensusParams = types.UpdateConsensusParams(state.ConsensusParams, res.ConsensusParams) + state.Version.Consensus.App = state.ConsensusParams.Version.AppVersion + } + // We update the last results hash with the empty hash, to conform with RFC-6962. + state.LastResultsHash = merkle.HashFromByteSlices(nil) + if err := h.stateStore.Save(state); err != nil { + return nil, err + } + } + } + + // First handle edge cases and constraints on the storeBlockHeight and storeBlockBase. 
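+	// In sketch form, the invariants enforced below are: the app is never
+	// ahead of the store (app <= store), and the store is at most one block
+	// ahead of the state (state <= store <= state+1). E.g. a store at height
+	// 10 implies a state at height 9 or 10, and an app at height <= 10.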
+ switch { + case storeBlockHeight == 0: + assertAppHashEqualsOneFromState(appHash, state) + return appHash, nil + + case appBlockHeight == 0 && state.InitialHeight < storeBlockBase: + // the app has no state, and the block store is truncated above the initial height + return appHash, sm.ErrAppBlockHeightTooLow{AppHeight: appBlockHeight, StoreBase: storeBlockBase} + + case appBlockHeight > 0 && appBlockHeight < storeBlockBase-1: + // the app is too far behind truncated store (can be 1 behind since we replay the next) + return appHash, sm.ErrAppBlockHeightTooLow{AppHeight: appBlockHeight, StoreBase: storeBlockBase} + + case storeBlockHeight < appBlockHeight: + // the app should never be ahead of the store (but this is under app's control) + return appHash, sm.ErrAppBlockHeightTooHigh{CoreHeight: storeBlockHeight, AppHeight: appBlockHeight} + + case storeBlockHeight < stateBlockHeight: + // the state should never be ahead of the store (this is under tendermint's control) + panic(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight)) + + case storeBlockHeight > stateBlockHeight+1: + // store should be at most one ahead of the state (this is under tendermint's control) + panic(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1)) + } + + var err error + // Now either store is equal to state, or one ahead. + // For each, consider all cases of where the app could be, given app <= store + if storeBlockHeight == stateBlockHeight { + // Tendermint ran Commit and saved the state. + // Either the app is asking for replay, or we're all synced up. + if appBlockHeight < storeBlockHeight { + // the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store) + return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, false) + + } else if appBlockHeight == storeBlockHeight { + // We're good! + assertAppHashEqualsOneFromState(appHash, state) + return appHash, nil + } + + } else if storeBlockHeight == stateBlockHeight+1 { + // We saved the block in the store but haven't updated the state, + // so we'll need to replay a block using the WAL. + switch { + case appBlockHeight < stateBlockHeight: + // the app is further behind than it should be, so replay blocks + // but leave the last block to go through the WAL + return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, true) + + case appBlockHeight == stateBlockHeight: + // We haven't run Commit (both the state and app are one block behind), + // so replayBlock with the real app. + // NOTE: We could instead use the cs.WAL on cs.Start, + // but we'd have to allow the WAL to replay a block that wrote its #ENDHEIGHT + h.logger.Info("Replay last block using real app") + state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus()) + return state.AppHash, err + + case appBlockHeight == storeBlockHeight: + // We ran Commit, but didn't save the state, so replayBlock with mock app. + abciResponses, err := h.stateStore.LoadABCIResponses(storeBlockHeight) + if err != nil { + return nil, err + } + mockApp := newMockProxyApp(appHash, abciResponses) + h.logger.Info("Replay last block using mock app") + state, err = h.replayBlock(state, storeBlockHeight, mockApp) + return state.AppHash, err + } + + } + + panic(fmt.Sprintf("uncovered case! 
appHeight: %d, storeHeight: %d, stateHeight: %d", + appBlockHeight, storeBlockHeight, stateBlockHeight)) +} + +func (h *Handshaker) replayBlocks( + state sm.State, + proxyApp proxy.AppConns, + appBlockHeight, + storeBlockHeight int64, + mutateState bool) ([]byte, error) { + // App is further behind than it should be, so we need to replay blocks. + // We replay all blocks from appBlockHeight+1. + // + // Note that we don't have an old version of the state, + // so we by-pass state validation/mutation using sm.ExecCommitBlock. + // This also means we won't be saving validator sets if they change during this period. + // TODO: Load the historical information to fix this and just use state.ApplyBlock + // + // If mutateState == true, the final block is replayed with h.replayBlock() + + var appHash []byte + var err error + finalBlock := storeBlockHeight + if mutateState { + finalBlock-- + } + firstBlock := appBlockHeight + 1 + if firstBlock == 1 { + firstBlock = state.InitialHeight + } + for i := firstBlock; i <= finalBlock; i++ { + h.logger.Info("Applying block", "height", i) + block := h.store.LoadBlock(i) + // Extra check to ensure the app was not changed in a way it shouldn't have. + if len(appHash) > 0 { + assertAppHashEqualsOneFromBlock(appHash, block) + } + + appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight) + if err != nil { + return nil, err + } + + h.nBlocks++ + } + + if mutateState { + // sync the final block + state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus()) + if err != nil { + return nil, err + } + appHash = state.AppHash + } + + assertAppHashEqualsOneFromState(appHash, state) + return appHash, nil +} + +// ApplyBlock on the proxyApp with the last block. +func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.AppConnConsensus) (sm.State, error) { + block := h.store.LoadBlock(height) + meta := h.store.LoadBlockMeta(height) + + // Use stubs for both mempool and evidence pool since no transactions nor + // evidence are needed here - block already exists. + blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{}) + blockExec.SetEventBus(h.eventBus) + + var err error + state, _, err = blockExec.ApplyBlock(state, meta.BlockID, block) + if err != nil { + return sm.State{}, err + } + + h.nBlocks++ + + return state, nil +} + +func assertAppHashEqualsOneFromBlock(appHash []byte, block *types.Block) { + if !bytes.Equal(appHash, block.AppHash) { + panic(fmt.Sprintf(`block.AppHash does not match AppHash after replay. Got %X, expected %X. + +Block: %v +`, + appHash, block.AppHash, block)) + } +} + +func assertAppHashEqualsOneFromState(appHash []byte, state sm.State) { + if !bytes.Equal(appHash, state.AppHash) { + panic(fmt.Sprintf(`state.AppHash does not match AppHash after replay. Got +%X, expected %X. 
+ +State: %v + +Did you reset Tendermint without resetting your application's data?`, + appHash, state.AppHash, state)) + } +} diff --git a/test/maverick/consensus/replay_file.go b/test/maverick/consensus/replay_file.go new file mode 100644 index 000000000..0a02031f8 --- /dev/null +++ b/test/maverick/consensus/replay_file.go @@ -0,0 +1,338 @@ +package consensus + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" + + dbm "github.com/tendermint/tm-db" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" + tmos "github.com/tendermint/tendermint/libs/os" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/types" +) + +const ( + // event bus subscriber + subscriber = "replay-file" +) + +//-------------------------------------------------------- +// replay messages interactively or all at once + +// replay the wal file +func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console bool) { + consensusState := newConsensusStateForReplay(config, csConfig) + + if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil { + tmos.Exit(fmt.Sprintf("Error during consensus replay: %v", err)) + } +} + +// Replay msgs in file or start the console +func (cs *State) ReplayFile(file string, console bool) error { + + if cs.IsRunning() { + return errors.New("cs is already running, cannot replay") + } + if cs.wal != nil { + return errors.New("cs wal is open, cannot replay") + } + + cs.startForReplay() + + // ensure all new step events are regenerated as expected + + ctx := context.Background() + newStepSub, err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep) + if err != nil { + return fmt.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep) + } + defer func() { + if err := cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep); err != nil { + cs.Logger.Error("Error unsubscribing to event bus", "err", err) + } + }() + + // just open the file for reading, no need to use wal + fp, err := os.OpenFile(file, os.O_RDONLY, 0600) + if err != nil { + return err + } + + pb := newPlayback(file, fp, cs, cs.state.Copy()) + defer pb.fp.Close() + + var nextN int // apply N msgs in a row + var msg *TimedWALMessage + for { + if nextN == 0 && console { + nextN = pb.replayConsoleLoop() + } + + msg, err = pb.dec.Decode() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + if err := pb.cs.readReplayMessage(msg, newStepSub); err != nil { + return err + } + + if nextN > 0 { + nextN-- + } + pb.count++ + } +} + +//------------------------------------------------ +// playback manager + +type playback struct { + cs *State + + fp *os.File + dec *WALDecoder + count int // how many lines/msgs into the file are we + + // replays can be reset to beginning + fileName string // so we can close/reopen the file + genesisState sm.State // so the replay session knows where to restart from +} + +func newPlayback(fileName string, fp *os.File, cs *State, genState sm.State) *playback { + return &playback{ + cs: cs, + fp: fp, + fileName: fileName, + genesisState: genState, + dec: NewWALDecoder(fp), + } +} + +// go back count steps by resetting the state and running (pb.count - count) steps +func (pb *playback) replayReset(count int, newStepSub types.Subscription) error { + if err := pb.cs.Stop(); err != nil { + return err + } + 
pb.cs.Wait() + + newCS := NewState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec, + pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool, map[int64]Misbehavior{}) + newCS.SetEventBus(pb.cs.eventBus) + newCS.startForReplay() + + if err := pb.fp.Close(); err != nil { + return err + } + fp, err := os.OpenFile(pb.fileName, os.O_RDONLY, 0600) + if err != nil { + return err + } + pb.fp = fp + pb.dec = NewWALDecoder(fp) + count = pb.count - count + fmt.Printf("Resetting from %d to %d\n", pb.count, count) + pb.count = 0 + pb.cs = newCS + var msg *TimedWALMessage + for i := 0; i < count; i++ { + msg, err = pb.dec.Decode() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + if err := pb.cs.readReplayMessage(msg, newStepSub); err != nil { + return err + } + pb.count++ + } + return nil +} + +func (cs *State) startForReplay() { + cs.Logger.Error("Replay commands are disabled until someone updates them and writes tests") + /* TODO:! + // since we replay tocks we just ignore ticks + go func() { + for { + select { + case <-cs.tickChan: + case <-cs.Quit: + return + } + } + }()*/ +} + +// console function for parsing input and running commands +func (pb *playback) replayConsoleLoop() int { + for { + fmt.Printf("> ") + bufReader := bufio.NewReader(os.Stdin) + line, more, err := bufReader.ReadLine() + if more { + tmos.Exit("input is too long") + } else if err != nil { + tmos.Exit(err.Error()) + } + + tokens := strings.Split(string(line), " ") + if len(tokens) == 0 { + continue + } + + switch tokens[0] { + case "next": + // "next" -> replay next message + // "next N" -> replay next N messages + + if len(tokens) == 1 { + return 0 + } + i, err := strconv.Atoi(tokens[1]) + if err != nil { + fmt.Println("next takes an integer argument") + } else { + return i + } + + case "back": + // "back" -> go back one message + // "back N" -> go back N messages + + // NOTE: "back" is not supported in the state machine design, + // so we restart and replay up to the requested point + + ctx := context.Background() + // ensure all new step events are regenerated as expected + + newStepSub, err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep) + if err != nil { + tmos.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)) + } + defer func() { + if err := pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep); err != nil { + pb.cs.Logger.Error("Error unsubscribing from eventBus", "err", err) + } + }() + + if len(tokens) == 1 { + if err := pb.replayReset(1, newStepSub); err != nil { + pb.cs.Logger.Error("Replay reset error", "err", err) + } + } else { + i, err := strconv.Atoi(tokens[1]) + if err != nil { + fmt.Println("back takes an integer argument") + } else if i > pb.count { + fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count) + } else if err := pb.replayReset(i, newStepSub); err != nil { + pb.cs.Logger.Error("Replay reset error", "err", err) + } + } + + case "rs": + // "rs" -> print entire round state + // "rs short" -> print height/round/step + // "rs <field>" -> print another field of the round state + + rs := pb.cs.RoundState + if len(tokens) == 1 { + fmt.Println(rs) + } else { + switch tokens[1] { + case "short": + fmt.Printf("%v/%v/%v\n", rs.Height, rs.Round, rs.Step) + case "validators": + fmt.Println(rs.Validators) + case "proposal": + fmt.Println(rs.Proposal) + case "proposal_block": + fmt.Printf("%v %v\n", rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort()) + case "locked_round": + 
fmt.Println(rs.LockedRound) + case "locked_block": + fmt.Printf("%v %v\n", rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort()) + case "votes": + fmt.Println(rs.Votes.StringIndented(" ")) + + default: + fmt.Println("Unknown option", tokens[1]) + } + } + case "n": + fmt.Println(pb.count) + } + } +} + +//-------------------------------------------------------------------------------- + +// convenience for replay mode +func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *State { + dbType := dbm.BackendType(config.DBBackend) + // Get BlockStore + blockStoreDB, err := dbm.NewDB("blockstore", dbType, config.DBDir()) + if err != nil { + tmos.Exit(err.Error()) + } + blockStore := store.NewBlockStore(blockStoreDB) + + // Get State + stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + if err != nil { + tmos.Exit(err.Error()) + } + stateStore := sm.NewStore(stateDB) + gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile()) + if err != nil { + tmos.Exit(err.Error()) + } + state, err := sm.MakeGenesisState(gdoc) + if err != nil { + tmos.Exit(err.Error()) + } + + // Create proxyAppConn connection (consensus, mempool, query) + clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()) + proxyApp := proxy.NewAppConns(clientCreator) + err = proxyApp.Start() + if err != nil { + tmos.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err)) + } + + eventBus := types.NewEventBus() + if err := eventBus.Start(); err != nil { + tmos.Exit(fmt.Sprintf("Failed to start event bus: %v", err)) + } + + handshaker := NewHandshaker(stateStore, state, blockStore, gdoc) + handshaker.SetEventBus(eventBus) + err = handshaker.Handshake(proxyApp) + if err != nil { + tmos.Exit(fmt.Sprintf("Error on handshake: %v", err)) + } + + mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{} + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) + + consensusState := NewState(csConfig, state.Copy(), blockExec, + blockStore, mempool, evpool, map[int64]Misbehavior{}) + + consensusState.SetEventBus(eventBus) + return consensusState +} diff --git a/test/maverick/consensus/replay_stubs.go b/test/maverick/consensus/replay_stubs.go new file mode 100644 index 000000000..08974a67e --- /dev/null +++ b/test/maverick/consensus/replay_stubs.go @@ -0,0 +1,90 @@ +package consensus + +import ( + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/clist" + mempl "github.com/tendermint/tendermint/mempool" + tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + "github.com/tendermint/tendermint/proxy" + "github.com/tendermint/tendermint/types" +) + +//----------------------------------------------------------------------------- + +type emptyMempool struct{} + +var _ mempl.Mempool = emptyMempool{} + +func (emptyMempool) Lock() {} +func (emptyMempool) Unlock() {} +func (emptyMempool) Size() int { return 0 } +func (emptyMempool) CheckTx(_ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error { + return nil +} +func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } +func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } +func (emptyMempool) Update( + _ int64, + _ types.Txs, + _ []*abci.ResponseDeliverTx, + _ mempl.PreCheckFunc, + _ mempl.PostCheckFunc, +) error { + return nil +} +func (emptyMempool) Flush() {} +func (emptyMempool) FlushAppConn() error { return nil } +func (emptyMempool) TxsAvailable() <-chan 
struct{} { return make(chan struct{}) } +func (emptyMempool) EnableTxsAvailable() {} +func (emptyMempool) TxsBytes() int64 { return 0 } + +func (emptyMempool) TxsFront() *clist.CElement { return nil } +func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } + +func (emptyMempool) InitWAL() error { return nil } +func (emptyMempool) CloseWAL() {} + +//----------------------------------------------------------------------------- +// mockProxyApp uses ABCIResponses to give the right results. +// +// Useful because we don't want to call Commit() twice for the same block on +// the real app. + +func newMockProxyApp(appHash []byte, abciResponses *tmstate.ABCIResponses) proxy.AppConnConsensus { + clientCreator := proxy.NewLocalClientCreator(&mockProxyApp{ + appHash: appHash, + abciResponses: abciResponses, + }) + cli, _ := clientCreator.NewABCIClient() + err := cli.Start() + if err != nil { + panic(err) + } + return proxy.NewAppConnConsensus(cli) +} + +type mockProxyApp struct { + abci.BaseApplication + + appHash []byte + txCount int + abciResponses *tmstate.ABCIResponses +} + +func (mock *mockProxyApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { + r := mock.abciResponses.DeliverTxs[mock.txCount] + mock.txCount++ + if r == nil { + return abci.ResponseDeliverTx{} + } + return *r +} + +func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { + mock.txCount = 0 + return *mock.abciResponses.EndBlock +} + +func (mock *mockProxyApp) Commit() abci.ResponseCommit { + return abci.ResponseCommit{Data: mock.appHash} +} diff --git a/test/maverick/consensus/state.go b/test/maverick/consensus/state.go new file mode 100644 index 000000000..a1385bd1e --- /dev/null +++ b/test/maverick/consensus/state.go @@ -0,0 +1,1976 @@ +package consensus + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + "reflect" + "runtime/debug" + "sync" + "time" + + "github.com/gogo/protobuf/proto" + + cfg "github.com/tendermint/tendermint/config" + cstypes "github.com/tendermint/tendermint/consensus/types" + "github.com/tendermint/tendermint/crypto" + tmevents "github.com/tendermint/tendermint/libs/events" + "github.com/tendermint/tendermint/libs/fail" + tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/libs/log" + tmmath "github.com/tendermint/tendermint/libs/math" + tmos "github.com/tendermint/tendermint/libs/os" + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/p2p" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" +) + +// State handles execution of the consensus algorithm. +// It processes votes and proposals, and upon reaching agreement, +// commits blocks to the chain and executes them against the application. +// The internal state machine receives input from peers, the internal validator, and from a timer. +type State struct { + service.BaseService + + // config details + config *cfg.ConsensusConfig + privValidator types.PrivValidator // for signing votes + + // store blocks and commits + blockStore sm.BlockStore + + // create and execute blocks + blockExec *sm.BlockExecutor + + // notify us if txs are available + txNotifier txNotifier + + // add evidence to the pool + // when it's detected + evpool evidencePool + + // internal state + mtx sync.RWMutex + cstypes.RoundState + state sm.State // State until height-1. 
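+	// (i.e. while consensus is running for height H, cs.state still describes
+	// the chain as of the committed block at H-1; like the embedded
+	// RoundState above, it is guarded by mtx)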
+ + // state changes may be triggered by: msgs from peers, + // msgs from ourselves, or by timeouts + peerMsgQueue chan msgInfo + internalMsgQueue chan msgInfo + timeoutTicker TimeoutTicker + // privValidator pubkey, memoized for the duration of one block + // to avoid extra requests to HSM + privValidatorPubKey crypto.PubKey + + // information about added votes and block parts is written on this channel + // so statistics can be computed by the reactor + statsMsgQueue chan msgInfo + + // we use eventBus to trigger msg broadcasts in the reactor, + // and to notify external subscribers, e.g. through a websocket + eventBus *types.EventBus + + // a Write-Ahead Log ensures we can recover from any kind of crash + // and helps us avoid signing conflicting votes + wal WAL + replayMode bool // so we don't log signing errors during replay + doWALCatchup bool // determines if we even try to do the catchup + + // for tests where we want to limit the number of transitions the state makes + nSteps int + + // some functions can be overwritten for testing + decideProposal func(height int64, round int32) + + // closed when we finish shutting down + done chan struct{} + + // synchronous pubsub between consensus state and reactor. + // state only emits EventNewRoundStep and EventVote + evsw tmevents.EventSwitch + + // for reporting metrics + metrics *Metrics + + // misbehaviors mapped for each height (can't have more than one misbehavior per height) + misbehaviors map[int64]Misbehavior + + // the switch is passed to the state so that maverick misbehaviors can directly control which + // information they send to which nodes + sw *p2p.Switch +} + +// StateOption sets an optional parameter on the State. +type StateOption func(*State) + +// NewState returns a new State. +func NewState( + config *cfg.ConsensusConfig, + state sm.State, + blockExec *sm.BlockExecutor, + blockStore sm.BlockStore, + txNotifier txNotifier, + evpool evidencePool, + misbehaviors map[int64]Misbehavior, + options ...StateOption, +) *State { + cs := &State{ + config: config, + blockExec: blockExec, + blockStore: blockStore, + txNotifier: txNotifier, + peerMsgQueue: make(chan msgInfo, msgQueueSize), + internalMsgQueue: make(chan msgInfo, msgQueueSize), + timeoutTicker: NewTimeoutTicker(), + statsMsgQueue: make(chan msgInfo, msgQueueSize), + done: make(chan struct{}), + doWALCatchup: true, + wal: nilWAL{}, + evpool: evpool, + evsw: tmevents.NewEventSwitch(), + metrics: NopMetrics(), + misbehaviors: misbehaviors, + } + // set function defaults (may be overwritten before calling Start) + cs.decideProposal = cs.defaultDecideProposal + + // We have no votes, so reconstruct LastCommit from SeenCommit. + if state.LastBlockHeight > 0 { + cs.reconstructLastCommit(state) + } + + cs.updateToState(state) + + // Don't call scheduleRound0 yet. + // We do that upon Start(). + + cs.BaseService = *service.NewBaseService(nil, "State", cs) + for _, option := range options { + option(cs) + } + return cs +} + +// I know this is not great but the maverick consensus state needs access to the peers +func (cs *State) SetSwitch(sw *p2p.Switch) { + cs.sw = sw +} + +// state transitions on complete-proposal, 2/3-any, 2/3-one +func (cs *State) handleMsg(mi msgInfo) { + cs.mtx.Lock() + defer cs.mtx.Unlock() + + var ( + added bool + err error + ) + msg, peerID := mi.Msg, mi.PeerID + switch msg := msg.(type) { + case *ProposalMessage: + // will not cause transition. 
+ // once proposal is set, we can receive block parts + // err = cs.setProposal(msg.Proposal) + if b, ok := cs.misbehaviors[cs.Height]; ok { + err = b.ReceiveProposal(cs, msg.Proposal) + } else { + err = defaultReceiveProposal(cs, msg.Proposal) + } + case *BlockPartMessage: + // if the proposal is complete, we'll enterPrevote or tryFinalizeCommit + added, err = cs.addProposalBlockPart(msg, peerID) + if added { + cs.statsMsgQueue <- mi + } + + if err != nil && msg.Round != cs.Round { + cs.Logger.Debug( + "Received block part from wrong round", + "height", + cs.Height, + "csRound", + cs.Round, + "blockRound", + msg.Round) + err = nil + } + case *VoteMessage: + // attempt to add the vote and dupeout the validator if it's a duplicate signature + // if the vote gives us a 2/3-any or 2/3-one, we transition + added, err = cs.tryAddVote(msg.Vote, peerID) + if added { + cs.statsMsgQueue <- mi + } + + // if err == ErrAddingVote { + // TODO: punish peer + // We probably don't want to stop the peer here. The vote does not + // necessarily come from a malicious peer but may just be broadcast by + // a typical peer. + // https://github.com/tendermint/tendermint/issues/1281 + // } + + // NOTE: the vote is broadcast to peers by the reactor listening + // for vote events + + // TODO: If rs.Height == vote.Height && rs.Round < vote.Round, + // the peer is sending us CatchupCommit precommits. + // We could make note of this and help filter in broadcastHasVoteMessage(). + default: + cs.Logger.Error("Unknown msg type", "type", reflect.TypeOf(msg)) + return + } + + if err != nil { + cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round, + "peer", peerID, "err", err, "msg", msg) + } +} + +// Enter (CreateEmptyBlocks): from enterNewRound(height,round) +// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): +// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval +// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool +func (cs *State) enterPropose(height int64, round int32) { + logger := cs.Logger.With("height", height, "round", round) + + if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) { + logger.Debug(fmt.Sprintf( + "enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", + height, + round, + cs.Height, + cs.Round, + cs.Step)) + return + } + logger.Info(fmt.Sprintf("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + + defer func() { + // Done enterPropose: + cs.updateRoundStep(round, cstypes.RoundStepPropose) + cs.newStep() + + // If we have the whole proposal + POL, then goto Prevote now. + // else, we'll enterPrevote when the rest of the proposal is received (in AddProposalBlockPart), + // or else after timeoutPropose + if cs.isProposalComplete() { + cs.enterPrevote(height, cs.Round) + } + }() + + if b, ok := cs.misbehaviors[cs.Height]; ok { + b.EnterPropose(cs, height, round) + } else { + defaultEnterPropose(cs, height, round) + } +} + +// Enter: `timeoutPropose` after entering Propose. +// Enter: proposal block and POL is ready. +// Prevote for LockedBlock if we're locked, or ProposalBlock if valid. +// Otherwise vote nil. +func (cs *State) enterPrevote(height int64, round int32) { + if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) { + cs.Logger.Debug(fmt.Sprintf( + "enterPrevote(%v/%v): Invalid args. 
Current step: %v/%v/%v", + height, + round, + cs.Height, + cs.Round, + cs.Step)) + return + } + + defer func() { + // Done enterPrevote: + cs.updateRoundStep(round, cstypes.RoundStepPrevote) + cs.newStep() + }() + + cs.Logger.Info(fmt.Sprintf("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + + // Sign and broadcast vote as necessary + if b, ok := cs.misbehaviors[cs.Height]; ok { + b.EnterPrevote(cs, height, round) + } else { + defaultEnterPrevote(cs, height, round) + } + + // Once `addVote` hits any +2/3 prevotes, we will go to PrevoteWait + // (so we have more time to try and collect +2/3 prevotes for a single block) +} + +// Enter: `timeoutPrevote` after any +2/3 prevotes. +// Enter: `timeoutPrecommit` after any +2/3 precommits. +// Enter: +2/3 precommits for block or nil. +// Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round) +// else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil, +// else, precommit nil otherwise. +func (cs *State) enterPrecommit(height int64, round int32) { + logger := cs.Logger.With("height", height, "round", round) + + if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) { + logger.Debug(fmt.Sprintf( + "enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", + height, + round, + cs.Height, + cs.Round, + cs.Step)) + return + } + + logger.Info(fmt.Sprintf("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + + defer func() { + // Done enterPrecommit: + cs.updateRoundStep(round, cstypes.RoundStepPrecommit) + cs.newStep() + }() + + if b, ok := cs.misbehaviors[cs.Height]; ok { + b.EnterPrecommit(cs, height, round) + } else { + defaultEnterPrecommit(cs, height, round) + } + +} + +func (cs *State) addVote( + vote *types.Vote, + peerID p2p.ID) (added bool, err error) { + cs.Logger.Debug( + "addVote", + "voteHeight", + vote.Height, + "voteType", + vote.Type, + "valIndex", + vote.ValidatorIndex, + "csHeight", + cs.Height, + ) + + // A precommit for the previous height? + // These come in while we wait timeoutCommit + if vote.Height+1 == cs.Height && vote.Type == tmproto.PrecommitType { + if cs.Step != cstypes.RoundStepNewHeight { + // Late precommit at prior height is ignored + cs.Logger.Debug("Precommit vote came in after commit timeout and has been ignored", "vote", vote) + return + } + added, err = cs.LastCommit.AddVote(vote) + if !added { + return + } + + cs.Logger.Info(fmt.Sprintf("Added to lastPrecommits: %v", cs.LastCommit.StringShort())) + _ = cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}) + cs.evsw.FireEvent(types.EventVote, vote) + + // if we can skip timeoutCommit and have all the votes now, + if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() { + // go straight to new round (skip timeout commit) + // cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight) + cs.enterNewRound(cs.Height, 0) + } + + return + } + + // Height mismatch is ignored. + // Not necessarily a bad peer, but not favourable behaviour. 
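+	// (precommits for cs.Height-1 were already absorbed into LastCommit
+	// above, so any other height mismatch is simply dropped here)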
+ if vote.Height != cs.Height { + cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "peerID", peerID) + return + } + + added, err = cs.Votes.AddVote(vote, peerID) + if !added { + // Either duplicate, or error upon cs.Votes.AddByIndex() + return + } + + _ = cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}) + cs.evsw.FireEvent(types.EventVote, vote) + + switch vote.Type { + case tmproto.PrevoteType: + if b, ok := cs.misbehaviors[cs.Height]; ok { + b.ReceivePrevote(cs, vote) + } else { + defaultReceivePrevote(cs, vote) + } + + case tmproto.PrecommitType: + if b, ok := cs.misbehaviors[cs.Height]; ok { + b.ReceivePrecommit(cs, vote) + } + defaultReceivePrecommit(cs, vote) + + default: + panic(fmt.Sprintf("Unexpected vote type %v", vote.Type)) + } + + return added, err +} + +//----------------------------------------------------------------------------- +// Errors + +var ( + ErrInvalidProposalSignature = errors.New("error invalid proposal signature") + ErrInvalidProposalPOLRound = errors.New("error invalid proposal POL round") + ErrAddingVote = errors.New("error adding vote") + ErrSignatureFoundInPastBlocks = errors.New("found signature from the same key") + + errPubKeyIsNotSet = errors.New("pubkey is not set. Look for \"Can't get private validator pubkey\" errors") +) + +//----------------------------------------------------------------------------- + +var ( + msgQueueSize = 1000 +) + +// msgs from the reactor which may update the state +type msgInfo struct { + Msg Message `json:"msg"` + PeerID p2p.ID `json:"peer_key"` +} + +// internally generated messages which may update the state +type timeoutInfo struct { + Duration time.Duration `json:"duration"` + Height int64 `json:"height"` + Round int32 `json:"round"` + Step cstypes.RoundStepType `json:"step"` +} + +func (ti *timeoutInfo) String() string { + return fmt.Sprintf("%v ; %d/%d %v", ti.Duration, ti.Height, ti.Round, ti.Step) +} + +// interface to the mempool +type txNotifier interface { + TxsAvailable() <-chan struct{} +} + +// interface to the evidence pool +type evidencePool interface { + // Adds consensus based evidence to the evidence pool where time is the time + // of the block where the offense occurred and the validator set is the current one. + AddEvidenceFromConsensus(evidence types.Evidence) error +} + +//---------------------------------------- +// Public interface + +// SetLogger implements Service. +func (cs *State) SetLogger(l log.Logger) { + cs.BaseService.Logger = l + cs.timeoutTicker.SetLogger(l) +} + +// SetEventBus sets event bus. +func (cs *State) SetEventBus(b *types.EventBus) { + cs.eventBus = b + cs.blockExec.SetEventBus(b) +} + +// StateMetrics sets the metrics. +func StateMetrics(metrics *Metrics) StateOption { + return func(cs *State) { cs.metrics = metrics } +} + +// String returns a string. +func (cs *State) String() string { + // better not to access shared variables + return "ConsensusState" +} + +// GetState returns a copy of the chain state. +func (cs *State) GetState() sm.State { + cs.mtx.RLock() + defer cs.mtx.RUnlock() + return cs.state.Copy() +} + +// GetLastHeight returns the last height committed. +// If there were no blocks, returns 0. +func (cs *State) GetLastHeight() int64 { + cs.mtx.RLock() + defer cs.mtx.RUnlock() + return cs.RoundState.Height - 1 +} + +// GetRoundState returns a shallow copy of the internal consensus state. 
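+// The copy is shallow, so pointer-valued fields (block parts, vote sets,
+// and so on) are still shared with the live state; callers should treat the
+// result as read-only.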
+func (cs *State) GetRoundState() *cstypes.RoundState { + cs.mtx.RLock() + rs := cs.RoundState // copy + cs.mtx.RUnlock() + return &rs +} + +// GetRoundStateJSON returns a json of RoundState. +func (cs *State) GetRoundStateJSON() ([]byte, error) { + cs.mtx.RLock() + defer cs.mtx.RUnlock() + return tmjson.Marshal(cs.RoundState) +} + +// GetRoundStateSimpleJSON returns a json of RoundStateSimple +func (cs *State) GetRoundStateSimpleJSON() ([]byte, error) { + cs.mtx.RLock() + defer cs.mtx.RUnlock() + return tmjson.Marshal(cs.RoundState.RoundStateSimple()) +} + +// GetValidators returns a copy of the current validators. +func (cs *State) GetValidators() (int64, []*types.Validator) { + cs.mtx.RLock() + defer cs.mtx.RUnlock() + return cs.state.LastBlockHeight, cs.state.Validators.Copy().Validators +} + +// SetPrivValidator sets the private validator account for signing votes. It +// immediately requests pubkey and caches it. +func (cs *State) SetPrivValidator(priv types.PrivValidator) { + cs.mtx.Lock() + defer cs.mtx.Unlock() + + cs.privValidator = priv + + if err := cs.updatePrivValidatorPubKey(); err != nil { + cs.Logger.Error("Can't get private validator pubkey", "err", err) + } +} + +// SetTimeoutTicker sets the local timer. It may be useful to overwrite for testing. +func (cs *State) SetTimeoutTicker(timeoutTicker TimeoutTicker) { + cs.mtx.Lock() + cs.timeoutTicker = timeoutTicker + cs.mtx.Unlock() +} + +// LoadCommit loads the commit for a given height. +func (cs *State) LoadCommit(height int64) *types.Commit { + cs.mtx.RLock() + defer cs.mtx.RUnlock() + if height == cs.blockStore.Height() { + return cs.blockStore.LoadSeenCommit(height) + } + return cs.blockStore.LoadBlockCommit(height) +} + +// OnStart loads the latest state via the WAL, and starts the timeout and +// receive routines. +func (cs *State) OnStart() error { + // We may set the WAL in testing before calling Start, so only OpenWAL if it's + // still the nilWAL. + if _, ok := cs.wal.(nilWAL); ok { + if err := cs.loadWalFile(); err != nil { + return err + } + } + + // We may have lost some votes if the process crashed. Reload from the + // consensus log to catch up. + if cs.doWALCatchup { + repairAttempted := false + LOOP: + for { + err := cs.catchupReplay(cs.Height) + switch { + case err == nil: + break LOOP + case !IsDataCorruptionError(err): + cs.Logger.Error("Error on catchup replay. Proceeding to start State anyway", "err", err) + break LOOP + case repairAttempted: + return err + } + + cs.Logger.Info("WAL file is corrupted. Attempting repair", "err", err) + + // 1) prep work + if err := cs.wal.Stop(); err != nil { + return err + } + repairAttempted = true + + // 2) backup original WAL file + corruptedFile := fmt.Sprintf("%s.CORRUPTED", cs.config.WalFile()) + if err := tmos.CopyFile(cs.config.WalFile(), corruptedFile); err != nil { + return err + } + cs.Logger.Info("Backed up WAL file", "src", cs.config.WalFile(), "dst", corruptedFile) + + // 3) try to repair (WAL file will be overwritten!) + if err := repairWalFile(corruptedFile, cs.config.WalFile()); err != nil { + cs.Logger.Error("Repair failed", "err", err) + return err + } + cs.Logger.Info("Successful repair") + + // reload WAL file + if err := cs.loadWalFile(); err != nil { + return err + } + } + } + + if err := cs.evsw.Start(); err != nil { + return err + } + + // we need the timeoutRoutine for replay so + // we don't block on the tick chan. 
+	// NOTE: we will get a build up of garbage go routines
+	// firing on the tockChan until the receiveRoutine is started
+	// to deal with them (by that point, at most one will be valid)
+	if err := cs.timeoutTicker.Start(); err != nil {
+		return err
+	}
+
+	// Double Signing Risk Reduction
+	if err := cs.checkDoubleSigningRisk(cs.Height); err != nil {
+		return err
+	}
+
+	// now start the receiveRoutine
+	go cs.receiveRoutine(0)
+
+	// schedule the first round!
+	// use GetRoundState so we don't race the receiveRoutine for access
+	cs.scheduleRound0(cs.GetRoundState())
+
+	return nil
+}
+
+// loadWalFile loads WAL data from file. It overwrites cs.wal.
+func (cs *State) loadWalFile() error {
+	wal, err := cs.OpenWAL(cs.config.WalFile())
+	if err != nil {
+		cs.Logger.Error("Error loading State wal", "err", err)
+		return err
+	}
+	cs.wal = wal
+	return nil
+}
+
+// OnStop implements service.Service.
+func (cs *State) OnStop() {
+	if err := cs.evsw.Stop(); err != nil {
+		cs.Logger.Error("error trying to stop eventSwitch", "error", err)
+	}
+	if err := cs.timeoutTicker.Stop(); err != nil {
+		cs.Logger.Error("error trying to stop timeoutTicker", "error", err)
+	}
+	// WAL is stopped in receiveRoutine.
+}
+
+// Wait waits for the main routine to return.
+// NOTE: be sure to Stop() the event switch and drain
+// any event channels or this may deadlock
+func (cs *State) Wait() {
+	<-cs.done
+}
+
+// OpenWAL opens a file to log all consensus messages and timeouts for
+// deterministic accountability.
+func (cs *State) OpenWAL(walFile string) (WAL, error) {
+	wal, err := NewWAL(walFile)
+	if err != nil {
+		cs.Logger.Error("Failed to open WAL", "file", walFile, "err", err)
+		return nil, err
+	}
+	wal.SetLogger(cs.Logger.With("wal", walFile))
+	if err := wal.Start(); err != nil {
+		cs.Logger.Error("Failed to start WAL", "err", err)
+		return nil, err
+	}
+	return wal, nil
+}
+
+//------------------------------------------------------------
+// Public interface for passing messages into the consensus state, possibly causing a state transition.
+// If peerID == "", the msg is considered internal.
+// Messages are added to the appropriate queue (peer or internal).
+// If the queue is full, the function may block.
+// TODO: should these return anything or let callers just use events?
+
+// AddVote inputs a vote.
+func (cs *State) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) {
+	if peerID == "" {
+		cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""}
+	} else {
+		cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerID}
+	}
+
+	// TODO: wait for event?!
+	return false, nil
+}
+
+// SetProposal inputs a proposal.
+func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error {
+
+	if peerID == "" {
+		cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""}
+	} else {
+		cs.peerMsgQueue <- msgInfo{&ProposalMessage{proposal}, peerID}
+	}
+
+	// TODO: wait for event?!
+	return nil
+}
+
+// AddProposalBlockPart inputs a part of the proposal block.
+func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID p2p.ID) error {
+
+	if peerID == "" {
+		cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""}
+	} else {
+		cs.peerMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, peerID}
+	}
+
+	// TODO: wait for event?!
+	return nil
+}
+
+// SetProposalAndBlock inputs the proposal and all block parts.
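+// A hedged usage sketch (the identifiers below are illustrative, not part of
+// this API's contract): a test harness that already holds a decoded proposal
+// and its block can feed everything in at once, e.g.
+//
+//	parts := block.MakePartSet(types.BlockPartSizeBytes)
+//	if err := cs.SetProposalAndBlock(proposal, block, parts, p2p.ID("peer1")); err != nil {
+//		// queueing failed; the message was not accepted
+//	}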
+func (cs *State) SetProposalAndBlock( + proposal *types.Proposal, + block *types.Block, + parts *types.PartSet, + peerID p2p.ID, +) error { + if err := cs.SetProposal(proposal, peerID); err != nil { + return err + } + for i := 0; i < int(parts.Total()); i++ { + part := parts.GetPart(i) + if err := cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerID); err != nil { + return err + } + } + return nil +} + +//------------------------------------------------------------ +// internal functions for managing the state + +func (cs *State) updateHeight(height int64) { + cs.metrics.Height.Set(float64(height)) + cs.Height = height +} + +func (cs *State) updateRoundStep(round int32, step cstypes.RoundStepType) { + cs.Round = round + cs.Step = step +} + +// enterNewRound(height, 0) at cs.StartTime. +func (cs *State) scheduleRound0(rs *cstypes.RoundState) { + // cs.Logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime) + sleepDuration := rs.StartTime.Sub(tmtime.Now()) + cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight) +} + +// Attempt to schedule a timeout (by sending timeoutInfo on the tickChan) +func (cs *State) scheduleTimeout(duration time.Duration, height int64, round int32, step cstypes.RoundStepType) { + cs.timeoutTicker.ScheduleTimeout(timeoutInfo{duration, height, round, step}) +} + +// send a msg into the receiveRoutine regarding our own proposal, block part, or vote +func (cs *State) sendInternalMessage(mi msgInfo) { + select { + case cs.internalMsgQueue <- mi: + default: + // NOTE: using the go-routine means our votes can + // be processed out of order. + // TODO: use CList here for strict determinism and + // attempt push to internalMsgQueue in receiveRoutine + cs.Logger.Info("Internal msg queue is full. Using a go-routine") + go func() { cs.internalMsgQueue <- mi }() + } +} + +// Reconstruct LastCommit from SeenCommit, which we saved along with the block, +// (which happens even before saving the state) +func (cs *State) reconstructLastCommit(state sm.State) { + seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight) + if seenCommit == nil { + panic(fmt.Sprintf("Failed to reconstruct LastCommit: seen commit for height %v not found", + state.LastBlockHeight)) + } + + lastPrecommits := types.CommitToVoteSet(state.ChainID, seenCommit, state.LastValidators) + if !lastPrecommits.HasTwoThirdsMajority() { + panic("Failed to reconstruct LastCommit: Does not have +2/3 maj") + } + + cs.LastCommit = lastPrecommits +} + +// Updates State and increments height to match that of state. +// The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight. +func (cs *State) updateToState(state sm.State) { + if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight { + panic(fmt.Sprintf("updateToState() expected state height of %v but found %v", + cs.Height, state.LastBlockHeight)) + } + if !cs.state.IsEmpty() { + if cs.state.LastBlockHeight > 0 && cs.state.LastBlockHeight+1 != cs.Height { + // This might happen when someone else is mutating cs.state. + // Someone forgot to pass in state.Copy() somewhere?! 
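+			// For illustration: after committing block 9, cs.Height must be 10;
+			// seeing cs.state.LastBlockHeight = 9 with cs.Height = 12 here means
+			// the caller handed us a state that drifted from the round state.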
+ panic(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v", + cs.state.LastBlockHeight+1, cs.Height)) + } + if cs.state.LastBlockHeight > 0 && cs.Height == cs.state.InitialHeight { + panic(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight %v, expected 0 for initial height %v", + cs.state.LastBlockHeight, cs.state.InitialHeight)) + } + + // If state isn't further out than cs.state, just ignore. + // This happens when SwitchToConsensus() is called in the reactor. + // We don't want to reset e.g. the Votes, but we still want to + // signal the new round step, because other services (eg. txNotifier) + // depend on having an up-to-date peer state! + if state.LastBlockHeight <= cs.state.LastBlockHeight { + cs.Logger.Info( + "Ignoring updateToState()", + "newHeight", + state.LastBlockHeight+1, + "oldHeight", + cs.state.LastBlockHeight+1) + cs.newStep() + return + } + } + + // Reset fields based on state. + validators := state.Validators + + switch { + case state.LastBlockHeight == 0: // Very first commit should be empty. + cs.LastCommit = (*types.VoteSet)(nil) + case cs.CommitRound > -1 && cs.Votes != nil: // Otherwise, use cs.Votes + if !cs.Votes.Precommits(cs.CommitRound).HasTwoThirdsMajority() { + panic(fmt.Sprintf("Wanted to form a Commit, but Precommits (H/R: %d/%d) didn't have 2/3+: %v", + state.LastBlockHeight, + cs.CommitRound, + cs.Votes.Precommits(cs.CommitRound))) + } + cs.LastCommit = cs.Votes.Precommits(cs.CommitRound) + case cs.LastCommit == nil: + // NOTE: when Tendermint starts, it has no votes. reconstructLastCommit + // must be called to reconstruct LastCommit from SeenCommit. + panic(fmt.Sprintf("LastCommit cannot be empty after initial block (H:%d)", + state.LastBlockHeight+1, + )) + } + + // Next desired block height + height := state.LastBlockHeight + 1 + if height == 1 { + height = state.InitialHeight + } + + // RoundState fields + cs.updateHeight(height) + cs.updateRoundStep(0, cstypes.RoundStepNewHeight) + if cs.CommitTime.IsZero() { + // "Now" makes it easier to sync up dev nodes. + // We add timeoutCommit to allow transactions + // to be gathered for the first block. + // And alternative solution that relies on clocks: + // cs.StartTime = state.LastBlockTime.Add(timeoutCommit) + cs.StartTime = cs.config.Commit(tmtime.Now()) + } else { + cs.StartTime = cs.config.Commit(cs.CommitTime) + } + + cs.Validators = validators + cs.Proposal = nil + cs.ProposalBlock = nil + cs.ProposalBlockParts = nil + cs.LockedRound = -1 + cs.LockedBlock = nil + cs.LockedBlockParts = nil + cs.ValidRound = -1 + cs.ValidBlock = nil + cs.ValidBlockParts = nil + cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators) + cs.CommitRound = -1 + cs.LastValidators = state.LastValidators + cs.TriggeredTimeoutPrecommit = false + + cs.state = state + + // Finally, broadcast RoundState + cs.newStep() +} + +func (cs *State) newStep() { + rs := cs.RoundStateEvent() + if err := cs.wal.Write(rs); err != nil { + cs.Logger.Error("Error writing to wal", "err", err) + } + cs.nSteps++ + // newStep is called by updateToState in NewState before the eventBus is set! + if cs.eventBus != nil { + if err := cs.eventBus.PublishEventNewRoundStep(rs); err != nil { + cs.Logger.Error("Error publishing new round step", "err", err) + } + cs.evsw.FireEvent(types.EventNewRoundStep, &cs.RoundState) + } +} + +//----------------------------------------- +// the main go routines + +// receiveRoutine handles messages which may cause state transitions. 
+// it's argument (n) is the number of messages to process before exiting - use 0 to run forever +// It keeps the RoundState and is the only thing that updates it. +// Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities. +// State must be locked before any internal state is updated. +func (cs *State) receiveRoutine(maxSteps int) { + onExit := func(cs *State) { + // NOTE: the internalMsgQueue may have signed messages from our + // priv_val that haven't hit the WAL, but its ok because + // priv_val tracks LastSig + + // close wal now that we're done writing to it + if err := cs.wal.Stop(); err != nil { + cs.Logger.Error("error trying to stop wal", "error", err) + } + cs.wal.Wait() + + close(cs.done) + } + + defer func() { + if r := recover(); r != nil { + cs.Logger.Error("CONSENSUS FAILURE!!!", "err", r, "stack", string(debug.Stack())) + // stop gracefully + // + // NOTE: We most probably shouldn't be running any further when there is + // some unexpected panic. Some unknown error happened, and so we don't + // know if that will result in the validator signing an invalid thing. It + // might be worthwhile to explore a mechanism for manual resuming via + // some console or secure RPC system, but for now, halting the chain upon + // unexpected consensus bugs sounds like the better option. + onExit(cs) + } + }() + + for { + if maxSteps > 0 { + if cs.nSteps >= maxSteps { + cs.Logger.Info("reached max steps. exiting receive routine") + cs.nSteps = 0 + return + } + } + rs := cs.RoundState + var mi msgInfo + + select { + case <-cs.txNotifier.TxsAvailable(): + cs.handleTxsAvailable() + case mi = <-cs.peerMsgQueue: + if err := cs.wal.Write(mi); err != nil { + cs.Logger.Error("Error writing to wal", "err", err) + } + // handles proposals, block parts, votes + // may generate internal events (votes, complete proposals, 2/3 majorities) + cs.handleMsg(mi) + case mi = <-cs.internalMsgQueue: + err := cs.wal.WriteSync(mi) // NOTE: fsync + if err != nil { + panic(fmt.Sprintf("Failed to write %v msg to consensus wal due to %v. Check your FS and restart the node", mi, err)) + } + + if _, ok := mi.Msg.(*VoteMessage); ok { + // we actually want to simulate failing during + // the previous WriteSync, but this isn't easy to do. + // Equivalent would be to fail here and manually remove + // some bytes from the end of the wal. + fail.Fail() // XXX + } + + // handles proposals, block parts, votes + cs.handleMsg(mi) + case ti := <-cs.timeoutTicker.Chan(): // tockChan: + if err := cs.wal.Write(ti); err != nil { + cs.Logger.Error("Error writing to wal", "err", err) + } + // if the timeout is relevant to the rs + // go to the next step + cs.handleTimeout(ti, rs) + case <-cs.Quit(): + onExit(cs) + return + } + } +} + +func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { + cs.Logger.Debug("Received tock", "timeout", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) + + // timeouts must be for current height, round, step + if ti.Height != rs.Height || ti.Round < rs.Round || (ti.Round == rs.Round && ti.Step < rs.Step) { + cs.Logger.Debug("Ignoring tock because we're ahead", "height", rs.Height, "round", rs.Round, "step", rs.Step) + return + } + + // the timeout will now cause a state transition + cs.mtx.Lock() + defer cs.mtx.Unlock() + + switch ti.Step { + case cstypes.RoundStepNewHeight: + // NewRound event fired from enterNewRound. + // XXX: should we fire timeout here (for timeout commit)? 
+ cs.enterNewRound(ti.Height, 0) + case cstypes.RoundStepNewRound: + cs.enterPropose(ti.Height, 0) + case cstypes.RoundStepPropose: + if err := cs.eventBus.PublishEventTimeoutPropose(cs.RoundStateEvent()); err != nil { + cs.Logger.Error("Error publishing timeout propose", "err", err) + } + cs.enterPrevote(ti.Height, ti.Round) + case cstypes.RoundStepPrevoteWait: + if err := cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()); err != nil { + cs.Logger.Error("Error publishing timeout wait", "err", err) + } + cs.enterPrecommit(ti.Height, ti.Round) + case cstypes.RoundStepPrecommitWait: + if err := cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()); err != nil { + cs.Logger.Error("Error publishing timeout wait", "err", err) + } + cs.enterPrecommit(ti.Height, ti.Round) + cs.enterNewRound(ti.Height, ti.Round+1) + default: + panic(fmt.Sprintf("Invalid timeout step: %v", ti.Step)) + } + +} + +func (cs *State) handleTxsAvailable() { + cs.mtx.Lock() + defer cs.mtx.Unlock() + + // We only need to do this for round 0. + if cs.Round != 0 { + return + } + + switch cs.Step { + case cstypes.RoundStepNewHeight: // timeoutCommit phase + if cs.needProofBlock(cs.Height) { + // enterPropose will be called by enterNewRound + return + } + + // +1ms to ensure RoundStepNewRound timeout always happens after RoundStepNewHeight + timeoutCommit := cs.StartTime.Sub(tmtime.Now()) + 1*time.Millisecond + cs.scheduleTimeout(timeoutCommit, cs.Height, 0, cstypes.RoundStepNewRound) + case cstypes.RoundStepNewRound: // after timeoutCommit + cs.enterPropose(cs.Height, 0) + } +} + +//----------------------------------------------------------------------------- +// State functions +// Used internally by handleTimeout and handleMsg to make state transitions + +// Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit), +// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1) +// Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1) +// Enter: +2/3 precommits for nil at (height,round-1) +// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round) +// NOTE: cs.StartTime was already set for height. +func (cs *State) enterNewRound(height int64, round int32) { + logger := cs.Logger.With("height", height, "round", round) + + if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) { + logger.Debug(fmt.Sprintf( + "enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", + height, + round, + cs.Height, + cs.Round, + cs.Step)) + return + } + + if now := tmtime.Now(); cs.StartTime.After(now) { + logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now) + } + + logger.Info(fmt.Sprintf("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + + // Increment validators if necessary + validators := cs.Validators + if cs.Round < round { + validators = validators.Copy() + validators.IncrementProposerPriority(tmmath.SafeSubInt32(round, cs.Round)) + } + + // Setup new round + // we don't fire newStep for this step, + // but we fire an event, so update the round step first + cs.updateRoundStep(round, cstypes.RoundStepNewRound) + cs.Validators = validators + if round == 0 { + // We've already reset these upon new height, + // and meanwhile we might have received a proposal + // for round 0. 
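+		// (Concretely: a round-0 proposal can arrive while we are still in
+		// RoundStepNewHeight; clearing cs.Proposal here would drop it.)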
+ } else { + logger.Info("Resetting Proposal info") + cs.Proposal = nil + cs.ProposalBlock = nil + cs.ProposalBlockParts = nil + } + cs.Votes.SetRound(tmmath.SafeAddInt32(round, 1)) // also track next round (round+1) to allow round-skipping + cs.TriggeredTimeoutPrecommit = false + + if err := cs.eventBus.PublishEventNewRound(cs.NewRoundEvent()); err != nil { + cs.Logger.Error("Error publishing new round", "err", err) + } + cs.metrics.Rounds.Set(float64(round)) + + // Wait for txs to be available in the mempool + // before we enterPropose in round 0. If the last block changed the app hash, + // we may need an empty "proof" block, and enterPropose immediately. + waitForTxs := cs.config.WaitForTxs() && round == 0 && !cs.needProofBlock(height) + if waitForTxs { + if cs.config.CreateEmptyBlocksInterval > 0 { + cs.scheduleTimeout(cs.config.CreateEmptyBlocksInterval, height, round, + cstypes.RoundStepNewRound) + } + } else { + cs.enterPropose(height, round) + } +} + +// needProofBlock returns true on the first height (so the genesis app hash is signed right away) +// and where the last block (height-1) caused the app hash to change +func (cs *State) needProofBlock(height int64) bool { + if height == cs.state.InitialHeight { + return true + } + + lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) + if lastBlockMeta == nil { + panic(fmt.Sprintf("needProofBlock: last block meta for height %d not found", height-1)) + } + return !bytes.Equal(cs.state.AppHash, lastBlockMeta.Header.AppHash) +} + +func (cs *State) isProposer(address []byte) bool { + return bytes.Equal(cs.Validators.GetProposer().Address, address) +} + +func (cs *State) defaultDecideProposal(height int64, round int32) { + var block *types.Block + var blockParts *types.PartSet + + // Decide on block + if cs.ValidBlock != nil { + // If there is valid block, choose that. + block, blockParts = cs.ValidBlock, cs.ValidBlockParts + } else { + // Create a new proposal block from state/txs from the mempool. + block, blockParts = cs.createProposalBlock() + if block == nil { + return + } + } + + // Flush the WAL. Otherwise, we may not recompute the same proposal to sign, + // and the privValidator will refuse to sign anything. + if err := cs.wal.FlushAndSync(); err != nil { + cs.Logger.Error("Error flushing to disk") + } + + // Make proposal + propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} + proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID) + p := proposal.ToProto() + if err := cs.privValidator.SignProposal(cs.state.ChainID, p); err == nil { + proposal.Signature = p.Signature + + // send proposal and block parts on internal msg queue + cs.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""}) + for i := 0; i < int(blockParts.Total()); i++ { + part := blockParts.GetPart(i) + cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, ""}) + } + cs.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal) + cs.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block)) + } else if !cs.replayMode { + cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err) + } +} + +// Returns true if the proposal block is complete && +// (if POLRound was proposed, we have +2/3 prevotes from there). +func (cs *State) isProposalComplete() bool { + if cs.Proposal == nil || cs.ProposalBlock == nil { + return false + } + // we have the proposal. 
if there's a POLRound, + // make sure we have the prevotes from it too + if cs.Proposal.POLRound < 0 { + return true + } + // if this is false the proposer is lying or we haven't received the POL yet + return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority() + +} + +// Create the next block to propose and return it. Returns nil block upon error. +// +// We really only need to return the parts, but the block is returned for +// convenience so we can log the proposal block. +// +// NOTE: keep it side-effect free for clarity. +// CONTRACT: cs.privValidator is not nil. +func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.PartSet) { + if cs.privValidator == nil { + panic("entered createProposalBlock with privValidator being nil") + } + + var commit *types.Commit + switch { + case cs.Height == cs.state.InitialHeight: + // We're creating a proposal for the first block. + // The commit is empty, but not nil. + commit = types.NewCommit(0, 0, types.BlockID{}, nil) + case cs.LastCommit.HasTwoThirdsMajority(): + // Make the commit from LastCommit + commit = cs.LastCommit.MakeCommit() + default: // This shouldn't happen. + cs.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block") + return + } + + if cs.privValidatorPubKey == nil { + // If this node is a validator & proposer in the current round, it will + // miss the opportunity to create a block. + cs.Logger.Error(fmt.Sprintf("enterPropose: %v", errPubKeyIsNotSet)) + return + } + proposerAddr := cs.privValidatorPubKey.Address() + + return cs.blockExec.CreateProposalBlock(cs.Height, cs.state, commit, proposerAddr) +} + +// Enter: any +2/3 prevotes at next round. +func (cs *State) enterPrevoteWait(height int64, round int32) { + logger := cs.Logger.With("height", height, "round", round) + + if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) { + logger.Debug(fmt.Sprintf( + "enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", + height, + round, + cs.Height, + cs.Round, + cs.Step)) + return + } + if !cs.Votes.Prevotes(round).HasTwoThirdsAny() { + panic(fmt.Sprintf("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) + } + logger.Info(fmt.Sprintf("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + + defer func() { + // Done enterPrevoteWait: + cs.updateRoundStep(round, cstypes.RoundStepPrevoteWait) + cs.newStep() + }() + + // Wait for some more prevotes; enterPrecommit + cs.scheduleTimeout(cs.config.Prevote(round), height, round, cstypes.RoundStepPrevoteWait) +} + +// Enter: any +2/3 precommits for next round. +func (cs *State) enterPrecommitWait(height int64, round int32) { + logger := cs.Logger.With("height", height, "round", round) + + if cs.Height != height || round < cs.Round || (cs.Round == round && cs.TriggeredTimeoutPrecommit) { + logger.Debug( + fmt.Sprintf( + "enterPrecommitWait(%v/%v): Invalid args. "+ + "Current state is Height/Round: %v/%v/, TriggeredTimeoutPrecommit:%v", + height, round, cs.Height, cs.Round, cs.TriggeredTimeoutPrecommit)) + return + } + if !cs.Votes.Precommits(round).HasTwoThirdsAny() { + panic(fmt.Sprintf("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round)) + } + logger.Info(fmt.Sprintf("enterPrecommitWait(%v/%v). 
Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + + defer func() { + // Done enterPrecommitWait: + cs.TriggeredTimeoutPrecommit = true + cs.newStep() + }() + + // Wait for some more precommits; enterNewRound + cs.scheduleTimeout(cs.config.Precommit(round), height, round, cstypes.RoundStepPrecommitWait) +} + +// Enter: +2/3 precommits for block +func (cs *State) enterCommit(height int64, commitRound int32) { + logger := cs.Logger.With("height", height, "commitRound", commitRound) + + if cs.Height != height || cstypes.RoundStepCommit <= cs.Step { + logger.Debug(fmt.Sprintf( + "enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", + height, + commitRound, + cs.Height, + cs.Round, + cs.Step)) + return + } + logger.Info(fmt.Sprintf("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) + + defer func() { + // Done enterCommit: + // keep cs.Round the same, commitRound points to the right Precommits set. + cs.updateRoundStep(cs.Round, cstypes.RoundStepCommit) + cs.CommitRound = commitRound + cs.CommitTime = tmtime.Now() + cs.newStep() + + // Maybe finalize immediately. + cs.tryFinalizeCommit(height) + }() + + blockID, ok := cs.Votes.Precommits(commitRound).TwoThirdsMajority() + if !ok { + panic("RunActionCommit() expects +2/3 precommits") + } + + // The Locked* fields no longer matter. + // Move them over to ProposalBlock if they match the commit hash, + // otherwise they'll be cleared in updateToState. + if cs.LockedBlock.HashesTo(blockID.Hash) { + logger.Info("Commit is for locked block. Set ProposalBlock=LockedBlock", "blockHash", blockID.Hash) + cs.ProposalBlock = cs.LockedBlock + cs.ProposalBlockParts = cs.LockedBlockParts + } + + // If we don't have the block being committed, set up to get it. + if !cs.ProposalBlock.HashesTo(blockID.Hash) { + if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) { + logger.Info( + "Commit is for a block we don't know about. Set ProposalBlock=nil", + "proposal", + cs.ProposalBlock.Hash(), + "commit", + blockID.Hash) + // We're getting the wrong block. + // Set up ProposalBlockParts and keep waiting. + cs.ProposalBlock = nil + cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) + if err := cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()); err != nil { + cs.Logger.Error("Error publishing valid block", "err", err) + } + cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState) + } + // else { + // We just need to keep waiting. + // } + } +} + +// If we have the block AND +2/3 commits for it, finalize. +func (cs *State) tryFinalizeCommit(height int64) { + logger := cs.Logger.With("height", height) + + if cs.Height != height { + panic(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height)) + } + + blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority() + if !ok || len(blockID.Hash) == 0 { + logger.Error("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for .") + return + } + if !cs.ProposalBlock.HashesTo(blockID.Hash) { + // TODO: this happens every time if we're not a validator (ugly logs) + // TODO: ^^ wait, why does it matter that we're a validator? + logger.Info( + "Attempt to finalize failed. 
We don't have the commit block.", + "proposal-block", + cs.ProposalBlock.Hash(), + "commit-block", + blockID.Hash) + return + } + + // go + cs.finalizeCommit(height) +} + +// Increment height and goto cstypes.RoundStepNewHeight +func (cs *State) finalizeCommit(height int64) { + if cs.Height != height || cs.Step != cstypes.RoundStepCommit { + cs.Logger.Debug(fmt.Sprintf( + "finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", + height, + cs.Height, + cs.Round, + cs.Step)) + return + } + + blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority() + block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts + + if !ok { + panic("Cannot finalizeCommit, commit does not have two thirds majority") + } + if !blockParts.HasHeader(blockID.PartSetHeader) { + panic("Expected ProposalBlockParts header to be commit header") + } + if !block.HashesTo(blockID.Hash) { + panic("Cannot finalizeCommit, ProposalBlock does not hash to commit hash") + } + if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil { + panic(fmt.Errorf("+2/3 committed an invalid block: %w", err)) + } + + cs.Logger.Info("Finalizing commit of block with N txs", + "height", block.Height, + "hash", block.Hash(), + "root", block.AppHash, + "N", len(block.Txs)) + cs.Logger.Info(fmt.Sprintf("%v", block)) + + fail.Fail() // XXX + + // Save to blockStore. + if cs.blockStore.Height() < block.Height { + // NOTE: the seenCommit is local justification to commit this block, + // but may differ from the LastCommit included in the next block + precommits := cs.Votes.Precommits(cs.CommitRound) + seenCommit := precommits.MakeCommit() + cs.blockStore.SaveBlock(block, blockParts, seenCommit) + } else { + // Happens during replay if we already saved the block but didn't commit + cs.Logger.Info("Calling finalizeCommit on already stored block", "height", block.Height) + } + + fail.Fail() // XXX + + // Write EndHeightMessage{} for this height, implying that the blockstore + // has saved the block. + // + // If we crash before writing this EndHeightMessage{}, we will recover by + // running ApplyBlock during the ABCI handshake when we restart. If we + // didn't save the block to the blockstore before writing + // EndHeightMessage{}, we'd have to change WAL replay -- currently it + // complains about replaying for heights where an #ENDHEIGHT entry already + // exists. + // + // Either way, the State should not be resumed until we + // successfully call ApplyBlock (ie. later here, or in Handshake after + // restart). + endMsg := EndHeightMessage{height} + if err := cs.wal.WriteSync(endMsg); err != nil { // NOTE: fsync + panic(fmt.Sprintf("Failed to write %v msg to consensus wal due to %v. Check your FS and restart the node", + endMsg, err)) + } + + fail.Fail() // XXX + + // Create a copy of the state for staging and an event cache for txs. + stateCopy := cs.state.Copy() + + // Execute and commit the block, update and save the state, and update the mempool. + // NOTE The block.AppHash wont reflect these txs until the next block. + var err error + var retainHeight int64 + stateCopy, retainHeight, err = cs.blockExec.ApplyBlock( + stateCopy, + types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}, + block) + if err != nil { + cs.Logger.Error("Error on ApplyBlock", "err", err) + return + } + + fail.Fail() // XXX + + // Prune old heights, if requested by ABCI app. 
+	if retainHeight > 0 {
+		pruned, err := cs.pruneBlocks(retainHeight)
+		if err != nil {
+			cs.Logger.Error("Failed to prune blocks", "retainHeight", retainHeight, "err", err)
+		} else {
+			cs.Logger.Info("Pruned blocks", "pruned", pruned, "retainHeight", retainHeight)
+		}
+	}
+
+	// must be called before we update state
+	cs.recordMetrics(height, block)
+
+	// NewHeightStep!
+	cs.updateToState(stateCopy)
+
+	fail.Fail() // XXX
+
+	// Private validator might have changed its key pair => refetch pubkey.
+	if err := cs.updatePrivValidatorPubKey(); err != nil {
+		cs.Logger.Error("Can't get private validator pubkey", "err", err)
+	}
+
+	// cs.StartTime is already set.
+	// Schedule Round0 to start soon.
+	cs.scheduleRound0(&cs.RoundState)
+
+	// By here,
+	// * cs.Height has been incremented to height+1
+	// * cs.Step is now cstypes.RoundStepNewHeight
+	// * cs.StartTime is set to when we will start round0.
+}
+
+func (cs *State) pruneBlocks(retainHeight int64) (uint64, error) {
+	base := cs.blockStore.Base()
+	if retainHeight <= base {
+		return 0, nil
+	}
+	pruned, err := cs.blockStore.PruneBlocks(retainHeight)
+	if err != nil {
+		return 0, fmt.Errorf("failed to prune block store: %w", err)
+	}
+	err = cs.blockExec.Store().PruneStates(base, retainHeight)
+	if err != nil {
+		return 0, fmt.Errorf("failed to prune state database: %w", err)
+	}
+	return pruned, nil
+}
+
+func (cs *State) recordMetrics(height int64, block *types.Block) {
+	cs.metrics.Validators.Set(float64(cs.Validators.Size()))
+	cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower()))
+
+	var (
+		missingValidators      int
+		missingValidatorsPower int64
+	)
+	// height=0 -> MissingValidators and MissingValidatorsPower are both 0.
+	// Remember that the first LastCommit is intentionally empty, so it's not
+	// fair to increment missing validators number.
+	if height > cs.state.InitialHeight {
+		// Sanity check that commit size matches validator set size - only applies
+		// after first block.
+		var (
+			commitSize = block.LastCommit.Size()
+			valSetLen  = len(cs.LastValidators.Validators)
+			address    types.Address
+		)
+		if commitSize != valSetLen {
+			panic(fmt.Sprintf("commit size (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v",
+				commitSize, valSetLen, block.Height, block.LastCommit.Signatures, cs.LastValidators.Validators))
+		}
+
+		if cs.privValidator != nil {
+			if cs.privValidatorPubKey == nil {
+				// Metrics won't be updated, but it's not critical.
+				cs.Logger.Error(fmt.Sprintf("recordMetrics: %v", errPubKeyIsNotSet))
+			} else {
+				address = cs.privValidatorPubKey.Address()
+			}
+		}
+
+		for i, val := range cs.LastValidators.Validators {
+			commitSig := block.LastCommit.Signatures[i]
+			if commitSig.Absent() {
+				missingValidators++
+				missingValidatorsPower += val.VotingPower
+			}
+
+			if bytes.Equal(val.Address, address) {
+				label := []string{
+					"validator_address", val.Address.String(),
+				}
+				cs.metrics.ValidatorPower.With(label...).Set(float64(val.VotingPower))
+				if commitSig.ForBlock() {
+					cs.metrics.ValidatorLastSignedHeight.With(label...).Set(float64(height))
+				} else {
+					cs.metrics.ValidatorMissedBlocks.With(label...).Add(float64(1))
+				}
+			}
+
+		}
+	}
+	cs.metrics.MissingValidators.Set(float64(missingValidators))
+	cs.metrics.MissingValidatorsPower.Set(float64(missingValidatorsPower))
+
+	// NOTE: byzantine validators power and count is only for consensus evidence i.e.
duplicate vote + var ( + byzantineValidatorsPower = int64(0) + byzantineValidatorsCount = int64(0) + ) + for _, ev := range block.Evidence.Evidence { + if dve, ok := ev.(*types.DuplicateVoteEvidence); ok { + if _, val := cs.Validators.GetByAddress(dve.VoteA.ValidatorAddress); val != nil { + byzantineValidatorsCount++ + byzantineValidatorsPower += val.VotingPower + } + } + } + cs.metrics.ByzantineValidators.Set(float64(byzantineValidatorsCount)) + cs.metrics.ByzantineValidatorsPower.Set(float64(byzantineValidatorsPower)) + + if height > 1 { + lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) + if lastBlockMeta != nil { + cs.metrics.BlockIntervalSeconds.Observe( + block.Time.Sub(lastBlockMeta.Header.Time).Seconds(), + ) + } + } + + cs.metrics.NumTxs.Set(float64(len(block.Data.Txs))) + cs.metrics.TotalTxs.Add(float64(len(block.Data.Txs))) + cs.metrics.BlockSizeBytes.Set(float64(block.Size())) + cs.metrics.CommittedHeight.Set(float64(block.Height)) +} + +//----------------------------------------------------------------------------- + +// NOTE: block is not necessarily valid. +// Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, +// once we have the full block. +func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (added bool, err error) { + height, round, part := msg.Height, msg.Round, msg.Part + + // Blocks might be reused, so round mismatch is OK + if cs.Height != height { + cs.Logger.Debug("Received block part from wrong height", "height", height, "round", round) + return false, nil + } + + // We're not expecting a block part. + if cs.ProposalBlockParts == nil { + // NOTE: this can happen when we've gone to a higher round and + // then receive parts from the previous round - not necessarily a bad peer. + cs.Logger.Info("Received a block part when we're not expecting any", + "height", height, "round", round, "index", part.Index, "peer", peerID) + return false, nil + } + + added, err = cs.ProposalBlockParts.AddPart(part) + if err != nil { + return added, err + } + if cs.ProposalBlockParts.ByteSize() > cs.state.ConsensusParams.Block.MaxBytes { + return added, fmt.Errorf("total size of proposal block parts exceeds maximum block bytes (%d > %d)", + cs.ProposalBlockParts.ByteSize(), cs.state.ConsensusParams.Block.MaxBytes, + ) + } + if added && cs.ProposalBlockParts.IsComplete() { + bz, err := ioutil.ReadAll(cs.ProposalBlockParts.GetReader()) + if err != nil { + return added, err + } + + var pbb = new(tmproto.Block) + err = proto.Unmarshal(bz, pbb) + if err != nil { + return added, err + } + + block, err := types.BlockFromProto(pbb) + if err != nil { + return added, err + } + + cs.ProposalBlock = block + // NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal + cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash()) + if err := cs.eventBus.PublishEventCompleteProposal(cs.CompleteProposalEvent()); err != nil { + cs.Logger.Error("Error publishing event complete proposal", "err", err) + } + + // Update Valid* if we can. 
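+		// (Valid* tracks the most recent proof-of-lock: e.g. if the current
+		// round gathered +2/3 prevotes for this block while cs.ValidRound is
+		// still an earlier round, ValidRound/ValidBlock are upgraded below.)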
+		prevotes := cs.Votes.Prevotes(cs.Round)
+		blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
+		if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) {
+			if cs.ProposalBlock.HashesTo(blockID.Hash) {
+				cs.Logger.Info("Updating valid block to new proposal block",
+					"valid-round", cs.Round, "valid-block-hash", cs.ProposalBlock.Hash())
+				cs.ValidRound = cs.Round
+				cs.ValidBlock = cs.ProposalBlock
+				cs.ValidBlockParts = cs.ProposalBlockParts
+			}
+			// TODO: In case there is +2/3 majority in Prevotes set for some
+			// block and cs.ProposalBlock contains different block, either
+			// proposer is faulty or voting power of faulty processes is more
+			// than 1/3. We should trigger in the future accountability
+			// procedure at this point.
+		}
+
+		if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() {
+			// Move onto the next step
+			cs.enterPrevote(height, cs.Round)
+			if hasTwoThirds { // this is an optimisation, as this will be triggered when a prevote is added
+				cs.enterPrecommit(height, cs.Round)
+			}
+		} else if cs.Step == cstypes.RoundStepCommit {
+			// If we're waiting on the proposal block...
+			cs.tryFinalizeCommit(height)
+		}
+		return added, nil
+	}
+	return added, nil
+}
+
+// Attempt to add the vote. If it's a duplicate signature, dupeout the validator.
+func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) {
+	added, err := cs.addVote(vote, peerID)
+	if err != nil {
+		// If the vote height is off, we'll just ignore it,
+		// But if it's a conflicting sig, add it to the cs.evpool.
+		// If it's otherwise invalid, punish peer.
+		// nolint: gocritic
+		if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok {
+			if cs.privValidatorPubKey == nil {
+				return false, errPubKeyIsNotSet
+			}
+
+			if bytes.Equal(vote.ValidatorAddress, cs.privValidatorPubKey.Address()) {
+				cs.Logger.Error(
+					"Found conflicting vote from ourselves. Did you unsafe_reset a validator?",
+					"height",
+					vote.Height,
+					"round",
+					vote.Round,
+					"type",
+					vote.Type)
+				return added, err
+			}
+			var timestamp time.Time
+			if voteErr.VoteA.Height == cs.state.InitialHeight {
+				timestamp = cs.state.LastBlockTime // genesis time
+			} else {
+				timestamp = sm.MedianTime(cs.LastCommit.MakeCommit(), cs.LastValidators)
+			}
+			ev := types.NewDuplicateVoteEvidence(voteErr.VoteA, voteErr.VoteB, timestamp, cs.Validators)
+			evidenceErr := cs.evpool.AddEvidenceFromConsensus(ev)
+			if evidenceErr != nil {
+				cs.Logger.Error("Failed to add evidence to the evidence pool", "err", evidenceErr)
+			}
+			return added, err
+		} else if err == types.ErrVoteNonDeterministicSignature {
+			cs.Logger.Debug("Vote has non-deterministic signature", "err", err)
+		} else {
+			// Either
+			// 1) bad peer OR
+			// 2) not a bad peer? this can also err sometimes with "Unexpected step" OR
+			// 3) tmkms use with multiple validators connecting to a single tmkms instance
+			// (https://github.com/tendermint/tendermint/issues/3839).
+			cs.Logger.Info("Error attempting to add vote", "err", err)
+			return added, ErrAddingVote
+		}
+	}
+	return added, nil
+}
+
+//-----------------------------------------------------------------------------
+
+// CONTRACT: cs.privValidator is not nil.
+func (cs *State) signVote(
+	msgType tmproto.SignedMsgType,
+	hash []byte,
+	header types.PartSetHeader,
+) (*types.Vote, error) {
+	// Flush the WAL. Otherwise, we may not recompute the same vote to sign,
+	// and the privValidator will refuse to sign anything.
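+	// (Concretely: if we crash after signing but before the WAL entry is
+	// durable, replay must re-derive a byte-identical vote; otherwise the
+	// privValidator's last-signed-state protection rejects the second attempt.)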
+	if err := cs.wal.FlushAndSync(); err != nil {
+		return nil, err
+	}
+
+	if cs.privValidatorPubKey == nil {
+		return nil, errPubKeyIsNotSet
+	}
+	addr := cs.privValidatorPubKey.Address()
+	valIdx, _ := cs.Validators.GetByAddress(addr)
+
+	vote := &types.Vote{
+		ValidatorAddress: addr,
+		ValidatorIndex:   valIdx,
+		Height:           cs.Height,
+		Round:            cs.Round,
+		Timestamp:        cs.voteTime(),
+		Type:             msgType,
+		BlockID:          types.BlockID{Hash: hash, PartSetHeader: header},
+	}
+	v := vote.ToProto()
+	err := cs.privValidator.SignVote(cs.state.ChainID, v)
+	vote.Signature = v.Signature
+
+	return vote, err
+}
+
+func (cs *State) voteTime() time.Time {
+	now := tmtime.Now()
+	minVoteTime := now
+	// TODO: We should remove next line in case we don't vote for v in case cs.ProposalBlock == nil,
+	// even if cs.LockedBlock != nil. See https://docs.tendermint.com/master/spec/.
+	timeIota := time.Duration(cs.state.ConsensusParams.Block.TimeIotaMs) * time.Millisecond
+	if cs.LockedBlock != nil {
+		// See the BFT time spec https://docs.tendermint.com/master/spec/consensus/bft-time.html
+		minVoteTime = cs.LockedBlock.Time.Add(timeIota)
+	} else if cs.ProposalBlock != nil {
+		minVoteTime = cs.ProposalBlock.Time.Add(timeIota)
+	}
+
+	if now.After(minVoteTime) {
+		return now
+	}
+	return minVoteTime
+}
+
+// sign the vote and publish on internalMsgQueue
+func (cs *State) signAddVote(msgType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote {
+	if cs.privValidator == nil { // the node does not have a key
+		return nil
+	}
+
+	if cs.privValidatorPubKey == nil {
+		// Vote won't be signed, but it's not critical.
+		cs.Logger.Error(fmt.Sprintf("signAddVote: %v", errPubKeyIsNotSet))
+		return nil
+	}
+
+	// If the node is not in the validator set, do nothing.
+	if !cs.Validators.HasAddress(cs.privValidatorPubKey.Address()) {
+		return nil
+	}
+
+	// TODO: pass pubKey to signVote
+	vote, err := cs.signVote(msgType, hash, header)
+	if err == nil {
+		cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
+		cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote)
+		return vote
+	}
+	// if !cs.replayMode {
+	cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
+	//}
+	return nil
+}
+
+// updatePrivValidatorPubKey gets the private validator public key and
+// memoizes it. This func returns an error if the private validator is not
+// responding or responds with an error.
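+// (Why memoize: with a remote signer such as a KMS, GetPubKey is a network
+// round trip; callers on the hot path read cs.privValidatorPubKey instead.
+// finalizeCommit refreshes the cached key each height in case it was rotated.)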
+func (cs *State) updatePrivValidatorPubKey() error {
+	if cs.privValidator == nil {
+		return nil
+	}
+
+	pubKey, err := cs.privValidator.GetPubKey()
+	if err != nil {
+		return err
+	}
+	cs.privValidatorPubKey = pubKey
+	return nil
+}
+
+// look back to check existence of the node's consensus votes before joining consensus
+func (cs *State) checkDoubleSigningRisk(height int64) error {
+	if cs.privValidator != nil && cs.privValidatorPubKey != nil && cs.config.DoubleSignCheckHeight > 0 && height > 0 {
+		valAddr := cs.privValidatorPubKey.Address()
+		doubleSignCheckHeight := cs.config.DoubleSignCheckHeight
+		if doubleSignCheckHeight > height {
+			doubleSignCheckHeight = height
+		}
+		for i := int64(1); i < doubleSignCheckHeight; i++ {
+			lastCommit := cs.blockStore.LoadSeenCommit(height - i)
+			if lastCommit != nil {
+				for sigIdx, s := range lastCommit.Signatures {
+					if s.BlockIDFlag == types.BlockIDFlagCommit && bytes.Equal(s.ValidatorAddress, valAddr) {
+						cs.Logger.Info("Found signature from the same key", "sig", s, "idx", sigIdx, "height", height-i)
+						return ErrSignatureFoundInPastBlocks
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+//---------------------------------------------------------
+
+func CompareHRS(h1 int64, r1 int32, s1 cstypes.RoundStepType, h2 int64, r2 int32, s2 cstypes.RoundStepType) int {
+	if h1 < h2 {
+		return -1
+	} else if h1 > h2 {
+		return 1
+	}
+	if r1 < r2 {
+		return -1
+	} else if r1 > r2 {
+		return 1
+	}
+	if s1 < s2 {
+		return -1
+	} else if s1 > s2 {
+		return 1
+	}
+	return 0
+}
+
+// repairWalFile decodes messages from src (until the decoder errors) and
+// writes them to dst.
+func repairWalFile(src, dst string) error {
+	in, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer in.Close()
+
+	out, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer out.Close()
+
+	var (
+		dec = NewWALDecoder(in)
+		enc = NewWALEncoder(out)
+	)
+
+	// best-case repair (until first error is encountered)
+	for {
+		msg, err := dec.Decode()
+		if err != nil {
+			break
+		}
+
+		err = enc.Encode(msg)
+		if err != nil {
+			return fmt.Errorf("failed to encode msg: %w", err)
+		}
+	}
+
+	return nil
+}
diff --git a/test/maverick/consensus/ticker.go b/test/maverick/consensus/ticker.go
new file mode 100644
index 000000000..fb3571ac8
--- /dev/null
+++ b/test/maverick/consensus/ticker.go
@@ -0,0 +1,134 @@
+package consensus
+
+import (
+	"time"
+
+	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/libs/service"
+)
+
+var (
+	tickTockBufferSize = 10
+)
+
+// TimeoutTicker is a timer that schedules timeouts
+// conditional on the height/round/step in the timeoutInfo.
+// The timeoutInfo.Duration may be non-positive.
+type TimeoutTicker interface {
+	Start() error
+	Stop() error
+	Chan() <-chan timeoutInfo       // on which to receive a timeout
+	ScheduleTimeout(ti timeoutInfo) // reset the timer
+
+	SetLogger(log.Logger)
+}
+
+// timeoutTicker wraps time.Timer,
+// scheduling timeouts only for greater height/round/step
+// than what it's already seen.
+// Timeouts are scheduled along the tickChan,
+// and fired on the tockChan.
+type timeoutTicker struct {
+	service.BaseService
+
+	timer    *time.Timer
+	tickChan chan timeoutInfo // for scheduling timeouts
+	tockChan chan timeoutInfo // for notifying about them
+}
+
+// NewTimeoutTicker returns a new TimeoutTicker.
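+// A hedged usage sketch (cstypes as imported in state.go):
+//
+//	tt := NewTimeoutTicker()
+//	_ = tt.Start()
+//	tt.ScheduleTimeout(timeoutInfo{Duration: 50 * time.Millisecond, Height: 1, Round: 0, Step: cstypes.RoundStepPropose})
+//	ti := <-tt.Chan() // fires once the duration elapses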
+func NewTimeoutTicker() TimeoutTicker {
+	tt := &timeoutTicker{
+		timer:    time.NewTimer(0),
+		tickChan: make(chan timeoutInfo, tickTockBufferSize),
+		tockChan: make(chan timeoutInfo, tickTockBufferSize),
+	}
+	tt.BaseService = *service.NewBaseService(nil, "TimeoutTicker", tt)
+	tt.stopTimer() // don't want to fire until the first scheduled timeout
+	return tt
+}
+
+// OnStart implements service.Service. It starts the timeout routine.
+func (t *timeoutTicker) OnStart() error {
+
+	go t.timeoutRoutine()
+
+	return nil
+}
+
+// OnStop implements service.Service. It stops the timeout routine.
+func (t *timeoutTicker) OnStop() {
+	t.BaseService.OnStop()
+	t.stopTimer()
+}
+
+// Chan returns a channel on which timeouts are sent.
+func (t *timeoutTicker) Chan() <-chan timeoutInfo {
+	return t.tockChan
+}
+
+// ScheduleTimeout schedules a new timeout by sending on the internal tickChan.
+// The timeoutRoutine is always available to read from tickChan, so this won't block.
+// The scheduling may fail if the timeoutRoutine has already scheduled a timeout for a later height/round/step.
+func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) {
+	t.tickChan <- ti
+}
+
+//-------------------------------------------------------------
+
+// stop the timer and drain if necessary
+func (t *timeoutTicker) stopTimer() {
+	// Stop() returns false if it was already fired or was stopped
+	if !t.timer.Stop() {
+		select {
+		case <-t.timer.C:
+		default:
+			t.Logger.Debug("Timer already stopped")
+		}
+	}
+}
+
+// send on tickChan to start a new timer.
+// timers are interrupted and replaced by new ticks from later steps
+// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
+func (t *timeoutTicker) timeoutRoutine() {
+	t.Logger.Debug("Starting timeout routine")
+	var ti timeoutInfo
+	for {
+		select {
+		case newti := <-t.tickChan:
+			t.Logger.Debug("Received tick", "old_ti", ti, "new_ti", newti)
+
+			// ignore tickers for old height/round/step
+			if newti.Height < ti.Height {
+				continue
+			} else if newti.Height == ti.Height {
+				if newti.Round < ti.Round {
+					continue
+				} else if newti.Round == ti.Round {
+					if ti.Step > 0 && newti.Step <= ti.Step {
+						continue
+					}
+				}
+			}
+
+			// stop the last timer
+			t.stopTimer()
+
+			// update timeoutInfo and reset timer
+			// NOTE time.Timer allows duration to be non-positive
+			ti = newti
+			t.timer.Reset(ti.Duration)
+			t.Logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
+		case <-t.timer.C:
+			t.Logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
+			// go routine here guarantees timeoutRoutine doesn't block.
+			// Determinism comes from playback in the receiveRoutine.
+			// We can eliminate it by merging the timeoutRoutine into receiveRoutine
+			// and managing the timeouts ourselves with a millisecond ticker
+			go func(toi timeoutInfo) { t.tockChan <- toi }(ti)
+		case <-t.Quit():
+			return
+		}
+	}
+}
diff --git a/test/maverick/consensus/wal.go b/test/maverick/consensus/wal.go
new file mode 100644
index 000000000..7d698713f
--- /dev/null
+++ b/test/maverick/consensus/wal.go
@@ -0,0 +1,437 @@
+package consensus
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"path/filepath"
+	"time"
+
+	"github.com/gogo/protobuf/proto"
+
+	auto "github.com/tendermint/tendermint/libs/autofile"
+	// tmjson "github.com/tendermint/tendermint/libs/json"
+	"github.com/tendermint/tendermint/libs/log"
+	tmos "github.com/tendermint/tendermint/libs/os"
+	"github.com/tendermint/tendermint/libs/service"
+	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
+	tmtime "github.com/tendermint/tendermint/types/time"
+)
+
+const (
+	// time.Time + max consensus msg size
+	maxMsgSizeBytes = maxMsgSize + 24
+
+	// how often the WAL should be sync'd during periodic sync'ing
+	walDefaultFlushInterval = 2 * time.Second
+)
+
+//--------------------------------------------------------
+// types and functions for saving consensus messages
+
+// TimedWALMessage wraps WALMessage and adds Time for debugging purposes.
+type TimedWALMessage struct {
+	Time time.Time  `json:"time"`
+	Msg  WALMessage `json:"msg"`
+}
+
+// EndHeightMessage marks the end of the given height inside WAL.
+// @internal used by scripts/wal2json util.
+type EndHeightMessage struct {
+	Height int64 `json:"height"`
+}
+
+type WALMessage interface{}
+
+// func init() {
+// 	tmjson.RegisterType(msgInfo{}, "tendermint/wal/MsgInfo")
+// 	tmjson.RegisterType(timeoutInfo{}, "tendermint/wal/TimeoutInfo")
+// 	tmjson.RegisterType(EndHeightMessage{}, "tendermint/wal/EndHeightMessage")
+// }
+
+//--------------------------------------------------------
+// Simple write-ahead logger
+
+// WAL is an interface for any write-ahead logger.
+type WAL interface {
+	Write(WALMessage) error
+	WriteSync(WALMessage) error
+	FlushAndSync() error
+
+	SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error)
+
+	// service methods
+	Start() error
+	Stop() error
+	Wait()
+}
+
+// Write ahead logger writes msgs to disk before they are processed.
+// Can be used for crash-recovery and deterministic replay.
+// TODO: currently the wal is overwritten during replay catchup, give it a mode
+// so it's either reading or appending - must read to end to start appending
+// again.
+type BaseWAL struct {
+	service.BaseService
+
+	group *auto.Group
+
+	enc *WALEncoder
+
+	flushTicker   *time.Ticker
+	flushInterval time.Duration
+}
+
+var _ WAL = &BaseWAL{}
+
+// NewWAL returns a new write-ahead logger based on `baseWAL`, which implements
+// WAL. It's flushed and synced to disk every 2s and once when stopped.
+func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) {
+	err := tmos.EnsureDir(filepath.Dir(walFile), 0700)
+	if err != nil {
+		return nil, fmt.Errorf("failed to ensure WAL directory is in place: %w", err)
+	}
+
+	group, err := auto.OpenGroup(walFile, groupOptions...)
+	if err != nil {
+		return nil, err
+	}
+	wal := &BaseWAL{
+		group:         group,
+		enc:           NewWALEncoder(group),
+		flushInterval: walDefaultFlushInterval,
+	}
+	wal.BaseService = *service.NewBaseService(nil, "baseWAL", wal)
+	return wal, nil
+}
+
+// SetFlushInterval allows us to override the periodic flush interval for the WAL.
+func (wal *BaseWAL) SetFlushInterval(i time.Duration) {
+	wal.flushInterval = i
+}
+
+func (wal *BaseWAL) Group() *auto.Group {
+	return wal.group
+}
+
+func (wal *BaseWAL) SetLogger(l log.Logger) {
+	wal.BaseService.Logger = l
+	wal.group.SetLogger(l)
+}
+
+func (wal *BaseWAL) OnStart() error {
+	size, err := wal.group.Head.Size()
+	if err != nil {
+		return err
+	} else if size == 0 {
+		if err := wal.WriteSync(EndHeightMessage{0}); err != nil {
+			return err
+		}
+	}
+	err = wal.group.Start()
+	if err != nil {
+		return err
+	}
+	wal.flushTicker = time.NewTicker(wal.flushInterval)
+	go wal.processFlushTicks()
+	return nil
+}
+
+func (wal *BaseWAL) processFlushTicks() {
+	for {
+		select {
+		case <-wal.flushTicker.C:
+			if err := wal.FlushAndSync(); err != nil {
+				wal.Logger.Error("Periodic WAL flush failed", "err", err)
+			}
+		case <-wal.Quit():
+			return
+		}
+	}
+}
+
+// FlushAndSync flushes and fsync's the underlying group's data to disk.
+// See auto#FlushAndSync
+func (wal *BaseWAL) FlushAndSync() error {
+	return wal.group.FlushAndSync()
+}
+
+// Stop the underlying autofile group.
+// Use Wait() to ensure it's finished shutting down
+// before cleaning up files.
+func (wal *BaseWAL) OnStop() {
+	wal.flushTicker.Stop()
+	if err := wal.FlushAndSync(); err != nil {
+		wal.Logger.Error("error on flush data to disk", "error", err)
+	}
+	if err := wal.group.Stop(); err != nil {
+		wal.Logger.Error("error trying to stop wal", "error", err)
+	}
+	wal.group.Close()
+}
+
+// Wait for the underlying autofile group to finish shutting down
+// so it's safe to cleanup files.
+func (wal *BaseWAL) Wait() {
+	wal.group.Wait()
+}
+
+// Write is called in newStep and for each receive on the
+// peerMsgQueue and the timeoutTicker.
+// NOTE: does not call fsync()
+func (wal *BaseWAL) Write(msg WALMessage) error {
+	if wal == nil {
+		return nil
+	}
+
+	if err := wal.enc.Encode(&TimedWALMessage{tmtime.Now(), msg}); err != nil {
+		wal.Logger.Error("Error writing msg to consensus wal. WARNING: recover may not be possible for the current height",
+			"err", err, "msg", msg)
+		return err
+	}
+
+	return nil
+}
+
+// WriteSync is called when we receive a msg from ourselves
+// so that we write to disk before sending signed messages.
+// NOTE: calls fsync()
+func (wal *BaseWAL) WriteSync(msg WALMessage) error {
+	if wal == nil {
+		return nil
+	}
+
+	if err := wal.Write(msg); err != nil {
+		return err
+	}
+
+	if err := wal.FlushAndSync(); err != nil {
+		wal.Logger.Error(`WriteSync failed to flush consensus wal.
+		WARNING: may result in creating alternative proposals / votes for the current height iff the node restarted`,
+			"err", err)
+		return err
+	}
+
+	return nil
+}
+
+// WALSearchOptions are optional arguments to SearchForEndHeight.
+type WALSearchOptions struct {
+	// IgnoreDataCorruptionErrors set to true will result in skipping data corruption errors.
+	IgnoreDataCorruptionErrors bool
+}
+
+// SearchForEndHeight searches for the EndHeightMessage with the given height
+// and returns an auto.GroupReader, whether it was found, and an error.
+// Group reader will be nil if found equals false.
+//
+// CONTRACT: caller must close group reader.
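+//
+// A hedged usage sketch (mirroring how replay locates where to resume):
+//
+//	gr, found, err := wal.SearchForEndHeight(height, &WALSearchOptions{})
+//	if err == nil && found {
+//		defer gr.Close()
+//		dec := NewWALDecoder(gr)
+//		msg, _ := dec.Decode() // first entry recorded after EndHeightMessage{height}
+//	}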
+func (wal *BaseWAL) SearchForEndHeight(
+	height int64,
+	options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
+	var (
+		msg *TimedWALMessage
+		gr  *auto.GroupReader
+	)
+	lastHeightFound := int64(-1)
+
+	// NOTE: starting from the last file in the group because we're usually
+	// searching for the last height. See replay.go
+	min, max := wal.group.MinIndex(), wal.group.MaxIndex()
+	wal.Logger.Info("Searching for height", "height", height, "min", min, "max", max)
+	for index := max; index >= min; index-- {
+		gr, err = wal.group.NewReader(index)
+		if err != nil {
+			return nil, false, err
+		}
+
+		dec := NewWALDecoder(gr)
+		for {
+			msg, err = dec.Decode()
+			if err == io.EOF {
+				// OPTIMISATION: no need to look for height in older files if we've seen h < height
+				if lastHeightFound > 0 && lastHeightFound < height {
+					gr.Close()
+					return nil, false, nil
+				}
+				// check next file
+				break
+			}
+			if options.IgnoreDataCorruptionErrors && IsDataCorruptionError(err) {
+				wal.Logger.Error("Corrupted entry. Skipping...", "err", err)
+				// do nothing
+				continue
+			} else if err != nil {
+				gr.Close()
+				return nil, false, err
+			}
+
+			if m, ok := msg.Msg.(EndHeightMessage); ok {
+				lastHeightFound = m.Height
+				if m.Height == height { // found
+					wal.Logger.Info("Found", "height", height, "index", index)
+					return gr, true, nil
+				}
+			}
+		}
+		gr.Close()
+	}
+
+	return nil, false, nil
+}
+
+// /////////////////////////////////////////////////////////////////////////////
+
+// A WALEncoder writes custom-encoded WAL messages to an output stream.
+//
+// Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value
+type WALEncoder struct {
+	wr io.Writer
+}
+
+// NewWALEncoder returns a new encoder that writes to wr.
+func NewWALEncoder(wr io.Writer) *WALEncoder {
+	return &WALEncoder{wr}
+}
+
+// Encode writes the custom encoding of v to the stream. It returns an error if
+// the encoded size of v is greater than 1MB. Any error encountered
+// during the write is also returned.
+func (enc *WALEncoder) Encode(v *TimedWALMessage) error {
+	pbMsg, err := WALToProto(v.Msg)
+	if err != nil {
+		return err
+	}
+	pv := tmcons.TimedWALMessage{
+		Time: v.Time,
+		Msg:  pbMsg,
+	}
+
+	data, err := proto.Marshal(&pv)
+	if err != nil {
+		panic(fmt.Errorf("encode timed wal message failure: %w", err))
+	}
+
+	crc := crc32.Checksum(data, crc32c)
+	length := uint32(len(data))
+	if length > maxMsgSizeBytes {
+		return fmt.Errorf("msg is too big: %d bytes, max: %d bytes", length, maxMsgSizeBytes)
+	}
+	totalLength := 8 + int(length)
+
+	msg := make([]byte, totalLength)
+	binary.BigEndian.PutUint32(msg[0:4], crc)
+	binary.BigEndian.PutUint32(msg[4:8], length)
+	copy(msg[8:], data)
+
+	_, err = enc.wr.Write(msg)
+	return err
+}
+
+// /////////////////////////////////////////////////////////////////////////////
+
+// IsDataCorruptionError returns true if data has been corrupted inside WAL.
+func IsDataCorruptionError(err error) bool {
+	_, ok := err.(DataCorruptionError)
+	return ok
+}
+
+// DataCorruptionError is an error that occurs if data on disk was corrupted.
+type DataCorruptionError struct {
+	cause error
+}
+
+func (e DataCorruptionError) Error() string {
+	return fmt.Sprintf("DataCorruptionError[%v]", e.cause)
+}
+
+func (e DataCorruptionError) Cause() error {
+	return e.cause
+}
+
+// A WALDecoder reads and decodes custom-encoded WAL messages from an input
+// stream. See WALEncoder for the format used.
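+//
+// For reference, each frame produced by WALEncoder looks like (big-endian):
+//
+//	[4-byte CRC32C of data][4-byte length][length bytes: proto-encoded
+//	tmcons.TimedWALMessage]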
+// +// It will also compare the checksums and make sure data size is equal to the +// length from the header. If that is not the case, error will be returned. +type WALDecoder struct { + rd io.Reader +} + +// NewWALDecoder returns a new decoder that reads from rd. +func NewWALDecoder(rd io.Reader) *WALDecoder { + return &WALDecoder{rd} +} + +// Decode reads the next custom-encoded value from its reader and returns it. +func (dec *WALDecoder) Decode() (*TimedWALMessage, error) { + b := make([]byte, 4) + + _, err := dec.rd.Read(b) + if errors.Is(err, io.EOF) { + return nil, err + } + if err != nil { + return nil, DataCorruptionError{fmt.Errorf("failed to read checksum: %v", err)} + } + crc := binary.BigEndian.Uint32(b) + + b = make([]byte, 4) + _, err = dec.rd.Read(b) + if err != nil { + return nil, DataCorruptionError{fmt.Errorf("failed to read length: %v", err)} + } + length := binary.BigEndian.Uint32(b) + + if length > maxMsgSizeBytes { + return nil, DataCorruptionError{fmt.Errorf( + "length %d exceeded maximum possible value of %d bytes", + length, + maxMsgSizeBytes)} + } + + data := make([]byte, length) + n, err := dec.rd.Read(data) + if err != nil { + return nil, DataCorruptionError{fmt.Errorf("failed to read data: %v (read: %d, wanted: %d)", err, n, length)} + } + + // check checksum before decoding data + actualCRC := crc32.Checksum(data, crc32c) + if actualCRC != crc { + return nil, DataCorruptionError{fmt.Errorf("checksums do not match: read: %v, actual: %v", crc, actualCRC)} + } + + var res = new(tmcons.TimedWALMessage) + err = proto.Unmarshal(data, res) + if err != nil { + return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)} + } + + walMsg, err := WALFromProto(res.Msg) + if err != nil { + return nil, DataCorruptionError{fmt.Errorf("failed to convert from proto: %w", err)} + } + tMsgWal := &TimedWALMessage{ + Time: res.Time, + Msg: walMsg, + } + + return tMsgWal, err +} + +type nilWAL struct{} + +var _ WAL = nilWAL{} + +func (nilWAL) Write(m WALMessage) error { return nil } +func (nilWAL) WriteSync(m WALMessage) error { return nil } +func (nilWAL) FlushAndSync() error { return nil } +func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { + return nil, false, nil +} +func (nilWAL) Start() error { return nil } +func (nilWAL) Stop() error { return nil } +func (nilWAL) Wait() {} diff --git a/test/maverick/consensus/wal_fuzz.go b/test/maverick/consensus/wal_fuzz.go new file mode 100644 index 000000000..e15097c30 --- /dev/null +++ b/test/maverick/consensus/wal_fuzz.go @@ -0,0 +1,31 @@ +// +build gofuzz + +package consensus + +import ( + "bytes" + "io" +) + +func Fuzz(data []byte) int { + dec := NewWALDecoder(bytes.NewReader(data)) + for { + msg, err := dec.Decode() + if err == io.EOF { + break + } + if err != nil { + if msg != nil { + panic("msg != nil on error") + } + return 0 + } + var w bytes.Buffer + enc := NewWALEncoder(&w) + err = enc.Encode(msg) + if err != nil { + panic(err) + } + } + return 1 +} diff --git a/test/maverick/consensus/wal_generator.go b/test/maverick/consensus/wal_generator.go new file mode 100644 index 000000000..c691f371d --- /dev/null +++ b/test/maverick/consensus/wal_generator.go @@ -0,0 +1,232 @@ +package consensus + +import ( + "bufio" + "bytes" + "fmt" + "io" + "path/filepath" + "testing" + "time" + + db "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/abci/example/kvstore" + cfg "github.com/tendermint/tendermint/config" + 
"github.com/tendermint/tendermint/libs/log" + tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/types" +) + +// WALGenerateNBlocks generates a consensus WAL. It does this by spinning up a +// stripped down version of node (proxy app, event bus, consensus state) with a +// persistent kvstore application and special consensus wal instance +// (byteBufferWAL) and waits until numBlocks are created. +// If the node fails to produce given numBlocks, it returns an error. +func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { + config := getConfig(t) + + app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator")) + + logger := log.TestingLogger().With("wal_generator", "wal_generator") + logger.Info("generating WAL (last height msg excluded)", "numBlocks", numBlocks) + + // /////////////////////////////////////////////////////////////////////////// + // COPY PASTE FROM node.go WITH A FEW MODIFICATIONS + // NOTE: we can't import node package because of circular dependency. + // NOTE: we don't do handshake so need to set state.Version.Consensus.App directly. + privValidatorKeyFile := config.PrivValidatorKeyFile() + privValidatorStateFile := config.PrivValidatorStateFile() + privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) + if err != nil { + return err + } + genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) + if err != nil { + return fmt.Errorf("failed to read genesis file: %w", err) + } + blockStoreDB := db.NewMemDB() + stateDB := blockStoreDB + stateStore := sm.NewStore(stateDB) + state, err := sm.MakeGenesisState(genDoc) + if err != nil { + return fmt.Errorf("failed to make genesis state: %w", err) + } + state.Version.Consensus.App = kvstore.ProtocolVersion + if err = stateStore.Save(state); err != nil { + t.Error(err) + } + + blockStore := store.NewBlockStore(blockStoreDB) + + proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app)) + proxyApp.SetLogger(logger.With("module", "proxy")) + if err := proxyApp.Start(); err != nil { + return fmt.Errorf("failed to start proxy app connections: %w", err) + } + t.Cleanup(func() { + if err := proxyApp.Stop(); err != nil { + t.Error(err) + } + }) + + eventBus := types.NewEventBus() + eventBus.SetLogger(logger.With("module", "events")) + if err := eventBus.Start(); err != nil { + return fmt.Errorf("failed to start event bus: %w", err) + } + t.Cleanup(func() { + if err := eventBus.Stop(); err != nil { + t.Error(err) + } + }) + mempool := emptyMempool{} + evpool := sm.EmptyEvidencePool{} + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) + consensusState := NewState(config.Consensus, state.Copy(), + blockExec, blockStore, mempool, evpool, map[int64]Misbehavior{}) + consensusState.SetLogger(logger) + consensusState.SetEventBus(eventBus) + if privValidator != nil { + consensusState.SetPrivValidator(privValidator) + } + // END OF COPY PASTE + // /////////////////////////////////////////////////////////////////////////// + + // set consensus wal to buffered WAL, which will write all incoming msgs to buffer + numBlocksWritten := make(chan struct{}) + wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten) + // see wal.go#103 + if err := 
wal.Write(EndHeightMessage{0}); err != nil { + t.Error(err) + } + + consensusState.wal = wal + + if err := consensusState.Start(); err != nil { + return fmt.Errorf("failed to start consensus state: %w", err) + } + + select { + case <-numBlocksWritten: + if err := consensusState.Stop(); err != nil { + t.Error(err) + } + return nil + case <-time.After(1 * time.Minute): + if err := consensusState.Stop(); err != nil { + t.Error(err) + } + return fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks) + } +} + +// WALWithNBlocks returns a WAL content with numBlocks. +func WALWithNBlocks(t *testing.T, numBlocks int) (data []byte, err error) { + var b bytes.Buffer + wr := bufio.NewWriter(&b) + + if err := WALGenerateNBlocks(t, wr, numBlocks); err != nil { + return []byte{}, err + } + + wr.Flush() + return b.Bytes(), nil +} + +func randPort() int { + // returns between base and base + spread + base, spread := 20000, 20000 + return base + tmrand.Intn(spread) +} + +func makeAddrs() (string, string, string) { + start := randPort() + return fmt.Sprintf("tcp://127.0.0.1:%d", start), + fmt.Sprintf("tcp://127.0.0.1:%d", start+1), + fmt.Sprintf("tcp://127.0.0.1:%d", start+2) +} + +// getConfig returns a config for test cases +func getConfig(t *testing.T) *cfg.Config { + c := cfg.ResetTestRoot(t.Name()) + + // and we use random ports to run in parallel + tm, rpc, grpc := makeAddrs() + c.P2P.ListenAddress = tm + c.RPC.ListenAddress = rpc + c.RPC.GRPCListenAddress = grpc + return c +} + +// byteBufferWAL is a WAL which writes all msgs to a byte buffer. Writing stops +// when the heightToStop is reached. Client will be notified via +// signalWhenStopsTo channel. +type byteBufferWAL struct { + enc *WALEncoder + stopped bool + heightToStop int64 + signalWhenStopsTo chan<- struct{} + + logger log.Logger +} + +// needed for determinism +var fixedTime, _ = time.Parse(time.RFC3339, "2017-01-02T15:04:05Z") + +func newByteBufferWAL(logger log.Logger, enc *WALEncoder, nBlocks int64, signalStop chan<- struct{}) *byteBufferWAL { + return &byteBufferWAL{ + enc: enc, + heightToStop: nBlocks, + signalWhenStopsTo: signalStop, + logger: logger, + } +} + +// Save writes message to the internal buffer except when heightToStop is +// reached, in which case it will signal the caller via signalWhenStopsTo and +// skip writing. +func (w *byteBufferWAL) Write(m WALMessage) error { + if w.stopped { + w.logger.Debug("WAL already stopped. 
Not writing message", "msg", m) + return nil + } + + if endMsg, ok := m.(EndHeightMessage); ok { + w.logger.Debug("WAL write end height message", "height", endMsg.Height, "stopHeight", w.heightToStop) + if endMsg.Height == w.heightToStop { + w.logger.Debug("Stopping WAL at height", "height", endMsg.Height) + w.signalWhenStopsTo <- struct{}{} + w.stopped = true + return nil + } + } + + w.logger.Debug("WAL Write Message", "msg", m) + err := w.enc.Encode(&TimedWALMessage{fixedTime, m}) + if err != nil { + panic(fmt.Sprintf("failed to encode the msg %v", m)) + } + + return nil +} + +func (w *byteBufferWAL) WriteSync(m WALMessage) error { + return w.Write(m) +} + +func (w *byteBufferWAL) FlushAndSync() error { return nil } + +func (w *byteBufferWAL) SearchForEndHeight( + height int64, + options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { + return nil, false, nil +} + +func (w *byteBufferWAL) Start() error { return nil } +func (w *byteBufferWAL) Stop() error { return nil } +func (w *byteBufferWAL) Wait() {} diff --git a/test/maverick/main.go b/test/maverick/main.go new file mode 100644 index 000000000..d7a209e38 --- /dev/null +++ b/test/maverick/main.go @@ -0,0 +1,250 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + + cmd "github.com/tendermint/tendermint/cmd/tendermint/commands" + "github.com/tendermint/tendermint/cmd/tendermint/commands/debug" + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/cli" + tmflags "github.com/tendermint/tendermint/libs/cli/flags" + "github.com/tendermint/tendermint/libs/log" + tmos "github.com/tendermint/tendermint/libs/os" + tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/p2p" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + cs "github.com/tendermint/tendermint/test/maverick/consensus" + nd "github.com/tendermint/tendermint/test/maverick/node" + "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" +) + +var ( + config = cfg.DefaultConfig() + logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + misbehaviorFlag = "" +) + +func init() { + registerFlagsRootCmd(RootCmd) +} + +func registerFlagsRootCmd(command *cobra.Command) { + command.PersistentFlags().String("log_level", config.LogLevel, "Log level") +} + +func ParseConfig() (*cfg.Config, error) { + conf := cfg.DefaultConfig() + err := viper.Unmarshal(conf) + if err != nil { + return nil, err + } + conf.SetRoot(conf.RootDir) + cfg.EnsureRoot(conf.RootDir) + if err = conf.ValidateBasic(); err != nil { + return nil, fmt.Errorf("error in config file: %v", err) + } + return conf, err +} + +// RootCmd is the root command for Tendermint core. +var RootCmd = &cobra.Command{ + Use: "maverick", + Short: "Tendermint Maverick Node", + Long: "Tendermint Maverick Node for testing with faulty consensus misbehaviors in a testnet. Contains " + + "all the functionality of a normal node but custom misbehaviors can be injected when running the node " + + "through a flag. 
See maverick node --help for how the misbehavior flag is constructed",
+	PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) {
+		fmt.Printf("use: %v, args: %v", cmd.Use, cmd.Args)
+		config, err = ParseConfig()
+		if err != nil {
+			return err
+		}
+		if config.LogFormat == cfg.LogFormatJSON {
+			logger = log.NewTMJSONLogger(log.NewSyncWriter(os.Stdout))
+		}
+		logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel())
+		if err != nil {
+			return err
+		}
+		if viper.GetBool(cli.TraceFlag) {
+			logger = log.NewTracingLogger(logger)
+		}
+		logger = logger.With("module", "main")
+		return nil
+	},
+}
+
+func main() {
+	rootCmd := RootCmd
+	rootCmd.AddCommand(
+		ListMisbehaviorCmd,
+		cmd.GenValidatorCmd,
+		InitFilesCmd,
+		cmd.ProbeUpnpCmd,
+		cmd.ReplayCmd,
+		cmd.ReplayConsoleCmd,
+		cmd.ResetAllCmd,
+		cmd.ResetPrivValidatorCmd,
+		cmd.ShowValidatorCmd,
+		cmd.ShowNodeIDCmd,
+		cmd.GenNodeKeyCmd,
+		cmd.VersionCmd,
+		debug.DebugCmd,
+		cli.NewCompletionCmd(rootCmd, true),
+	)
+
+	nodeCmd := &cobra.Command{
+		Use:   "node",
+		Short: "Run the maverick node",
+		RunE: func(command *cobra.Command, args []string) error {
+			return startNode(config, logger, misbehaviorFlag)
+		},
+	}
+
+	cmd.AddNodeFlags(nodeCmd)
+
+	// Create & start node
+	rootCmd.AddCommand(nodeCmd)
+
+	// add special flag for misbehaviors
+	nodeCmd.Flags().StringVar(
+		&misbehaviorFlag,
+		"misbehaviors",
+		"",
+		"Select the misbehaviors of the node (comma-separated, no spaces in between): \n"+
+			"e.g. --misbehaviors double-prevote,3\n"+
+			"You can also have multiple misbehaviors: e.g. double-prevote,3,no-vote,5")
+
+	cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", cfg.DefaultTendermintDir)))
+	if err := cmd.Execute(); err != nil {
+		panic(err)
+	}
+}
+
+func startNode(config *cfg.Config, logger log.Logger, misbehaviorFlag string) error {
+	misbehaviors, err := nd.ParseMisbehaviors(misbehaviorFlag)
+	if err != nil {
+		return err
+	}
+
+	node, err := nd.DefaultNewNode(config, logger, misbehaviors)
+	if err != nil {
+		return fmt.Errorf("failed to create node: %w", err)
+	}
+
+	if err := node.Start(); err != nil {
+		return fmt.Errorf("failed to start node: %w", err)
+	}
+
+	logger.Info("Started node", "nodeInfo", node.Switch().NodeInfo())
+
+	// Stop upon receiving SIGTERM or CTRL-C.
+	tmos.TrapSignal(logger, func() {
+		if node.IsRunning() {
+			if err := node.Stop(); err != nil {
+				logger.Error("unable to stop the node", "error", err)
+			}
+		}
+	})
+
+	// Run forever.
+	select {}
+}
+
+var keyType string
+
+var InitFilesCmd = &cobra.Command{
+	Use:   "init",
+	Short: "Initialize Tendermint",
+	RunE:  initFiles,
+}
+
+func init() {
+	InitFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
+		"Key type to generate privval file with. 
Options: ed25519, secp256k1") +} + +func initFiles(cmd *cobra.Command, args []string) error { + return initFilesWithConfig(config) +} + +func initFilesWithConfig(config *cfg.Config) error { + // private validator + privValKeyFile := config.PrivValidatorKeyFile() + privValStateFile := config.PrivValidatorStateFile() + var pv *nd.FilePV + if tmos.FileExists(privValKeyFile) { + pv = nd.LoadFilePV(privValKeyFile, privValStateFile) + logger.Info("Found private validator", "keyFile", privValKeyFile, + "stateFile", privValStateFile) + } else { + pv = nd.GenFilePV(privValKeyFile, privValStateFile) + pv.Save() + logger.Info("Generated private validator", "keyFile", privValKeyFile, + "stateFile", privValStateFile) + } + + nodeKeyFile := config.NodeKeyFile() + if tmos.FileExists(nodeKeyFile) { + logger.Info("Found node key", "path", nodeKeyFile) + } else { + if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil { + return err + } + logger.Info("Generated node key", "path", nodeKeyFile) + } + + // genesis file + genFile := config.GenesisFile() + if tmos.FileExists(genFile) { + logger.Info("Found genesis file", "path", genFile) + } else { + genDoc := types.GenesisDoc{ + ChainID: fmt.Sprintf("test-chain-%v", tmrand.Str(6)), + GenesisTime: tmtime.Now(), + ConsensusParams: types.DefaultConsensusParams(), + } + if keyType == "secp256k1" { + genDoc.ConsensusParams.Validator = tmproto.ValidatorParams{ + PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1}, + } + } + pubKey, err := pv.GetPubKey() + if err != nil { + return fmt.Errorf("can't get pubkey: %w", err) + } + genDoc.Validators = []types.GenesisValidator{{ + Address: pubKey.Address(), + PubKey: pubKey, + Power: 10, + }} + + if err := genDoc.SaveAs(genFile); err != nil { + return err + } + logger.Info("Generated genesis file", "path", genFile) + } + + return nil +} + +var ListMisbehaviorCmd = &cobra.Command{ + Use: "misbehaviors", + Short: "Lists possible misbehaviors", + RunE: listMisbehaviors, +} + +func listMisbehaviors(cmd *cobra.Command, args []string) error { + str := "Currently registered misbehaviors: \n" + for key := range cs.MisbehaviorList { + str += fmt.Sprintf("- %s\n", key) + } + fmt.Println(str) + return nil +} diff --git a/test/maverick/node/node.go b/test/maverick/node/node.go new file mode 100644 index 000000000..e1f41b6fb --- /dev/null +++ b/test/maverick/node/node.go @@ -0,0 +1,1440 @@ +package node + +import ( + "bytes" + "context" + "errors" + "fmt" + "net" + "net/http" + _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port + "strconv" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/rs/cors" + + dbm "github.com/tendermint/tm-db" + + abci "github.com/tendermint/tendermint/abci/types" + bcv0 "github.com/tendermint/tendermint/blockchain/v0" + bcv1 "github.com/tendermint/tendermint/blockchain/v1" + bcv2 "github.com/tendermint/tendermint/blockchain/v2" + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/consensus" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/evidence" + tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/libs/log" + tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/light" + mempl "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/p2p" + 
"github.com/tendermint/tendermint/p2p/pex" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/proxy" + rpccore "github.com/tendermint/tendermint/rpc/core" + grpccore "github.com/tendermint/tendermint/rpc/grpc" + rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/state/txindex" + "github.com/tendermint/tendermint/state/txindex/kv" + "github.com/tendermint/tendermint/state/txindex/null" + "github.com/tendermint/tendermint/statesync" + "github.com/tendermint/tendermint/store" + cs "github.com/tendermint/tendermint/test/maverick/consensus" + "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" + "github.com/tendermint/tendermint/version" +) + +//------------------------------------------------------------------------------ + +// ParseMisbehaviors is a util function that converts a comma separated string into +// a map of misbehaviors to be executed by the maverick node +func ParseMisbehaviors(str string) (map[int64]cs.Misbehavior, error) { + // check if string is empty in which case we run a normal node + var misbehaviors = make(map[int64]cs.Misbehavior) + if str == "" { + return misbehaviors, nil + } + strs := strings.Split(str, ",") + if len(strs)%2 != 0 { + return misbehaviors, errors.New("missing either height or misbehavior name in the misbehavior flag") + } +OUTER_LOOP: + for i := 0; i < len(strs); i += 2 { + height, err := strconv.ParseInt(strs[i+1], 10, 64) + if err != nil { + return misbehaviors, fmt.Errorf("failed to parse misbehavior height: %w", err) + } + for key, misbehavior := range cs.MisbehaviorList { + if key == strs[i] { + misbehaviors[height] = misbehavior + continue OUTER_LOOP + } + } + return misbehaviors, fmt.Errorf("received unknown misbehavior: %s. Did you forget to add it?", strs[i]) + } + + return misbehaviors, nil +} + +// DBContext specifies config information for loading a new DB. +type DBContext struct { + ID string + Config *cfg.Config +} + +// DBProvider takes a DBContext and returns an instantiated DB. +type DBProvider func(*DBContext) (dbm.DB, error) + +// DefaultDBProvider returns a database using the DBBackend and DBDir +// specified in the ctx.Config. +func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { + dbType := dbm.BackendType(ctx.Config.DBBackend) + return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) +} + +// GenesisDocProvider returns a GenesisDoc. +// It allows the GenesisDoc to be pulled from sources other than the +// filesystem, for instance from a distributed key-value store cluster. +type GenesisDocProvider func() (*types.GenesisDoc, error) + +// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads +// the GenesisDoc from the config.GenesisFile() on the filesystem. +func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider { + return func() (*types.GenesisDoc, error) { + return types.GenesisDocFromFile(config.GenesisFile()) + } +} + +// Provider takes a config and a logger and returns a ready to go Node. +type Provider func(*cfg.Config, log.Logger) (*Node, error) + +// DefaultNewNode returns a Tendermint node with default settings for the +// PrivValidator, ClientCreator, GenesisDoc, and DBProvider. +// It implements NodeProvider. 
+func DefaultNewNode(config *cfg.Config, logger log.Logger, misbehaviors map[int64]cs.Misbehavior) (*Node, error) { + nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) + if err != nil { + return nil, fmt.Errorf("failed to load or gen node key %s, err: %w", config.NodeKeyFile(), err) + } + + return NewNode(config, + LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()), + nodeKey, + proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), + DefaultGenesisDocProviderFunc(config), + DefaultDBProvider, + DefaultMetricsProvider(config.Instrumentation), + logger, + misbehaviors, + ) + +} + +// MetricsProvider returns a consensus, p2p and mempool Metrics. +type MetricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) + +// DefaultMetricsProvider returns Metrics build using Prometheus client library +// if Prometheus is enabled. Otherwise, it returns no-op Metrics. +func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { + return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) { + if config.Prometheus { + return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID), + p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID), + mempl.PrometheusMetrics(config.Namespace, "chain_id", chainID), + sm.PrometheusMetrics(config.Namespace, "chain_id", chainID) + } + return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics() + } +} + +// Option sets a parameter for the node. +type Option func(*Node) + +// Temporary interface for switching to fast sync, we should get rid of v0 and v1 reactors. +// See: https://github.com/tendermint/tendermint/issues/4595 +type fastSyncReactor interface { + SwitchToFastSync(sm.State) error +} + +// CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to +// the node's Switch. +// +// WARNING: using any name from the below list of the existing reactors will +// result in replacing it with the custom one. +// +// - MEMPOOL +// - BLOCKCHAIN +// - CONSENSUS +// - EVIDENCE +// - PEX +// - STATESYNC +func CustomReactors(reactors map[string]p2p.Reactor) Option { + return func(n *Node) { + for name, reactor := range reactors { + if existingReactor := n.sw.Reactor(name); existingReactor != nil { + n.sw.Logger.Info("Replacing existing reactor with a custom one", + "name", name, "existing", existingReactor, "custom", reactor) + n.sw.RemoveReactor(name, existingReactor) + } + n.sw.AddReactor(name, reactor) + } + } +} + +func CustomReactorsAsConstructors(reactors map[string]func(n *Node) p2p.Reactor) Option { + return func(n *Node) { + for name, customReactor := range reactors { + if existingReactor := n.sw.Reactor(name); existingReactor != nil { + n.sw.Logger.Info("Replacing existing reactor with a custom one", + "name", name) + n.sw.RemoveReactor(name, existingReactor) + } + n.sw.AddReactor(name, customReactor(n)) + } + } +} + +// StateProvider overrides the state provider used by state sync to retrieve trusted app hashes and +// build a State object for bootstrapping the node. +// WARNING: this interface is considered unstable and subject to change. +func StateProvider(stateProvider statesync.StateProvider) Option { + return func(n *Node) { + n.stateSyncProvider = stateProvider + } +} + +//------------------------------------------------------------------------------ + +// Node is the highest level interface to a full Tendermint node. 
+// It includes all configuration information and running services. +type Node struct { + service.BaseService + + // config + config *cfg.Config + genesisDoc *types.GenesisDoc // initial validator set + privValidator types.PrivValidator // local node's validator key + + // network + transport *p2p.MultiplexTransport + sw *p2p.Switch // p2p connections + addrBook pex.AddrBook // known peers + nodeInfo p2p.NodeInfo + nodeKey *p2p.NodeKey // our node privkey + isListening bool + + // services + eventBus *types.EventBus // pub/sub for services + stateStore sm.Store + blockStore *store.BlockStore // store the blockchain to disk + bcReactor p2p.Reactor // for fast-syncing + mempoolReactor *mempl.Reactor // for gossipping transactions + mempool mempl.Mempool + stateSync bool // whether the node should state sync on startup + stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots + stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node + stateSyncGenesis sm.State // provides the genesis state for state sync + consensusState *cs.State // latest consensus state + consensusReactor *cs.Reactor // for participating in the consensus + pexReactor *pex.Reactor // for exchanging peer addresses + evidencePool *evidence.Pool // tracking evidence + proxyApp proxy.AppConns // connection to the application + rpcListeners []net.Listener // rpc servers + txIndexer txindex.TxIndexer + indexerService *txindex.IndexerService + prometheusSrv *http.Server +} + +func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { + var blockStoreDB dbm.DB + blockStoreDB, err = dbProvider(&DBContext{"blockstore", config}) + if err != nil { + return + } + blockStore = store.NewBlockStore(blockStoreDB) + + stateDB, err = dbProvider(&DBContext{"state", config}) + if err != nil { + return + } + + return +} + +func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) { + proxyApp := proxy.NewAppConns(clientCreator) + proxyApp.SetLogger(logger.With("module", "proxy")) + if err := proxyApp.Start(); err != nil { + return nil, fmt.Errorf("error starting proxy app connections: %v", err) + } + return proxyApp, nil +} + +func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) { + eventBus := types.NewEventBus() + eventBus.SetLogger(logger.With("module", "events")) + if err := eventBus.Start(); err != nil { + return nil, err + } + return eventBus, nil +} + +func createAndStartIndexerService(config *cfg.Config, dbProvider DBProvider, + eventBus *types.EventBus, logger log.Logger) (*txindex.IndexerService, txindex.TxIndexer, error) { + + var txIndexer txindex.TxIndexer + switch config.TxIndex.Indexer { + case "kv": + store, err := dbProvider(&DBContext{"tx_index", config}) + if err != nil { + return nil, nil, err + } + txIndexer = kv.NewTxIndex(store) + default: + txIndexer = &null.TxIndex{} + } + + indexerService := txindex.NewIndexerService(txIndexer, eventBus) + indexerService.SetLogger(logger.With("module", "txindex")) + if err := indexerService.Start(); err != nil { + return nil, nil, err + } + return indexerService, txIndexer, nil +} + +func doHandshake( + stateStore sm.Store, + state sm.State, + blockStore sm.BlockStore, + genDoc *types.GenesisDoc, + eventBus types.BlockEventPublisher, + proxyApp proxy.AppConns, + consensusLogger log.Logger) error { + + handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) + 
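// The handshaker asks the app for its last block height (RequestInfo) and
+	// replays any blocks the app is missing from the block store; the event
+	// bus is attached so transactions in replayed blocks can still be indexed.
+	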
handshaker.SetLogger(consensusLogger) + handshaker.SetEventBus(eventBus) + if err := handshaker.Handshake(proxyApp); err != nil { + return fmt.Errorf("error during handshake: %v", err) + } + return nil +} + +func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger) { + // Log the version info. + logger.Info("Version info", + "software", version.TMCoreSemVer, + "block", version.BlockProtocol, + "p2p", version.P2PProtocol, + ) + + // If the state and software differ in block version, at least log it. + if state.Version.Consensus.Block != version.BlockProtocol { + logger.Info("Software and state have different block protocols", + "software", version.BlockProtocol, + "state", state.Version.Consensus.Block, + ) + } + + addr := pubKey.Address() + // Log whether this node is a validator or an observer + if state.Validators.HasAddress(addr) { + consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey) + } else { + consensusLogger.Info("This node is not a validator", "addr", addr, "pubKey", pubKey) + } +} + +func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool { + if state.Validators.Size() > 1 { + return false + } + addr, _ := state.Validators.GetByIndex(0) + return bytes.Equal(pubKey.Address(), addr) +} + +func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns, + state sm.State, memplMetrics *mempl.Metrics, logger log.Logger) (*mempl.Reactor, *mempl.CListMempool) { + + mempool := mempl.NewCListMempool( + config.Mempool, + proxyApp.Mempool(), + state.LastBlockHeight, + mempl.WithMetrics(memplMetrics), + mempl.WithPreCheck(sm.TxPreCheck(state)), + mempl.WithPostCheck(sm.TxPostCheck(state)), + ) + mempoolLogger := logger.With("module", "mempool") + mempoolReactor := mempl.NewReactor(config.Mempool, mempool) + mempoolReactor.SetLogger(mempoolLogger) + + if config.Consensus.WaitForTxs() { + mempool.EnableTxsAvailable() + } + return mempoolReactor, mempool +} + +func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider, + stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) { + + evidenceDB, err := dbProvider(&DBContext{"evidence", config}) + if err != nil { + return nil, nil, err + } + evidenceLogger := logger.With("module", "evidence") + evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore) + if err != nil { + return nil, nil, err + } + evidenceReactor := evidence.NewReactor(evidencePool) + evidenceReactor.SetLogger(evidenceLogger) + return evidenceReactor, evidencePool, nil +} + +func createBlockchainReactor(config *cfg.Config, + state sm.State, + blockExec *sm.BlockExecutor, + blockStore *store.BlockStore, + fastSync bool, + logger log.Logger) (bcReactor p2p.Reactor, err error) { + + switch config.FastSync.Version { + case "v0": + bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) + case "v1": + bcReactor = bcv1.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) + case "v2": + bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) + default: + return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version) + } + + bcReactor.SetLogger(logger.With("module", "blockchain")) + return bcReactor, nil +} + +func createConsensusReactor(config *cfg.Config, + state sm.State, + blockExec *sm.BlockExecutor, + blockStore sm.BlockStore, + mempool *mempl.CListMempool, + evidencePool *evidence.Pool, + privValidator 
types.PrivValidator, + csMetrics *cs.Metrics, + waitSync bool, + eventBus *types.EventBus, + consensusLogger log.Logger, + misbehaviors map[int64]cs.Misbehavior) (*cs.Reactor, *cs.State) { + + consensusState := cs.NewState( + config.Consensus, + state.Copy(), + blockExec, + blockStore, + mempool, + evidencePool, + misbehaviors, + cs.StateMetrics(csMetrics), + ) + consensusState.SetLogger(consensusLogger) + if privValidator != nil { + consensusState.SetPrivValidator(privValidator) + } + consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics)) + consensusReactor.SetLogger(consensusLogger) + // services which will be publishing and/or subscribing for messages (events) + // consensusReactor will set it on consensusState and blockExecutor + consensusReactor.SetEventBus(eventBus) + return consensusReactor, consensusState +} + +func createTransport( + config *cfg.Config, + nodeInfo p2p.NodeInfo, + nodeKey *p2p.NodeKey, + proxyApp proxy.AppConns, +) ( + *p2p.MultiplexTransport, + []p2p.PeerFilterFunc, +) { + var ( + mConnConfig = p2p.MConnConfig(config.P2P) + transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig) + connFilters = []p2p.ConnFilterFunc{} + peerFilters = []p2p.PeerFilterFunc{} + ) + + if !config.P2P.AllowDuplicateIP { + connFilters = append(connFilters, p2p.ConnDuplicateIPFilter()) + } + + // Filter peers by addr or pubkey with an ABCI query. + // If the query return code is OK, add peer. + if config.FilterPeers { + connFilters = append( + connFilters, + // ABCI query for address filtering. + func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error { + res, err := proxyApp.Query().QuerySync(abci.RequestQuery{ + Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()), + }) + if err != nil { + return err + } + if res.IsErr() { + return fmt.Errorf("error querying abci app: %v", res) + } + + return nil + }, + ) + + peerFilters = append( + peerFilters, + // ABCI query for ID filtering. + func(_ p2p.IPeerSet, p p2p.Peer) error { + res, err := proxyApp.Query().QuerySync(abci.RequestQuery{ + Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()), + }) + if err != nil { + return err + } + if res.IsErr() { + return fmt.Errorf("error querying abci app: %v", res) + } + + return nil + }, + ) + } + + p2p.MultiplexTransportConnFilters(connFilters...)(transport) + + // Limit the number of incoming connections. 
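+	// The cap below leaves headroom above MaxNumInboundPeers so that
+	// unconditional peers can always connect.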
+ max := config.P2P.MaxNumInboundPeers + len(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) + p2p.MultiplexTransportMaxIncomingConnections(max)(transport) + + return transport, peerFilters +} + +func createSwitch(config *cfg.Config, + transport p2p.Transport, + p2pMetrics *p2p.Metrics, + peerFilters []p2p.PeerFilterFunc, + mempoolReactor *mempl.Reactor, + bcReactor p2p.Reactor, + stateSyncReactor *statesync.Reactor, + consensusReactor *cs.Reactor, + evidenceReactor *evidence.Reactor, + nodeInfo p2p.NodeInfo, + nodeKey *p2p.NodeKey, + p2pLogger log.Logger) *p2p.Switch { + + sw := p2p.NewSwitch( + config.P2P, + transport, + p2p.WithMetrics(p2pMetrics), + p2p.SwitchPeerFilters(peerFilters...), + ) + sw.SetLogger(p2pLogger) + sw.AddReactor("MEMPOOL", mempoolReactor) + sw.AddReactor("BLOCKCHAIN", bcReactor) + sw.AddReactor("CONSENSUS", consensusReactor) + sw.AddReactor("EVIDENCE", evidenceReactor) + sw.AddReactor("STATESYNC", stateSyncReactor) + + sw.SetNodeInfo(nodeInfo) + sw.SetNodeKey(nodeKey) + + p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile()) + return sw +} + +func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, + p2pLogger log.Logger, nodeKey *p2p.NodeKey) (pex.AddrBook, error) { + + addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) + addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) + + // Add ourselves to addrbook to prevent dialing ourselves + if config.P2P.ExternalAddress != "" { + addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ExternalAddress)) + if err != nil { + return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err) + } + addrBook.AddOurAddress(addr) + } + if config.P2P.ListenAddress != "" { + addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID(), config.P2P.ListenAddress)) + if err != nil { + return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err) + } + addrBook.AddOurAddress(addr) + } + + sw.SetAddrBook(addrBook) + + return addrBook, nil +} + +func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, + sw *p2p.Switch, logger log.Logger) *pex.Reactor { + + // TODO persistent peers ? so we can have their DNS addrs saved + pexReactor := pex.NewReactor(addrBook, + &pex.ReactorConfig{ + Seeds: splitAndTrimEmpty(config.P2P.Seeds, ",", " "), + SeedMode: config.P2P.SeedMode, + // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000 + // blocks assuming 10s blocks ~ 28 hours. + // TODO (melekes): make it dynamic based on the actual block latencies + // from the live network. + // https://github.com/tendermint/tendermint/issues/3523 + SeedDisconnectWaitPeriod: 28 * time.Hour, + PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod, + }) + pexReactor.SetLogger(logger.With("module", "pex")) + sw.AddReactor("PEX", pexReactor) + return pexReactor +} + +// startStateSync starts an asynchronous state sync process, then switches to fast sync mode. 
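+//
+// Illustrative call, mirroring Node.OnStart below:
+//
+//	err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider,
+//		n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis)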
+func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor, + stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool, + stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error { + ssR.Logger.Info("Starting state sync") + + if stateProvider == nil { + var err error + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + stateProvider, err = statesync.NewLightClientStateProvider( + ctx, + state.ChainID, state.Version, state.InitialHeight, + config.RPCServers, light.TrustOptions{ + Period: config.TrustPeriod, + Height: config.TrustHeight, + Hash: config.TrustHashBytes(), + }, ssR.Logger.With("module", "light")) + if err != nil { + return fmt.Errorf("failed to set up light client state provider: %w", err) + } + } + + go func() { + state, commit, err := ssR.Sync(stateProvider, config.DiscoveryTime) + if err != nil { + ssR.Logger.Error("State sync failed", "err", err) + return + } + err = stateStore.Bootstrap(state) + if err != nil { + ssR.Logger.Error("Failed to bootstrap node with new state", "err", err) + return + } + err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit) + if err != nil { + ssR.Logger.Error("Failed to store last seen commit", "err", err) + return + } + + if fastSync { + // FIXME Very ugly to have these metrics bleed through here. + conR.Metrics.StateSyncing.Set(0) + conR.Metrics.FastSyncing.Set(1) + err = bcR.SwitchToFastSync(state) + if err != nil { + ssR.Logger.Error("Failed to switch to fast sync", "err", err) + return + } + } else { + conR.SwitchToConsensus(state, true) + } + }() + return nil +} + +// NewNode returns a new, ready to go, Tendermint Node. +func NewNode(config *cfg.Config, + privValidator types.PrivValidator, + nodeKey *p2p.NodeKey, + clientCreator proxy.ClientCreator, + genesisDocProvider GenesisDocProvider, + dbProvider DBProvider, + metricsProvider MetricsProvider, + logger log.Logger, + misbehaviors map[int64]cs.Misbehavior, + options ...Option) (*Node, error) { + + blockStore, stateDB, err := initDBs(config, dbProvider) + if err != nil { + return nil, err + } + + stateStore := sm.NewStore(stateDB) + + state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider) + if err != nil { + return nil, err + } + + // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). + proxyApp, err := createAndStartProxyAppConns(clientCreator, logger) + if err != nil { + return nil, err + } + + // EventBus and IndexerService must be started before the handshake because + // we might need to index the txs of the replayed block as this might not have happened + // when the node stopped last time (i.e. the node stopped after it saved the block + // but before it indexed the txs, or, endblocker panicked) + eventBus, err := createAndStartEventBus(logger) + if err != nil { + return nil, err + } + + // Transaction indexing + indexerService, txIndexer, err := createAndStartIndexerService(config, dbProvider, eventBus, logger) + if err != nil { + return nil, err + } + + // If an address is provided, listen on the socket for a connection from an + // external signing process. 
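+	// (The socket client created below replaces the file-based private
+	// validator passed in by DefaultNewNode; see
+	// createAndStartPrivValidatorSocketClient at the bottom of this file.)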
+ if config.PrivValidatorListenAddr != "" { + // FIXME: we should start services inside OnStart + privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidatorListenAddr, genDoc.ChainID, logger) + if err != nil { + return nil, fmt.Errorf("error with private validator socket client: %w", err) + } + } + + pubKey, err := privValidator.GetPubKey() + if err != nil { + return nil, fmt.Errorf("can't get pubkey: %w", err) + } + + // Determine whether we should do state and/or fast sync. + // We don't fast-sync when the only validator is us. + fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey) + stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey) + if stateSync && state.LastBlockHeight > 0 { + logger.Info("Found local state with non-zero height, skipping state sync") + stateSync = false + } + + // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, + // and replays any blocks as necessary to sync tendermint with the app. + consensusLogger := logger.With("module", "consensus") + if !stateSync { + if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { + return nil, err + } + + // Reload the state. It will have the Version.Consensus.App set by the + // Handshake, and may have other modifications as well (ie. depending on + // what happened during block replay). + state, err = stateStore.Load() + if err != nil { + return nil, fmt.Errorf("cannot load state: %w", err) + } + } + + logNodeStartupInfo(state, pubKey, logger, consensusLogger) + + csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID) + + // Make MempoolReactor + mempoolReactor, mempool := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger) + + // Make Evidence Reactor + evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger) + if err != nil { + return nil, err + } + + // make block executor for consensus and blockchain reactors to execute blocks + blockExec := sm.NewBlockExecutor( + stateStore, + logger.With("module", "state"), + proxyApp.Consensus(), + mempool, + evidencePool, + sm.BlockExecutorWithMetrics(smMetrics), + ) + + // Make BlockchainReactor. Don't start fast sync if we're doing a state sync first. + bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && !stateSync, logger) + if err != nil { + return nil, fmt.Errorf("could not create blockchain reactor: %w", err) + } + + // Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first. + // FIXME We need to update metrics here, since other reactors don't have access to them. + if stateSync { + csMetrics.StateSyncing.Set(1) + } else if fastSync { + csMetrics.FastSyncing.Set(1) + } + + logger.Info("Setting up maverick consensus reactor", "Misbehaviors", misbehaviors) + consensusReactor, consensusState := createConsensusReactor( + config, state, blockExec, blockStore, mempool, evidencePool, + privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger, misbehaviors) + + // Set up state sync reactor, and schedule a sync if requested. + // FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy, + // we should clean this whole thing up. 
See: + // https://github.com/tendermint/tendermint/issues/4644 + stateSyncReactor := statesync.NewReactor(proxyApp.Snapshot(), proxyApp.Query(), + config.StateSync.TempDir) + stateSyncReactor.SetLogger(logger.With("module", "statesync")) + + nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state) + if err != nil { + return nil, err + } + + // Setup Transport. + transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp) + + // Setup Switch. + p2pLogger := logger.With("module", "p2p") + sw := createSwitch( + config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor, + stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger, + ) + + err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) + if err != nil { + return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) + } + + err = sw.AddUnconditionalPeerIDs(splitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) + if err != nil { + return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) + } + + addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) + if err != nil { + return nil, fmt.Errorf("could not create addrbook: %w", err) + } + + // Optionally, start the pex reactor + // + // TODO: + // + // We need to set Seeds and PersistentPeers on the switch, + // since it needs to be able to use these (and their DNS names) + // even if the PEX is off. We can include the DNS name in the NetAddress, + // but it would still be nice to have a clear list of the current "PersistentPeers" + // somewhere that we can return with net_info. + // + // If PEX is on, it should handle dialing the seeds. Otherwise the switch does it. + // Note we currently use the addrBook regardless at least for AddOurAddress + var pexReactor *pex.Reactor + if config.P2P.PexReactor { + pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) + } + + if config.RPC.PprofListenAddress != "" { + go func() { + logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress) + logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil)) + }() + } + + node := &Node{ + config: config, + genesisDoc: genDoc, + privValidator: privValidator, + + transport: transport, + sw: sw, + addrBook: addrBook, + nodeInfo: nodeInfo, + nodeKey: nodeKey, + + stateStore: stateStore, + blockStore: blockStore, + bcReactor: bcReactor, + mempoolReactor: mempoolReactor, + mempool: mempool, + consensusState: consensusState, + consensusReactor: consensusReactor, + stateSyncReactor: stateSyncReactor, + stateSync: stateSync, + stateSyncGenesis: state, // Shouldn't be necessary, but need a way to pass the genesis state + pexReactor: pexReactor, + evidencePool: evidencePool, + proxyApp: proxyApp, + txIndexer: txIndexer, + indexerService: indexerService, + eventBus: eventBus, + } + node.BaseService = *service.NewBaseService(logger, "Node", node) + + for _, option := range options { + option(node) + } + + return node, nil +} + +// OnStart starts the Node. It implements service.Service. +func (n *Node) OnStart() error { + now := tmtime.Now() + genTime := n.genesisDoc.GenesisTime + if genTime.After(now) { + n.Logger.Info("Genesis time is in the future. 
Sleeping until then...", "genTime", genTime) + time.Sleep(genTime.Sub(now)) + } + + // Add private IDs to addrbook to block those peers being added + n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " ")) + + // Start the RPC server before the P2P server + // so we can eg. receive txs for the first block + if n.config.RPC.ListenAddress != "" { + listeners, err := n.startRPC() + if err != nil { + return err + } + n.rpcListeners = listeners + } + + if n.config.Instrumentation.Prometheus && + n.config.Instrumentation.PrometheusListenAddr != "" { + n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr) + } + + // Start the transport. + addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID(), n.config.P2P.ListenAddress)) + if err != nil { + return err + } + if err := n.transport.Listen(*addr); err != nil { + return err + } + + n.isListening = true + + if n.config.Mempool.WalEnabled() { + err = n.mempool.InitWAL() + if err != nil { + return fmt.Errorf("init mempool WAL: %w", err) + } + } + + // Start the switch (the P2P server). + err = n.sw.Start() + if err != nil { + return err + } + + // Always connect to persistent peers + err = n.sw.DialPeersAsync(splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " ")) + if err != nil { + return fmt.Errorf("could not dial peers from persistent_peers field: %w", err) + } + + // Run state sync + if n.stateSync { + bcR, ok := n.bcReactor.(fastSyncReactor) + if !ok { + return fmt.Errorf("this blockchain reactor does not support switching from state sync") + } + err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider, + n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis) + if err != nil { + return fmt.Errorf("failed to start state sync: %w", err) + } + } + + return nil +} + +// OnStop stops the Node. It implements service.Service. +func (n *Node) OnStop() { + n.BaseService.OnStop() + + n.Logger.Info("Stopping Node") + + // first stop the non-reactor services + if err := n.eventBus.Stop(); err != nil { + n.Logger.Error("Error closing eventBus", "err", err) + } + if err := n.indexerService.Stop(); err != nil { + n.Logger.Error("Error closing indexerService", "err", err) + } + + // now stop the reactors + if err := n.sw.Stop(); err != nil { + n.Logger.Error("Error closing switch", "err", err) + } + + // stop mempool WAL + if n.config.Mempool.WalEnabled() { + n.mempool.CloseWAL() + } + + if err := n.transport.Close(); err != nil { + n.Logger.Error("Error closing transport", "err", err) + } + + n.isListening = false + + // finally stop the listeners / external services + for _, l := range n.rpcListeners { + n.Logger.Info("Closing rpc listener", "listener", l) + if err := l.Close(); err != nil { + n.Logger.Error("Error closing listener", "listener", l, "err", err) + } + } + + if pvsc, ok := n.privValidator.(service.Service); ok { + if err := pvsc.Stop(); err != nil { + n.Logger.Error("Error closing private validator", "err", err) + } + } + + if n.prometheusSrv != nil { + if err := n.prometheusSrv.Shutdown(context.Background()); err != nil { + // Error from closing listeners, or context timeout: + n.Logger.Error("Prometheus HTTP server Shutdown", "err", err) + } + } +} + +// ConfigureRPC makes sure RPC has all the objects it needs to operate. 
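+// It populates the global rpccore environment (state and block stores,
+// mempool, event bus, pubkey, RPC config) that the route handlers read from;
+// startRPC below calls it before opening any listeners.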
+func (n *Node) ConfigureRPC() error { + pubKey, err := n.privValidator.GetPubKey() + if err != nil { + return fmt.Errorf("can't get pubkey: %w", err) + } + rpccore.SetEnvironment(&rpccore.Environment{ + ProxyAppQuery: n.proxyApp.Query(), + ProxyAppMempool: n.proxyApp.Mempool(), + + StateStore: n.stateStore, + BlockStore: n.blockStore, + EvidencePool: n.evidencePool, + ConsensusState: n.consensusState, + P2PPeers: n.sw, + P2PTransport: n, + + PubKey: pubKey, + GenDoc: n.genesisDoc, + TxIndexer: n.txIndexer, + ConsensusReactor: &consensus.Reactor{}, + EventBus: n.eventBus, + Mempool: n.mempool, + + Logger: n.Logger.With("module", "rpc"), + + Config: *n.config.RPC, + }) + return nil +} + +func (n *Node) startRPC() ([]net.Listener, error) { + err := n.ConfigureRPC() + if err != nil { + return nil, err + } + + listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ") + + if n.config.RPC.Unsafe { + rpccore.AddUnsafeRoutes() + } + + config := rpcserver.DefaultConfig() + config.MaxBodyBytes = n.config.RPC.MaxBodyBytes + config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes + config.MaxOpenConnections = n.config.RPC.MaxOpenConnections + // If necessary adjust global WriteTimeout to ensure it's greater than + // TimeoutBroadcastTxCommit. + // See https://github.com/tendermint/tendermint/issues/3435 + if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { + config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second + } + + // we may expose the rpc over both a unix and tcp socket + listeners := make([]net.Listener, len(listenAddrs)) + for i, listenAddr := range listenAddrs { + mux := http.NewServeMux() + rpcLogger := n.Logger.With("module", "rpc-server") + wmLogger := rpcLogger.With("protocol", "websocket") + wm := rpcserver.NewWebsocketManager(rpccore.Routes, + rpcserver.OnDisconnect(func(remoteAddr string) { + err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr) + if err != nil && err != tmpubsub.ErrSubscriptionNotFound { + wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) + } + }), + rpcserver.ReadLimit(config.MaxBodyBytes), + ) + wm.SetLogger(wmLogger) + mux.HandleFunc("/websocket", wm.WebsocketHandler) + rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger) + listener, err := rpcserver.Listen( + listenAddr, + config, + ) + if err != nil { + return nil, err + } + + var rootHandler http.Handler = mux + if n.config.RPC.IsCorsEnabled() { + corsMiddleware := cors.New(cors.Options{ + AllowedOrigins: n.config.RPC.CORSAllowedOrigins, + AllowedMethods: n.config.RPC.CORSAllowedMethods, + AllowedHeaders: n.config.RPC.CORSAllowedHeaders, + }) + rootHandler = corsMiddleware.Handler(mux) + } + if n.config.RPC.IsTLSEnabled() { + go func() { + if err := rpcserver.ServeTLS( + listener, + rootHandler, + n.config.RPC.CertFile(), + n.config.RPC.KeyFile(), + rpcLogger, + config, + ); err != nil { + n.Logger.Error("Error serving server with TLS", "err", err) + } + }() + } else { + go func() { + if err := rpcserver.Serve( + listener, + rootHandler, + rpcLogger, + config, + ); err != nil { + n.Logger.Error("Error serving server", "err", err) + } + }() + } + + listeners[i] = listener + } + + // we expose a simplified api over grpc for convenience to app devs + grpcListenAddr := n.config.RPC.GRPCListenAddress + if grpcListenAddr != "" { + config := rpcserver.DefaultConfig() + config.MaxBodyBytes = n.config.RPC.MaxBodyBytes + config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes + // NOTE: GRPCMaxOpenConnections is used, not 
MaxOpenConnections + config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections + // If necessary adjust global WriteTimeout to ensure it's greater than + // TimeoutBroadcastTxCommit. + // See https://github.com/tendermint/tendermint/issues/3435 + if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { + config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second + } + listener, err := rpcserver.Listen(grpcListenAddr, config) + if err != nil { + return nil, err + } + go func() { + if err := grpccore.StartGRPCServer(listener); err != nil { + n.Logger.Error("Error starting gRPC server", "err", err) + } + }() + listeners = append(listeners, listener) + } + + return listeners, nil +} + +// startPrometheusServer starts a Prometheus HTTP server, listening for metrics +// collectors on addr. +func (n *Node) startPrometheusServer(addr string) *http.Server { + srv := &http.Server{ + Addr: addr, + Handler: promhttp.InstrumentMetricHandler( + prometheus.DefaultRegisterer, promhttp.HandlerFor( + prometheus.DefaultGatherer, + promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections}, + ), + ), + } + go func() { + if err := srv.ListenAndServe(); err != http.ErrServerClosed { + // Error starting or closing listener: + n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err) + } + }() + return srv +} + +// Switch returns the Node's Switch. +func (n *Node) Switch() *p2p.Switch { + return n.sw +} + +// BlockStore returns the Node's BlockStore. +func (n *Node) BlockStore() *store.BlockStore { + return n.blockStore +} + +// ConsensusState returns the Node's ConsensusState. +func (n *Node) ConsensusState() *cs.State { + return n.consensusState +} + +// ConsensusReactor returns the Node's ConsensusReactor. +func (n *Node) ConsensusReactor() *cs.Reactor { + return n.consensusReactor +} + +// MempoolReactor returns the Node's mempool reactor. +func (n *Node) MempoolReactor() *mempl.Reactor { + return n.mempoolReactor +} + +// Mempool returns the Node's mempool. +func (n *Node) Mempool() mempl.Mempool { + return n.mempool +} + +// PEXReactor returns the Node's PEXReactor. It returns nil if PEX is disabled. +func (n *Node) PEXReactor() *pex.Reactor { + return n.pexReactor +} + +// EvidencePool returns the Node's EvidencePool. +func (n *Node) EvidencePool() *evidence.Pool { + return n.evidencePool +} + +// EventBus returns the Node's EventBus. +func (n *Node) EventBus() *types.EventBus { + return n.eventBus +} + +// PrivValidator returns the Node's PrivValidator. +// XXX: for convenience only! +func (n *Node) PrivValidator() types.PrivValidator { + return n.privValidator +} + +// GenesisDoc returns the Node's GenesisDoc. +func (n *Node) GenesisDoc() *types.GenesisDoc { + return n.genesisDoc +} + +// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application. +func (n *Node) ProxyApp() proxy.AppConns { + return n.proxyApp +} + +// Config returns the Node's config. +func (n *Node) Config() *cfg.Config { + return n.config +} + +//------------------------------------------------------------------------------ + +func (n *Node) Listeners() []string { + return []string{ + fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress), + } +} + +func (n *Node) IsListening() bool { + return n.isListening +} + +// NodeInfo returns the Node's Info from the Switch. 
+func (n *Node) NodeInfo() p2p.NodeInfo { + return n.nodeInfo +} + +func makeNodeInfo( + config *cfg.Config, + nodeKey *p2p.NodeKey, + txIndexer txindex.TxIndexer, + genDoc *types.GenesisDoc, + state sm.State, +) (p2p.NodeInfo, error) { + txIndexerStatus := "on" + if _, ok := txIndexer.(*null.TxIndex); ok { + txIndexerStatus = "off" + } + + var bcChannel byte + switch config.FastSync.Version { + case "v0": + bcChannel = bcv0.BlockchainChannel + case "v1": + bcChannel = bcv1.BlockchainChannel + case "v2": + bcChannel = bcv2.BlockchainChannel + default: + return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version) + } + + nodeInfo := p2p.DefaultNodeInfo{ + ProtocolVersion: p2p.NewProtocolVersion( + version.P2PProtocol, // global + state.Version.Consensus.Block, + state.Version.Consensus.App, + ), + DefaultNodeID: nodeKey.ID(), + Network: genDoc.ChainID, + Version: version.TMCoreSemVer, + Channels: []byte{ + bcChannel, + cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel, + mempl.MempoolChannel, + evidence.EvidenceChannel, + statesync.SnapshotChannel, statesync.ChunkChannel, + }, + Moniker: config.Moniker, + Other: p2p.DefaultNodeInfoOther{ + TxIndex: txIndexerStatus, + RPCAddress: config.RPC.ListenAddress, + }, + } + + if config.P2P.PexReactor { + nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) + } + + lAddr := config.P2P.ExternalAddress + + if lAddr == "" { + lAddr = config.P2P.ListenAddress + } + + nodeInfo.ListenAddr = lAddr + + err := nodeInfo.Validate() + return nodeInfo, err +} + +//------------------------------------------------------------------------------ + +var ( + genesisDocKey = []byte("genesisDoc") +) + +// LoadStateFromDBOrGenesisDocProvider attempts to load the state from the +// database, or creates one using the given genesisDocProvider and persists the +// result to the database. On success this also returns the genesis doc loaded +// through the given provider. +func LoadStateFromDBOrGenesisDocProvider( + stateDB dbm.DB, + genesisDocProvider GenesisDocProvider, +) (sm.State, *types.GenesisDoc, error) { + // Get genesis doc + genDoc, err := loadGenesisDoc(stateDB) + if err != nil { + genDoc, err = genesisDocProvider() + if err != nil { + return sm.State{}, nil, err + } + // save genesis doc to prevent a certain class of user errors (e.g. when it + // was changed, accidentally or not). Also good for audit trail. 
+		saveGenesisDoc(stateDB, genDoc)
+	}
+	stateStore := sm.NewStore(stateDB)
+	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
+	if err != nil {
+		return sm.State{}, nil, err
+	}
+	return state, genDoc, nil
+}
+
+// panics if it fails to unmarshal the bytes
+func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
+	b, err := db.Get(genesisDocKey)
+	if err != nil {
+		panic(err)
+	}
+	if len(b) == 0 {
+		return nil, errors.New("genesis doc not found")
+	}
+	var genDoc *types.GenesisDoc
+	err = tmjson.Unmarshal(b, &genDoc)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, b))
+	}
+	return genDoc, nil
+}
+
+// panics if it fails to marshal the given genesis document
+func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
+	b, err := tmjson.Marshal(genDoc)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
+	}
+	if err := db.SetSync(genesisDocKey, b); err != nil {
+		panic(fmt.Sprintf("Failed to save genesis doc: %v", err))
+	}
+}
+
+func createAndStartPrivValidatorSocketClient(
+	listenAddr,
+	chainID string,
+	logger log.Logger,
+) (types.PrivValidator, error) {
+	pve, err := privval.NewSignerListener(listenAddr, logger)
+	if err != nil {
+		return nil, fmt.Errorf("failed to start private validator: %w", err)
+	}
+
+	pvsc, err := privval.NewSignerClient(pve, chainID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to start private validator: %w", err)
+	}
+
+	// try to get a pubkey from the private validator the first time
+	_, err = pvsc.GetPubKey()
+	if err != nil {
+		return nil, fmt.Errorf("can't get pubkey: %w", err)
+	}
+
+	const (
+		retries = 50 // 50 * 100ms = 5s total
+		timeout = 100 * time.Millisecond
+	)
+	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)
+
+	return pvscWithRetries, nil
+}
+
+// splitAndTrimEmpty slices s into all subslices separated by sep and trims
+// from each subslice all leading and trailing Unicode code points contained
+// in cutset. If sep is empty, it splits after each UTF-8 sequence. The first
+// part is equivalent to strings.SplitN with a count of -1. Empty strings are
+// filtered out, so only non-empty strings are returned.
+func splitAndTrimEmpty(s, sep, cutset string) []string {
+	if s == "" {
+		return []string{}
+	}
+
+	spl := strings.Split(s, sep)
+	nonEmptyStrings := make([]string, 0, len(spl))
+	for i := 0; i < len(spl); i++ {
+		element := strings.Trim(spl[i], cutset)
+		if element != "" {
+			nonEmptyStrings = append(nonEmptyStrings, element)
+		}
+	}
+	return nonEmptyStrings
+}
diff --git a/test/maverick/node/privval.go b/test/maverick/node/privval.go
new file mode 100644
index 000000000..441b6ca9d
--- /dev/null
+++ b/test/maverick/node/privval.go
@@ -0,0 +1,358 @@
+package node
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/tendermint/tendermint/crypto"
+	"github.com/tendermint/tendermint/crypto/ed25519"
+	tmbytes "github.com/tendermint/tendermint/libs/bytes"
+	tmjson "github.com/tendermint/tendermint/libs/json"
+	tmos "github.com/tendermint/tendermint/libs/os"
+	"github.com/tendermint/tendermint/libs/tempfile"
+	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+	"github.com/tendermint/tendermint/types"
+)
+
+// *******************************************************************************************************************
+//
+// WARNING: FOR TESTING ONLY. DO NOT USE THIS FILE OUTSIDE MAVERICK
+//
+// *******************************************************************************************************************
+
+const (
+	stepNone      int8 = 0 // Used to distinguish the initial state
+	stepPropose   int8 = 1
+	stepPrevote   int8 = 2
+	stepPrecommit int8 = 3
+)
+
+// A vote is either stepPrevote or stepPrecommit.
+func voteToStep(vote *tmproto.Vote) int8 {
+	switch vote.Type {
+	case tmproto.PrevoteType:
+		return stepPrevote
+	case tmproto.PrecommitType:
+		return stepPrecommit
+	default:
+		panic(fmt.Sprintf("Unknown vote type: %v", vote.Type))
+	}
+}
+
+//-------------------------------------------------------------------------------
+
+// FilePVKey stores the immutable part of PrivValidator.
+type FilePVKey struct {
+	Address types.Address  `json:"address"`
+	PubKey  crypto.PubKey  `json:"pub_key"`
+	PrivKey crypto.PrivKey `json:"priv_key"`
+
+	filePath string
+}
+
+// Save persists the FilePVKey to its filePath.
+func (pvKey FilePVKey) Save() {
+	outFile := pvKey.filePath
+	if outFile == "" {
+		panic("cannot save PrivValidator key: filePath not set")
+	}
+
+	jsonBytes, err := tmjson.MarshalIndent(pvKey, "", "  ")
+	if err != nil {
+		panic(err)
+	}
+	err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600)
+	if err != nil {
+		panic(err)
+	}
+
+}
+
+//-------------------------------------------------------------------------------
+
+// FilePVLastSignState stores the mutable part of PrivValidator.
+type FilePVLastSignState struct {
+	Height    int64            `json:"height"`
+	Round     int32            `json:"round"`
+	Step      int8             `json:"step"`
+	Signature []byte           `json:"signature,omitempty"`
+	SignBytes tmbytes.HexBytes `json:"signbytes,omitempty"`
+
+	filePath string
+}
+
+// CheckHRS checks the given height, round, step (HRS) against that of the
+// FilePVLastSignState. It returns an error if the arguments constitute a regression,
+// or if they match but the SignBytes are empty.
+// The returned boolean indicates whether the last Signature should be reused -
+// it returns true if the HRS matches the arguments and the SignBytes are not empty (indicating
+// we have already signed for this HRS, and can reuse the existing signature).
+// It panics if the HRS matches the arguments, there's a SignBytes, but no Signature.
+func (lss *FilePVLastSignState) CheckHRS(height int64, round int32, step int8) (bool, error) {
+
+	if lss.Height > height {
+		return false, fmt.Errorf("height regression. Got %v, last height %v", height, lss.Height)
+	}
+
+	if lss.Height == height {
+		if lss.Round > round {
+			return false, fmt.Errorf("round regression at height %v. Got %v, last round %v", height, round, lss.Round)
+		}
+
+		if lss.Round == round {
+			if lss.Step > step {
+				return false, fmt.Errorf(
+					"step regression at height %v round %v. Got %v, last step %v",
+					height,
+					round,
+					step,
+					lss.Step,
+				)
+			} else if lss.Step == step {
+				if lss.SignBytes != nil {
+					if lss.Signature == nil {
+						panic("pv: Signature is nil but SignBytes is not!")
+					}
+					return true, nil
+				}
+				return false, errors.New("no SignBytes found")
+			}
+		}
+	}
+	return false, nil
+}
+
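For orientation, here is a short sketch (not part of the diff) of the three outcomes `CheckHRS` distinguishes. It is written as if it lived inside this package, and the heights, rounds, and byte values are made up:

```go
// Illustrative only: exercises CheckHRS from within this package.
func checkHRSExample() {
	lss := &FilePVLastSignState{
		Height:    10,
		Round:     2,
		Step:      stepPrevote,
		Signature: []byte{0x01},       // made-up signature
		SignBytes: []byte{0xBE, 0xEF}, // made-up sign bytes
	}

	// Regression: an earlier height is rejected with an error.
	if _, err := lss.CheckHRS(9, 0, stepPrevote); err != nil {
		fmt.Println("rejected:", err)
	}

	// Exact match with SignBytes present: returns (true, nil), i.e. the
	// caller should reuse the already-persisted signature.
	sameHRS, _ := lss.CheckHRS(10, 2, stepPrevote)
	fmt.Println("reuse signature:", sameHRS) // true

	// Strictly newer step at the same height/round: returns (false, nil),
	// i.e. it is safe to sign fresh bytes.
	newHRS, _ := lss.CheckHRS(10, 2, stepPrecommit)
	fmt.Println("reuse signature:", newHRS) // false
}
```

+// Save persists the FilePVLastSignState to its filePath.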
+func (lss *FilePVLastSignState) Save() {
+	outFile := lss.filePath
+	if outFile == "" {
+		panic("cannot save FilePVLastSignState: filePath not set")
+	}
+	jsonBytes, err := tmjson.MarshalIndent(lss, "", "  ")
+	if err != nil {
+		panic(err)
+	}
+	err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600)
+	if err != nil {
+		panic(err)
+	}
+}
+
+//-------------------------------------------------------------------------------
+
+// FilePV implements PrivValidator using data persisted to disk
+// to prevent double signing.
+// NOTE: the directories containing pv.Key.filePath and pv.LastSignState.filePath must already exist.
+// It includes the LastSignature and LastSignBytes so we don't lose the signature
+// if the process crashes after signing but before the resulting consensus message is processed.
+type FilePV struct {
+	Key           FilePVKey
+	LastSignState FilePVLastSignState
+}
+
+// GenFilePV generates a new validator with randomly generated private key
+// and sets the filePaths, but does not call Save().
+func GenFilePV(keyFilePath, stateFilePath string) *FilePV {
+	privKey := ed25519.GenPrivKey()
+
+	return &FilePV{
+		Key: FilePVKey{
+			Address:  privKey.PubKey().Address(),
+			PubKey:   privKey.PubKey(),
+			PrivKey:  privKey,
+			filePath: keyFilePath,
+		},
+		LastSignState: FilePVLastSignState{
+			Step:     stepNone,
+			filePath: stateFilePath,
+		},
+	}
+}
+
+// LoadFilePV loads a FilePV from the filePaths. The FilePV handles double
+// signing prevention by persisting data to the stateFilePath. If either file path
+// does not exist, the program will exit.
+func LoadFilePV(keyFilePath, stateFilePath string) *FilePV {
+	return loadFilePV(keyFilePath, stateFilePath, true)
+}
+
+// LoadFilePVEmptyState loads a FilePV from the given keyFilePath, with an empty LastSignState.
+// If the keyFilePath does not exist, the program will exit.
+func LoadFilePVEmptyState(keyFilePath, stateFilePath string) *FilePV {
+	return loadFilePV(keyFilePath, stateFilePath, false)
+}
+
+// If loadState is true, we load from the stateFilePath. Otherwise, we use an empty LastSignState.
+func loadFilePV(keyFilePath, stateFilePath string, loadState bool) *FilePV {
+	keyJSONBytes, err := ioutil.ReadFile(keyFilePath)
+	if err != nil {
+		tmos.Exit(err.Error())
+	}
+	pvKey := FilePVKey{}
+	err = tmjson.Unmarshal(keyJSONBytes, &pvKey)
+	if err != nil {
+		tmos.Exit(fmt.Sprintf("Error reading PrivValidator key from %v: %v\n", keyFilePath, err))
+	}
+
+	// overwrite pubkey and address for convenience
+	pvKey.PubKey = pvKey.PrivKey.PubKey()
+	pvKey.Address = pvKey.PubKey.Address()
+	pvKey.filePath = keyFilePath
+
+	pvState := FilePVLastSignState{}
+
+	if loadState {
+		stateJSONBytes, err := ioutil.ReadFile(stateFilePath)
+		if err != nil {
+			tmos.Exit(err.Error())
+		}
+		err = tmjson.Unmarshal(stateJSONBytes, &pvState)
+		if err != nil {
+			tmos.Exit(fmt.Sprintf("Error reading PrivValidator state from %v: %v\n", stateFilePath, err))
+		}
+	}
+
+	pvState.filePath = stateFilePath
+
+	return &FilePV{
+		Key:           pvKey,
+		LastSignState: pvState,
+	}
+}
+
+// LoadOrGenFilePV loads a FilePV from the given filePaths
+// or else generates a new one and saves it to the filePaths.
+func LoadOrGenFilePV(keyFilePath, stateFilePath string) *FilePV {
+	var pv *FilePV
+	if tmos.FileExists(keyFilePath) {
+		pv = LoadFilePV(keyFilePath, stateFilePath)
+	} else {
+		pv = GenFilePV(keyFilePath, stateFilePath)
+		pv.Save()
+	}
+	return pv
+}
+
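A quick sketch (not part of the diff) of how these constructors fit together, with hypothetical paths and written as if inside this package. Per the NOTE above, the parent directory of each path must already exist:

```go
// Illustrative only: typical FilePV lifecycle using the helpers above.
func filePVLifecycleExample() {
	keyPath := "/tmp/priv_validator_key.json"     // hypothetical path
	statePath := "/tmp/priv_validator_state.json" // hypothetical path

	// Loads the validator if the key file exists; otherwise generates a
	// fresh ed25519 key and persists both the key and an empty sign state.
	pv := LoadOrGenFilePV(keyPath, statePath)

	fmt.Println(pv.String()) // e.g. PrivValidator{<address> LH:0, LR:0, LS:0}
}
```

+// GetAddress returns the address of the validator.
+// Implements PrivValidator.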
+func (pv *FilePV) GetAddress() types.Address {
+	return pv.Key.Address
+}
+
+// GetPubKey returns the public key of the validator.
+// Implements PrivValidator.
+func (pv *FilePV) GetPubKey() (crypto.PubKey, error) {
+	return pv.Key.PubKey, nil
+}
+
+// SignVote signs a canonical representation of the vote, along with the
+// chainID. Implements PrivValidator.
+func (pv *FilePV) SignVote(chainID string, vote *tmproto.Vote) error {
+	if err := pv.signVote(chainID, vote); err != nil {
+		return fmt.Errorf("error signing vote: %v", err)
+	}
+	return nil
+}
+
+// SignProposal signs a canonical representation of the proposal, along with
+// the chainID. Implements PrivValidator.
+func (pv *FilePV) SignProposal(chainID string, proposal *tmproto.Proposal) error {
+	if err := pv.signProposal(chainID, proposal); err != nil {
+		return fmt.Errorf("error signing proposal: %v", err)
+	}
+	return nil
+}
+
+// Save persists the FilePV to disk.
+func (pv *FilePV) Save() {
+	pv.Key.Save()
+	pv.LastSignState.Save()
+}
+
+// Reset resets all fields in the FilePV.
+// NOTE: Unsafe!
+func (pv *FilePV) Reset() {
+	var sig []byte
+	pv.LastSignState.Height = 0
+	pv.LastSignState.Round = 0
+	pv.LastSignState.Step = 0
+	pv.LastSignState.Signature = sig
+	pv.LastSignState.SignBytes = nil
+	pv.Save()
+}
+
+// String returns a string representation of the FilePV.
+func (pv *FilePV) String() string {
+	return fmt.Sprintf(
+		"PrivValidator{%v LH:%v, LR:%v, LS:%v}",
+		pv.GetAddress(),
+		pv.LastSignState.Height,
+		pv.LastSignState.Round,
+		pv.LastSignState.Step,
+	)
+}
+
+//------------------------------------------------------------------------------------
+
+// signVote checks if the vote is good to sign and sets the vote signature.
+// It may need to set the timestamp as well if the vote is otherwise the same as
+// a previously signed vote (ie. we crashed after signing but before the vote hit the WAL).
+func (pv *FilePV) signVote(chainID string, vote *tmproto.Vote) error {
+	height, round, step := vote.Height, vote.Round, voteToStep(vote)
+
+	lss := pv.LastSignState
+
+	_, err := lss.CheckHRS(height, round, step)
+	if err != nil {
+		return err
+	}
+
+	signBytes := types.VoteSignBytes(chainID, vote)
+
+	// It passed the checks. Sign the vote
+	sig, err := pv.Key.PrivKey.Sign(signBytes)
+	if err != nil {
+		return err
+	}
+	pv.saveSigned(height, round, step, signBytes, sig)
+	vote.Signature = sig
+	return nil
+}
+
+// signProposal checks if the proposal is good to sign and sets the proposal signature.
+// It may need to set the timestamp as well if the proposal is otherwise the same as
+// a previously signed proposal (ie. we crashed after signing but before the proposal hit the WAL).
+func (pv *FilePV) signProposal(chainID string, proposal *tmproto.Proposal) error {
+	height, round, step := proposal.Height, proposal.Round, stepPropose
+
+	lss := pv.LastSignState
+
+	_, err := lss.CheckHRS(height, round, step)
+	if err != nil {
+		return err
+	}
+
+	signBytes := types.ProposalSignBytes(chainID, proposal)
+
+	// It passed the checks. Sign the proposal
+	sig, err := pv.Key.PrivKey.Sign(signBytes)
+	if err != nil {
+		return err
+	}
+	pv.saveSigned(height, round, step, signBytes, sig)
+	proposal.Signature = sig
+	return nil
+}
+
+// Persist height/round/step and signature
+func (pv *FilePV) saveSigned(height int64, round int32, step int8,
+	signBytes []byte, sig []byte) {
+
+	pv.LastSignState.Height = height
+	pv.LastSignState.Round = round
+	pv.LastSignState.Step = step
+	pv.LastSignState.Signature = sig
+	pv.LastSignState.SignBytes = signBytes
+	pv.LastSignState.Save()
+}
diff --git a/test/p2p/README.md b/test/p2p/README.md
deleted file mode 100644
index 1ebf4c17f..000000000
--- a/test/p2p/README.md
+++ /dev/null
@@ -1,66 +0,0 @@
-# Tendermint P2P Tests
-
-These scripts facilitate setting up and testing a local testnet using docker containers.
-
-Set up your own local testnet as follows.
-
-For consistency, we assume all commands are run from the Tendermint repository root.
-
-First, build the docker image:
-
-```
-docker build -t tendermint_tester -f ./test/docker/Dockerfile .
-```
-
-Now create the docker network:
-
-```
-docker network create --driver bridge --subnet 172.57.0.0/16 my_testnet
-```
-
-This gives us a new network with IP addresses in the range `172.57.0.0 - 172.57.255.255`.
-Peers on the network can have any IP address in this range.
-For our four node network, let's pick `172.57.0.101 - 172.57.0.104`.
-Since we use Tendermint's default listening port of 26656, our list of seed nodes will look like:
-
-```
-172.57.0.101:26656,172.57.0.102:26656,172.57.0.103:26656,172.57.0.104:26656
-```
-
-Now we can start up the peers. We already have config files set up in `test/p2p/data/`.
-Let's use a for-loop to start our peers:
-
-```
-for i in $(seq 1 4); do
-  docker run -d \
-    --net=my_testnet\
-    --ip="172.57.0.$((100 + $i))" \
-    --name local_testnet_$i \
-    --entrypoint tendermint \
-    -e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$((i-1)) \
-    tendermint_tester node --p2p.persistent_peers 172.57.0.101:26656,172.57.0.102:26656,172.57.0.103:26656,172.57.0.104:26656 --proxy_app=kvstore
-done
-```
-
-If you now run `docker ps`, you'll see your containers!
-
-We can confirm they are making blocks by checking the `/status` message using `curl` and `jq` to pretty print the output json:
-
-```
-curl 172.57.0.101:26657/status | jq .
-```
-
-## IPv6 tests
-
-IPv6 tests require a Docker daemon with IPv6 enabled, by setting the following in `daemon.json`:
-
-```json
-{
-  "ipv6": true,
-  "fixed-cidr-v6": "2001:db8:1::/64"
-}
-```
-
-In Docker for Mac, this is done via Preferences → Docker Engine.
-
-Once set, run IPv6 tests via `make test_p2p_ipv6`.
\ No newline at end of file
diff --git a/test/p2p/address.sh b/test/p2p/address.sh
deleted file mode 100755
index 0b0248db2..000000000
--- a/test/p2p/address.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#! /bin/bash
-set -eu
-
-IPV=$1
-ID=$2
-PORT=${3:-}
-DOCKER_IMAGE=${4:-}
-
-if [[ "$IPV" == 6 ]]; then
-    ADDRESS="fd80:b10c::"
-else
-    ADDRESS="172.57.0."
-fi -ADDRESS="$ADDRESS$((100+$ID))" - -if [[ -n "$PORT" ]]; then - if [[ "$IPV" == 6 ]]; then - ADDRESS="[$ADDRESS]" - fi - ADDRESS="$ADDRESS:$PORT" -fi - -if [[ -n "$DOCKER_IMAGE" ]]; then - NODEID="$(docker run --rm -e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$((ID-1)) $DOCKER_IMAGE tendermint show_node_id)" - ADDRESS="$NODEID@$ADDRESS" -fi - -echo $ADDRESS \ No newline at end of file diff --git a/test/p2p/atomic_broadcast/test.sh b/test/p2p/atomic_broadcast/test.sh deleted file mode 100644 index a93067c3d..000000000 --- a/test/p2p/atomic_broadcast/test.sh +++ /dev/null @@ -1,76 +0,0 @@ -#! /bin/bash -set -u - -IPV=$1 -N=$2 - -################################################################### -# assumes peers are already synced up -# test sending txs -# for each peer: -# send a tx, wait for commit -# assert app hash on every peer reflects the post tx state -################################################################### - -echo "" -# run the test on each of them -for i in $(seq 1 "$N"); do - addr=$(test/p2p/address.sh $IPV $i 26657) - - # current state - HASH1=$(curl -s "$addr/status" | jq .result.sync_info.latest_app_hash) - - # - send a tx - TX=aadeadbeefbeefbeef0$i - echo "Broadcast Tx $TX" - curl -s "$addr/broadcast_tx_commit?tx=0x$TX" - echo "" - - # we need to wait another block to get the new app_hash - h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height | jq fromjson) - h2=$h1 - while [ "$h2" == "$h1" ]; do - sleep 1 - h2=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height | jq fromjson) - done - - # wait for all other peers to get to this height - minHeight=$h2 - for j in $(seq 1 "$N"); do - if [[ "$i" != "$j" ]]; then - addrJ=$(test/p2p/address.sh $IPV $j 26657) - - h=$(curl -s "$addrJ/status" | jq .result.sync_info.latest_block_height | jq fromjson) - while [ "$h" -lt "$minHeight" ]; do - sleep 1 - h=$(curl -s "$addrJ/status" | jq .result.sync_info.latest_block_height | jq fromjson) - done - fi - done - - # check that hash was updated - HASH2=$(curl -s "$addr/status" | jq .result.sync_info.latest_app_hash) - if [[ "$HASH1" == "$HASH2" ]]; then - echo "Expected state hash to update from $HASH1. Got $HASH2" - exit 1 - fi - - # check we get the same new hash on all other nodes - for j in $(seq 1 "$N"); do - if [[ "$i" != "$j" ]]; then - addrJ=$(test/p2p/address.sh $IPV $j 26657) - HASH3=$(curl -s "$addrJ/status" | jq .result.sync_info.latest_app_hash) - - if [[ "$HASH2" != "$HASH3" ]]; then - echo "App hash for node $j doesn't match. Got $HASH3, expected $HASH2" - exit 1 - fi - fi - done - - echo "All nodes are up to date" -done - -echo "" -echo "PASS" -echo "" diff --git a/test/p2p/basic/test.sh b/test/p2p/basic/test.sh deleted file mode 100755 index 676b5cbe6..000000000 --- a/test/p2p/basic/test.sh +++ /dev/null @@ -1,76 +0,0 @@ -#! /bin/bash -set -u - -IPV=$1 -N=$2 - -################################################################### -# wait for all peers to come online -# for each peer: -# wait to have N-1 peers -# wait to be at height > 1 -################################################################### - -# wait 60s per step per peer -MAX_SLEEP=60 - -# wait for everyone to come online -echo "Waiting for nodes to come online" -for i in `seq 1 $N`; do - addr=$(test/p2p/address.sh $IPV $i 26657) - curl -s $addr/status > /dev/null - ERR=$? - COUNT=0 - while [ "$ERR" != 0 ]; do - sleep 1 - curl -s $addr/status > /dev/null - ERR=$? 
- COUNT=$((COUNT+1)) - if [ "$COUNT" -gt "$MAX_SLEEP" ]; then - echo "Waited too long for node $i to come online" - exit 1 - fi - done - echo "... node $i is up" -done - -echo "" -# wait for each of them to sync up -for i in `seq 1 $N`; do - addr=$(test/p2p/address.sh $IPV $i 26657) - N_1=$(($N - 1)) - - # - assert everyone has N-1 other peers - N_PEERS=`curl -s $addr/net_info | jq '.result.peers | length'` - COUNT=0 - while [ "$N_PEERS" != $N_1 ]; do - echo "Waiting for node $i to connect to all peers ..." - sleep 1 - N_PEERS=`curl -s $addr/net_info | jq '.result.peers | length'` - COUNT=$((COUNT+1)) - if [ "$COUNT" -gt "$MAX_SLEEP" ]; then - echo "Waited too long for node $i to connect to all peers" - exit 1 - fi - done - - # - assert block height is greater than 1 - BLOCK_HEIGHT=`curl -s $addr/status | jq .result.sync_info.latest_block_height | jq fromjson` - COUNT=0 - echo "$$BLOCK_HEIGHT IS $BLOCK_HEIGHT" - while [ "$BLOCK_HEIGHT" -le 1 ]; do - echo "Waiting for node $i to commit a block ..." - sleep 1 - BLOCK_HEIGHT=`curl -s $addr/status | jq .result.sync_info.latest_block_height | jq fromjson` - COUNT=$((COUNT+1)) - if [ "$COUNT" -gt "$MAX_SLEEP" ]; then - echo "Waited too long for node $i to commit a block" - exit 1 - fi - done - echo "Node $i is connected to all peers and at block $BLOCK_HEIGHT" -done - -echo "" -echo "PASS" -echo "" diff --git a/test/p2p/circleci.sh b/test/p2p/circleci.sh deleted file mode 100644 index fe8a972bd..000000000 --- a/test/p2p/circleci.sh +++ /dev/null @@ -1,55 +0,0 @@ -#! /bin/bash -set -eux - -# Take IP version as parameter -IPV="${1:-4}" - -# Get the directory of where this script is. -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - -# Enable IPv6 support in Docker daemon -if [[ "$IPV" == "6" ]]; then - echo - echo "* [$(date +"%T")] enabling IPv6 stack in Docker daemon" - cat <<'EOF' | sudo tee /etc/docker/daemon.json -{ - "ipv6": true, - "fixed-cidr-v6": "2001:db8:1::/64" -} -EOF - sudo service docker restart -fi - -LOGS_DIR="$DIR/logs" -echo -echo "* [$(date +"%T")] cleaning up $LOGS_DIR" -rm -rf "$LOGS_DIR" -mkdir -p "$LOGS_DIR" - -set +e -echo -echo "* [$(date +"%T")] removing run_test container" -docker rm -vf run_test -set -e - -echo -echo "* [$(date +"%T")] starting rsyslog container" -docker rm -f rsyslog || true -docker run -d -v "$LOGS_DIR:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog - -set +u -if [[ "$SKIP_BUILD" == "" ]]; then - echo - echo "* [$(date +"%T")] building docker image" - bash "$DIR/../docker/build.sh" -fi - -echo -echo "* [$(date +"%T")] running IPv$IPV p2p tests on a local docker network" -bash "$DIR/../p2p/test.sh" tester $IPV - -echo -echo "* [$(date +"%T")] copying log files out of docker container into $LOGS_DIR" -docker cp rsyslog:/var/log $LOGS_DIR diff --git a/test/p2p/client.sh b/test/p2p/client.sh deleted file mode 100644 index b3c907fba..000000000 --- a/test/p2p/client.sh +++ /dev/null @@ -1,26 +0,0 @@ -#! 
/bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -IPV=$3 -ID=$4 -CMD=$5 - -NAME=test_container_$ID - -if [[ "$IPV" == 6 ]]; then - IP_SWITCH="--ip6" -else - IP_SWITCH="--ip" -fi - -echo "starting test client container with CMD=$CMD" -# run the test container on the local network -docker run -t --rm \ - -v "$PWD/test/p2p/:/go/src/github.com/tendermint/tendermint/test/p2p" \ - --net="$NETWORK_NAME" \ - $IP_SWITCH=$(test/p2p/address.sh $IPV -1) \ - --name "$NAME" \ - --entrypoint bash \ - "$DOCKER_IMAGE" $CMD diff --git a/test/p2p/fast_sync/check_peer.sh b/test/p2p/fast_sync/check_peer.sh deleted file mode 100644 index 798b508fa..000000000 --- a/test/p2p/fast_sync/check_peer.sh +++ /dev/null @@ -1,44 +0,0 @@ -#! /bin/bash -set -eu -set -o pipefail - -IPV=$1 -ID=$2 - -########################################### -# -# Wait for peer to catchup to other peers -# -########################################### - -addr=$(test/p2p/address.sh $IPV $ID 26657) -peerID=$(( $(($ID % 4)) + 1 )) # 1->2 ... 3->4 ... 4->1 -peer_addr=$(test/p2p/address.sh $IPV $peerID 26657) - -# get another peer's height -h1=`curl -s $peer_addr/status | jq .result.sync_info.latest_block_height | jq fromjson` - -# get another peer's state -root1=`curl -s $peer_addr/status | jq .result.sync_info.latest_app_hash` - -echo "Other peer is on height $h1 with state $root1" -echo "Waiting for peer $ID to catch up" - -# wait for it to sync to past its previous height -set +e -set +o pipefail -h2="0" -while [[ "$h2" -lt "$(($h1+3))" ]]; do - sleep 1 - h2=`curl -s $addr/status | jq .result.sync_info.latest_block_height | jq fromjson` - echo "... $h2" -done - -# check the app hash -root2=`curl -s $addr/status | jq .result.sync_info.latest_app_hash` - -if [[ "$root1" != "$root2" ]]; then - echo "App hash after fast sync does not match. Got $root2; expected $root1" - exit 1 -fi -echo "... fast sync successful" diff --git a/test/p2p/fast_sync/test.sh b/test/p2p/fast_sync/test.sh deleted file mode 100644 index 79655232f..000000000 --- a/test/p2p/fast_sync/test.sh +++ /dev/null @@ -1,15 +0,0 @@ -#! /bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -IPV=$3 -N=$4 -PROXY_APP=$5 - -# run it on each of them -for i in `seq 1 $N`; do - bash test/p2p/fast_sync/test_peer.sh $DOCKER_IMAGE $NETWORK_NAME $IPV $i $N $PROXY_APP -done - - diff --git a/test/p2p/fast_sync/test_peer.sh b/test/p2p/fast_sync/test_peer.sh deleted file mode 100644 index b4c34336f..000000000 --- a/test/p2p/fast_sync/test_peer.sh +++ /dev/null @@ -1,39 +0,0 @@ -#! 
/bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -IPV=$3 -ID=$4 -N=$5 -PROXY_APP=$6 - -############################################################### -# this runs on each peer: -# kill peer -# bring it back online via fast sync -# wait for it to sync and check the app hash -############################################################### - - -echo "Testing fastsync on node $ID" - -# kill peer -set +e # circle sigh :( - docker rm -vf local_testnet_$ID - set -e - - # restart peer - should have an empty blockchain - PERSISTENT_PEERS="$(test/p2p/address.sh $IPV 1 26656 $DOCKER_IMAGE)" - for j in `seq 2 $N`; do - PERSISTENT_PEERS="$PERSISTENT_PEERS,$(test/p2p/address.sh $IPV $j 26656 $DOCKER_IMAGE)" - done - bash test/p2p/peer.sh $DOCKER_IMAGE $NETWORK_NAME $IPV $ID $PROXY_APP "--p2p.persistent_peers $PERSISTENT_PEERS --p2p.pex --rpc.unsafe" - - # wait for peer to sync and check the app hash - bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME $IPV fs_$ID "test/p2p/fast_sync/check_peer.sh $IPV $ID" - - echo "" - echo "PASS" - echo "" - diff --git a/test/p2p/kill_all/check_peers.sh b/test/p2p/kill_all/check_peers.sh deleted file mode 100644 index 504cdeddd..000000000 --- a/test/p2p/kill_all/check_peers.sh +++ /dev/null @@ -1,50 +0,0 @@ -#! /bin/bash -set -eu - -IPV=$1 -NUM_OF_PEERS=$2 - -# how many attempts for each peer to catch up by height -MAX_ATTEMPTS_TO_CATCH_UP=120 - -echo "Waiting for nodes to come online" -set +e -for i in $(seq 1 "$NUM_OF_PEERS"); do - addr=$(test/p2p/address.sh $IPV $i 26657) - curl -s "$addr/status" > /dev/null - ERR=$? - while [ "$ERR" != 0 ]; do - sleep 1 - curl -s "$addr/status" > /dev/null - ERR=$? - done - echo "... node $i is up" -done -set -e - -# get the first peer's height -addr=$(test/p2p/address.sh $IPV 1 26657) -h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height | sed -e "s/^\"\(.*\)\"$/\1/g") -echo "1st peer is on height $h1" - -echo "Waiting until other peers reporting a height higher than the 1st one" -for i in $(seq 2 "$NUM_OF_PEERS"); do - attempt=1 - hi=0 - - while [[ $hi -le $h1 ]] ; do - addr=$(test/p2p/address.sh $IPV $i 26657) - hi=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height | sed -e "s/^\"\(.*\)\"$/\1/g") - - echo "... peer $i is on height $hi" - - ((attempt++)) - if [ "$attempt" -ge $MAX_ATTEMPTS_TO_CATCH_UP ] ; then - echo "$attempt unsuccessful attempts were made to catch up" - curl -s "$addr/dump_consensus_state" | jq .result - exit 1 - fi - - sleep 1 - done -done diff --git a/test/p2p/kill_all/test.sh b/test/p2p/kill_all/test.sh deleted file mode 100644 index 755612130..000000000 --- a/test/p2p/kill_all/test.sh +++ /dev/null @@ -1,31 +0,0 @@ -#! /bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -IPV=$3 -NUM_OF_PEERS=$4 -NUM_OF_CRASHES=$5 - -############################################################### -# NUM_OF_CRASHES times: -# restart all peers -# wait for them to sync and check that they are making progress -############################################################### - -for i in $(seq 1 "$NUM_OF_CRASHES"); do - echo "" - echo "Restarting all peers! Take $i ..." 
- - # restart all peers - for j in $(seq 1 "$NUM_OF_PEERS"); do - docker stop "local_testnet_$j" - docker start "local_testnet_$j" - done - - bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$IPV" kill_all_$i "test/p2p/kill_all/check_peers.sh $IPV $NUM_OF_PEERS" -done - -echo "" -echo "PASS" -echo "" diff --git a/test/p2p/local_testnet_start.sh b/test/p2p/local_testnet_start.sh deleted file mode 100644 index 8da6be4bb..000000000 --- a/test/p2p/local_testnet_start.sh +++ /dev/null @@ -1,27 +0,0 @@ -#! /bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -IPV=$3 -N=$4 -APP_PROXY=$5 - -set +u -PERSISTENT_PEERS=$6 -if [[ "$PERSISTENT_PEERS" != "" ]]; then - echo "PersistentPeers: $PERSISTENT_PEERS" - PERSISTENT_PEERS="--p2p.persistent_peers $PERSISTENT_PEERS" -fi -set -u - -# create docker network -if [[ $IPV == 6 ]]; then - docker network create --driver bridge --ipv6 --subnet fd80:b10c::/48 "$NETWORK_NAME" -else - docker network create --driver bridge --subnet 172.57.0.0/16 "$NETWORK_NAME" -fi - -for i in $(seq 1 "$N"); do - bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" $IPV "$i" "$APP_PROXY" "$PERSISTENT_PEERS --p2p.pex --rpc.unsafe" -done diff --git a/test/p2p/local_testnet_stop.sh b/test/p2p/local_testnet_stop.sh deleted file mode 100644 index 1dace4694..000000000 --- a/test/p2p/local_testnet_stop.sh +++ /dev/null @@ -1,12 +0,0 @@ -#! /bin/bash -set -u - -NETWORK_NAME=$1 -N=$2 - -for i in $(seq 1 "$N"); do - docker stop "local_testnet_$i" - docker rm -vf "local_testnet_$i" -done - -docker network rm "$NETWORK_NAME" diff --git a/test/p2p/peer.sh b/test/p2p/peer.sh deleted file mode 100644 index bf146ca1b..000000000 --- a/test/p2p/peer.sh +++ /dev/null @@ -1,53 +0,0 @@ -#! /bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -IPV=$3 -ID=$4 -APP_PROXY=$5 - -set +u -NODE_FLAGS=$6 -set -u - -if [[ "$IPV" == 6 ]]; then - IP_SWITCH="--ip6" -else - IP_SWITCH="--ip" -fi - -echo "starting tendermint peer ID=$ID" -# start tendermint container on the network -# NOTE: $NODE_FLAGS should be unescaped (no quotes). otherwise it will be -# treated as one flag. - -# test/p2p/data/mach$((ID-1)) data is generated in test/docker/Dockerfile using -# the tendermint testnet command. -if [[ "$ID" == "x" ]]; then # Set "x" to "1" to print to console. - docker run \ - --net="$NETWORK_NAME" \ - $IP_SWITCH=$(test/p2p/address.sh $IPV $ID) \ - --name "local_testnet_$ID" \ - --entrypoint tendermint \ - -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$((ID-1))" \ - -e GOMAXPROCS=1 \ - --log-driver=syslog \ - --log-opt syslog-address=udp://127.0.0.1:5514 \ - --log-opt syslog-facility=daemon \ - --log-opt tag="{{.Name}}" \ - "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" & -else - docker run -d \ - --net="$NETWORK_NAME" \ - $IP_SWITCH=$(test/p2p/address.sh $IPV $ID) \ - --name "local_testnet_$ID" \ - --entrypoint tendermint \ - -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$((ID-1))" \ - -e GOMAXPROCS=1 \ - --log-driver=syslog \ - --log-opt syslog-address=udp://127.0.0.1:5514 \ - --log-opt syslog-facility=daemon \ - --log-opt tag="{{.Name}}" \ - "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" -fi diff --git a/test/p2p/persistent_peers.sh b/test/p2p/persistent_peers.sh deleted file mode 100644 index a1e76991a..000000000 --- a/test/p2p/persistent_peers.sh +++ /dev/null @@ -1,12 +0,0 @@ -#! 
/bin/bash -set -eu - -IPV=$1 -N=$2 -DOCKER_IMAGE=$3 - -persistent_peers="$(test/p2p/address.sh $IPV 1 26656 $DOCKER_IMAGE)" -for i in $(seq 2 $N); do - persistent_peers="$persistent_peers,$(test/p2p/address.sh $IPV $i 26656 $DOCKER_IMAGE)" -done -echo "$persistent_peers" diff --git a/test/p2p/pex/check_peer.sh b/test/p2p/pex/check_peer.sh deleted file mode 100644 index 93499d9f9..000000000 --- a/test/p2p/pex/check_peer.sh +++ /dev/null @@ -1,18 +0,0 @@ -#! /bin/bash -set -u - -IPV=$1 -ID=$2 -N=$3 - -addr=$(test/p2p/address.sh $IPV "$ID" 26657) - -echo "2. wait until peer $ID connects to other nodes using pex reactor" -peers_count="0" -while [[ "$peers_count" -lt "$((N-1))" ]]; do - sleep 1 - peers_count=$(curl -s "$addr/net_info" | jq ".result.peers | length") - echo "... peers count = $peers_count, expected = $((N-1))" -done - -echo "... successful" diff --git a/test/p2p/pex/dial_peers.sh b/test/p2p/pex/dial_peers.sh deleted file mode 100644 index 8c4d40f44..000000000 --- a/test/p2p/pex/dial_peers.sh +++ /dev/null @@ -1,22 +0,0 @@ -#! /bin/bash -set -u - -IPV=$1 -N=$2 -PEERS=$3 - -echo "Waiting for nodes to come online" -for i in $(seq 1 "$N"); do - addr=$(test/p2p/address.sh $IPV $i 26657) - curl -s "$addr/status" > /dev/null - ERR=$? - while [ "$ERR" != 0 ]; do - sleep 1 - curl -s "$addr/status" > /dev/null - ERR=$? - done - echo "... node $i is up" -done - -ADDR=$(test/p2p/address.sh $IPV 1 26657) -curl "$ADDR/dial_peers?persistent=true&peers=\\[$PEERS\\]" diff --git a/test/p2p/pex/test.sh b/test/p2p/pex/test.sh deleted file mode 100644 index 1e87a9fa5..000000000 --- a/test/p2p/pex/test.sh +++ /dev/null @@ -1,14 +0,0 @@ -#! /bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -IPV=$3 -N=$4 -PROXY_APP=$5 - -echo "Test reconnecting from the address book" -bash test/p2p/pex/test_addrbook.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$IPV" "$N" "$PROXY_APP" - -echo "Test connecting via /dial_peers" -bash test/p2p/pex/test_dial_peers.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$IPV" "$N" "$PROXY_APP" diff --git a/test/p2p/pex/test_addrbook.sh b/test/p2p/pex/test_addrbook.sh deleted file mode 100644 index 06fc4215f..000000000 --- a/test/p2p/pex/test_addrbook.sh +++ /dev/null @@ -1,67 +0,0 @@ -#! /bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -IPV=$3 -N=$4 -PROXY_APP=$5 - -ID=1 - -echo "----------------------------------------------------------------------" -echo "Testing pex creates the addrbook and uses it if persistent_peers are not provided" -echo "(assuming peers are started with pex enabled)" - -CLIENT_NAME="pex_addrbook_$ID" - -echo "1. restart peer $ID" -docker stop "local_testnet_$ID" -echo "stopped local_testnet_$ID" -# preserve addrbook.json -docker cp "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach0/config/addrbook.json" "/tmp/addrbook.json" -set +e #CIRCLE -docker rm -vf "local_testnet_$ID" -set -e - -# NOTE that we do not provide persistent_peers -bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" $IPV "$ID" "$PROXY_APP" "--p2p.pex --rpc.unsafe" -echo "started local_testnet_$ID" - -# if the client runs forever, it means addrbook wasn't saved or was empty -bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$IPV" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $IPV $ID $N" - -# Now we know that the node is up. 
- -docker cp "/tmp/addrbook.json" "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach0/config/addrbook.json" -echo "with the following addrbook:" -cat /tmp/addrbook.json -# exec doesn't work on circle -# docker exec "local_testnet_$ID" cat "/go/src/github.com/tendermint/tendermint/test/p2p/data/mach0/config/addrbook.json" -echo "" - -echo "----------------------------------------------------------------------" -echo "Testing other peers connect to us if we have neither persistent_peers nor the addrbook" -echo "(assuming peers are started with pex enabled)" - -CLIENT_NAME="pex_no_addrbook_$ID" - -echo "1. restart peer $ID" -docker stop "local_testnet_$ID" -echo "stopped local_testnet_$ID" -set +e #CIRCLE -docker rm -vf "local_testnet_$ID" -set -e - -# NOTE that we do not provide persistent_peers -bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" $IPV "$ID" "$PROXY_APP" "--p2p.pex --rpc.unsafe" -echo "started local_testnet_$ID" - -# if the client runs forever, it means other peers have removed us from their books (which should not happen) -bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$IPV" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $IPV $ID $N" - -# Now we know that the node is up. - -echo "" -echo "PASS" -echo "" diff --git a/test/p2p/pex/test_dial_peers.sh b/test/p2p/pex/test_dial_peers.sh deleted file mode 100644 index af76a5699..000000000 --- a/test/p2p/pex/test_dial_peers.sh +++ /dev/null @@ -1,38 +0,0 @@ -#! /bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -IPV=$3 -N=$4 -PROXY_APP=$5 - -ID=1 - -echo "----------------------------------------------------------------------" -echo "Testing full network connection using one /dial_peers call" -echo "(assuming peers are started with pex enabled)" - -# stop the existing testnet and remove local network -set +e -bash test/p2p/local_testnet_stop.sh $NETWORK_NAME $N -set -e - -# start the testnet on a local network -# NOTE we re-use the same network for all tests -bash test/p2p/local_testnet_start.sh $DOCKER_IMAGE $NETWORK_NAME $IPV $N $PROXY_APP "" - -PERSISTENT_PEERS="\"$(test/p2p/address.sh $IPV 1 26656 $DOCKER_IMAGE)\"" -for i in $(seq 2 $N); do - PERSISTENT_PEERS="$PERSISTENT_PEERS,\"$(test/p2p/address.sh $IPV $i 26656 $DOCKER_IMAGE)\"" -done -echo "$PERSISTENT_PEERS" - -# dial peers from one node -CLIENT_NAME="dial_peers" -bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME $IPV $CLIENT_NAME "test/p2p/pex/dial_peers.sh $IPV $N $PERSISTENT_PEERS" - -# test basic connectivity and consensus -# start client container and check the num peers and height for all nodes -CLIENT_NAME="dial_peers_basic" -bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME $IPV $CLIENT_NAME "test/p2p/basic/test.sh $IPV $N" diff --git a/test/p2p/test.sh b/test/p2p/test.sh deleted file mode 100644 index fe28f02a9..000000000 --- a/test/p2p/test.sh +++ /dev/null @@ -1,42 +0,0 @@ -#! 
/bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=local_testnet -N=4 -PROXY_APP=persistent_kvstore -IPV=${2:-4} # Default to IPv4 - -if [[ "$IPV" != "4" && "$IPV" != "6" ]]; then - echo "IP version must be 4 or 6" >&2 - exit 1 -fi - -# stop the existing testnet and remove local network -set +e -bash test/p2p/local_testnet_stop.sh "$NETWORK_NAME" "$N" -set -e - -PERSISTENT_PEERS=$(bash test/p2p/persistent_peers.sh $IPV $N $DOCKER_IMAGE) - -# start the testnet on a local network -# NOTE we re-use the same network for all tests -bash test/p2p/local_testnet_start.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$IPV" "$N" "$PROXY_APP" "$PERSISTENT_PEERS" - -# test basic connectivity and consensus -# start client container and check the num peers and height for all nodes -bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$IPV" basic "test/p2p/basic/test.sh $IPV $N" - -# test atomic broadcast: -# start client container and test sending a tx to each node -bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$IPV" ab "test/p2p/atomic_broadcast/test.sh $IPV $N" - -# test fast sync (from current state of network): -# for each node, kill it and readd via fast sync -bash test/p2p/fast_sync/test.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$IPV" "$N" "$PROXY_APP" - -# test killing all peers 3 times -bash test/p2p/kill_all/test.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$IPV" "$N" 3 - -# test pex -bash test/p2p/pex/test.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$IPV" "$N" "$PROXY_APP" diff --git a/test/persist/test_failure_indices.sh b/test/persist/test_failure_indices.sh deleted file mode 100644 index 4d523d943..000000000 --- a/test/persist/test_failure_indices.sh +++ /dev/null @@ -1,124 +0,0 @@ -#! /bin/bash - -export PATH="$GOBIN:$PATH" -export TMHOME=$HOME/.tendermint_persist - -rm -rf "$TMHOME" -tendermint init - -# use a unix socket so we can remove it -RPC_ADDR="$(pwd)/rpc.sock" - -TM_CMD="tendermint node --log_level=debug --rpc.laddr=unix://$RPC_ADDR" # &> tendermint_${name}.log" -DUMMY_CMD="abci-cli kvstore --persist $TMHOME/kvstore" # &> kvstore_${name}.log" - - -function start_procs(){ - name=$1 - indexToFail=$2 - echo "Starting persistent kvstore and tendermint" - if [[ "$CIRCLECI" == true ]]; then - $DUMMY_CMD & - else - $DUMMY_CMD &> "kvstore_${name}.log" & - fi - PID_DUMMY=$! - - # before starting tendermint, remove the rpc socket - rm -f $RPC_ADDR - if [[ "$indexToFail" == "" ]]; then - # run in background, dont fail - if [[ "$CIRCLECI" == true ]]; then - $TM_CMD & - else - $TM_CMD &> "tendermint_${name}.log" & - fi - PID_TENDERMINT=$! - else - # run in foreground, fail - if [[ "$CIRCLECI" == true ]]; then - FAIL_TEST_INDEX=$indexToFail $TM_CMD - else - FAIL_TEST_INDEX=$indexToFail $TM_CMD &> "tendermint_${name}.log" - fi - PID_TENDERMINT=$! - fi -} - -function kill_procs(){ - kill -9 "$PID_DUMMY" "$PID_TENDERMINT" - wait "$PID_DUMMY" - wait "$PID_TENDERMINT" -} - -# wait for port to be available -function wait_for_port() { - port=$1 - # this will succeed while port is bound - nc -z 127.0.0.1 $port - ERR=$? - i=0 - while [ "$ERR" == 0 ]; do - echo "... port $port is still bound. waiting ..." - sleep 1 - nc -z 127.0.0.1 $port - ERR=$? - i=$((i + 1)) - if [[ $i == 10 ]]; then - echo "Timed out waiting for port to be released" - exit 1 - fi - done - echo "... port $port is free!" -} - - -failsStart=0 -fails=$(grep -r "fail.Fail" --include \*.go . 
| wc -l) -failsEnd=$((fails-1)) - -for failIndex in $(seq $failsStart $failsEnd); do - echo "" - echo "* Test FailIndex $failIndex" - # test failure at failIndex - - bash $(dirname $0)/txs.sh "localhost:26657" & - start_procs 1 "$failIndex" - - # tendermint should already have exited when it hits the fail index - # but kill -9 for good measure - kill_procs - - start_procs 2 - - # wait for node to handshake and make a new block - # NOTE: --unix-socket is only available in curl v7.40+ - curl -s --unix-socket "$RPC_ADDR" http://localhost/status > /dev/null - ERR=$? - i=0 - while [ "$ERR" != 0 ]; do - sleep 1 - curl -s --unix-socket "$RPC_ADDR" http://localhost/status > /dev/null - ERR=$? - i=$((i + 1)) - if [[ $i == 20 ]]; then - echo "Timed out waiting for tendermint to start" - exit 1 - fi - done - - # wait for a new block - h1=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result.sync_info.latest_block_height) - h2=$h1 - while [ "$h2" == "$h1" ]; do - sleep 1 - h2=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result.sync_info.latest_block_height) - done - - kill_procs - - echo "* Passed Test for FailIndex $failIndex" - echo "" -done - -echo "Passed Test: Persistence" diff --git a/test/persist/test_simple.sh b/test/persist/test_simple.sh deleted file mode 100644 index 706e04c26..000000000 --- a/test/persist/test_simple.sh +++ /dev/null @@ -1,70 +0,0 @@ -#! /bin/bash - - -export TMHOME=$HOME/.tendermint_persist - -rm -rf $TMHOME -tendermint init - -function start_procs(){ - name=$1 - echo "Starting persistent kvstore and tendermint" - abci-cli kvstore --persist $TMHOME/kvstore &> "kvstore_${name}.log" & - PID_DUMMY=$! - tendermint node &> tendermint_${name}.log & - PID_TENDERMINT=$! - sleep 5 -} - -function kill_procs(){ - kill -9 $PID_DUMMY $PID_TENDERMINT -} - - -function send_txs(){ - # send a bunch of txs over a few blocks - echo "Sending txs" - for i in `seq 1 5`; do - for j in `seq 1 100`; do - tx=`head -c 8 /dev/urandom | hexdump -ve '1/1 "%.2X"'` - curl -s 127.0.0.1:26657/broadcast_tx_async?tx=0x$tx &> /dev/null - done - sleep 1 - done -} - - -start_procs 1 -send_txs -kill_procs - -start_procs 2 - -# wait for node to handshake and make a new block -addr="localhost:26657" -curl -s $addr/status > /dev/null -ERR=$? -i=0 -while [ "$ERR" != 0 ]; do - sleep 1 - curl -s $addr/status > /dev/null - ERR=$? - i=$(($i + 1)) - if [[ $i == 10 ]]; then - echo "Timed out waiting for tendermint to start" - exit 1 - fi -done - -# wait for a new block -h1=`curl -s $addr/status | jq .result.sync_info.latest_block_height` -h2=$h1 -while [ "$h2" == "$h1" ]; do - sleep 1 - h2=`curl -s $addr/status | jq .result.sync_info.latest_block_height` -done - -kill_procs -sleep 2 - -echo "Passed Test: Persistence" diff --git a/test/persist/txs.sh b/test/persist/txs.sh deleted file mode 100644 index 120aa8a56..000000000 --- a/test/persist/txs.sh +++ /dev/null @@ -1,23 +0,0 @@ -#! /bin/bash -set -u - -# wait till node is up, send txs -ADDR=$1 #="127.0.0.1:26657" -curl -s $ADDR/status > /dev/null -ERR=$? -while [ "$ERR" != 0 ]; do - sleep 1 - curl -s $ADDR/status > /dev/null - ERR=$? 
-done - -# send a bunch of txs over a few blocks -echo "Node is up, sending txs" -for i in $(seq 1 5); do - for _ in $(seq 1 100); do - tx=$(head -c 8 /dev/urandom | hexdump -ve '1/1 "%.2X"') - curl -s "$ADDR/broadcast_tx_async?tx=0x$tx" &> /dev/null - done - echo "sent 100" - sleep 1 -done diff --git a/tests.mk b/tests.mk deleted file mode 100644 index d7e305312..000000000 --- a/tests.mk +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/make -f - -######################################## -### Testing - -BINDIR ?= $(GOPATH)/bin - -## required to be run first by most tests -build_docker_test_image: - docker build -t tester -f ./test/docker/Dockerfile . -.PHONY: build_docker_test_image - -### coverage, app, persistence, and libs tests -test_cover: - # run the go unit tests with coverage - bash test/test_cover.sh -.PHONY: test_cover - -test_apps: - # run the app tests using bash - # requires `abci-cli` and `tendermint` binaries installed - bash test/app/test.sh -.PHONY: test_apps - -test_abci_apps: - bash abci/tests/test_app/test.sh -.PHONY: test_abci_apps - -test_abci_cli: - # test the cli against the examples in the tutorial at: - # ./docs/abci-cli.md - # if test fails, update the docs ^ - @ bash abci/tests/test_cli/test.sh -.PHONY: test_abci_cli - -test_persistence: - # run the persistence tests using bash - # requires `abci-cli` installed - docker run --name run_persistence -t tester bash test/persist/test_failure_indices.sh - - # TODO undockerize - # bash test/persist/test_failure_indices.sh -.PHONY: test_persistence - -test_p2p: - docker rm -f rsyslog || true - rm -rf test/logs && mkdir -p test/logs - docker run -d -v "$(CURDIR)/test/logs:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog - # requires 'tester' the image from above - bash test/p2p/test.sh tester - # the `docker cp` takes a really long time; uncomment for debugging - # - # mkdir -p test/p2p/logs && docker cp rsyslog:/var/log test/p2p/logs -.PHONY: test_p2p - -test_p2p_ipv6: - # IPv6 tests require Docker daemon with IPv6 enabled, e.g. in daemon.json: - # - # { - # "ipv6": true, - # "fixed-cidr-v6": "2001:db8:1::/64" - # } - # - # Docker for Mac can set this via Preferences -> Docker Engine. 
- docker rm -f rsyslog || true - rm -rf test/logs && mkdir -p test/logs - docker run -d -v "$(CURDIR)/test/logs:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog - # requires 'tester' the image from above - bash test/p2p/test.sh tester 6 - # the `docker cp` takes a really long time; uncomment for debugging - # - # mkdir -p test/p2p/logs && docker cp rsyslog:/var/log test/p2p/logs -.PHONY: test_p2p_ipv6 - -test_integrations: - make build_docker_test_image - make tools - make install - make test_cover - make test_apps - make test_abci_apps - make test_abci_cli - make test_libs - make test_persistence - make test_p2p - # Disabled by default since it requires Docker daemon with IPv6 enabled - #make test_p2p_ipv6 -.PHONY: test_integrations - -test_release: - @go test -tags release $(PACKAGES) -.PHONY: test_release - -test100: - @for i in {1..100}; do make test; done -.PHONY: test100 - -vagrant_test: - vagrant up - vagrant ssh -c 'make test_integrations' -.PHONY: vagrant_test - -### go tests -test: - @echo "--> Running go test" - @go test -p 1 $(PACKAGES) -.PHONY: test - -test_race: - @echo "--> Running go test --race" - @go test -p 1 -v -race $(PACKAGES) -.PHONY: test_race - -# uses https://github.com/sasha-s/go-deadlock/ to detect potential deadlocks -test_with_deadlock: - make set_with_deadlock - make test - make cleanup_after_test_with_deadlock -.PHONY: test_with_deadlock - -set_with_deadlock: - @echo "Get Goid" - @go get github.com/petermattis/goid@b0b1615b78e5ee59739545bb38426383b2cda4c9 - @echo "Get Go-Deadlock" - @go get github.com/sasha-s/go-deadlock@d68e2bc52ae3291765881b9056f2c1527f245f1e - find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/sync.RWMutex/deadlock.RWMutex/' - find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/sync.Mutex/deadlock.Mutex/' - find . -name "*.go" | grep -v "vendor/" | xargs -n 1 goimports -w -.PHONY: set_with_deadlock - -# cleanes up after you ran test_with_deadlock -cleanup_after_test_with_deadlock: - find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/deadlock.RWMutex/sync.RWMutex/' - find . -name "*.go" | grep -v "vendor/" | xargs -n 1 sed -i.bak 's/deadlock.Mutex/sync.Mutex/' - find . 
-name "*.go" | grep -v "vendor/" | xargs -n 1 goimports -w - # cleans up the deps to not include the need libs - go mod tidy -.PHONY: cleanup_after_test_with_deadlock diff --git a/tools.mk b/tools/Makefile similarity index 68% rename from tools.mk rename to tools/Makefile index de722ae89..fa9081ce2 100644 --- a/tools.mk +++ b/tools/Makefile @@ -43,6 +43,11 @@ TOOLS_DESTDIR ?= $(GOPATH)/bin CERTSTRAP = $(TOOLS_DESTDIR)/certstrap PROTOBUF = $(TOOLS_DESTDIR)/protoc GOODMAN = $(TOOLS_DESTDIR)/goodman +BUF_VERSION = "0.30.0" +BINARY_NAME = "buf" +BIN = "/usr/local/bin" +OS = $(shell uname -s) +ARCH = $(shell uname -m) all: tools .PHONY: all @@ -68,9 +73,17 @@ $(CERTSTRAP): protobuf: $(PROTOBUF) $(PROTOBUF): @echo "Get GoGo Protobuf" - @go get github.com/gogo/protobuf/protoc-gen-gogo@v1.3.1 + @go get github.com/gogo/protobuf/protoc-gen-gogofaster@v1.3.1 .PHONY: protobuf +buf: + @echo "Install Buf" + curl -sSL \ + "https://github.com/bufbuild/buf/releases/download/v$(BUF_VERSION)/$(BINARY_NAME)-$(OS)-$(ARCH)" \ + -o "${BIN}/${BINARY_NAME}" && \ + chmod +x "${BIN}/${BINARY_NAME}" +.PHONY: buf + goodman: $(GOODMAN) $(GOODMAN): @echo "Get Goodman" @@ -80,32 +93,5 @@ $(GOODMAN): tools-clean: rm -f $(CERTSTRAP) $(PROTOBUF) $(GOX) $(GOODMAN) rm -f tools-stamp - rm -rf /usr/local/include/google/protobuf - rm -f /usr/local/bin/protoc + rm -f "${BIN}/${BINARY_NAME}" .PHONY: tooks-clean - -### -# Non Go tools -### - -# Choose protobuf binary based on OS (only works for 64bit Linux and Mac). -# NOTE: On Mac, installation via brew (brew install protoc) might be favorable. -PROTOC_ZIP="" -ifneq ($(OS),Windows_NT) - UNAME_S := $(shell uname -s) - ifeq ($(UNAME_S),Linux) - PROTOC_ZIP="protoc-3.10.1-linux-x86_64.zip" - endif - ifeq ($(UNAME_S),Darwin) - PROTOC_ZIP="protoc-3.10.1-osx-x86_64.zip" - endif -endif - -protoc: - @echo "Get Protobuf" - @echo "In case of any errors, please install directly from https://github.com/protocolbuffers/protobuf/releases" - @curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/$(PROTOC_ZIP) - @unzip -o $(PROTOC_ZIP) -d /usr/local bin/protoc - @unzip -o $(PROTOC_ZIP) -d /usr/local 'include/*' - @rm -f $(PROTOC_ZIP) -.PHONY: protoc diff --git a/tools/mintnet-kubernetes/examples/basecoin/Makefile b/tools/mintnet-kubernetes/examples/basecoin/Makefile deleted file mode 100644 index 6d54d57d6..000000000 --- a/tools/mintnet-kubernetes/examples/basecoin/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -create: - @echo "==> Creating deployment" - @kubectl create -f app.yaml - -destroy: - @echo "==> Destroying deployment" - @kubectl delete -f app.yaml - @kubectl delete pvc -l app=tm - -.PHONY: create destroy diff --git a/tools/mintnet-kubernetes/examples/basecoin/README.md b/tools/mintnet-kubernetes/examples/basecoin/README.md deleted file mode 100644 index 46911a096..000000000 --- a/tools/mintnet-kubernetes/examples/basecoin/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# Basecoin example - -This is an example of using [basecoin](https://github.com/tendermint/basecoin). - -## Usage - -``` -make create -``` - -### Check account balance and send a transaction - -1. wait until all the pods are `Running`. - - ``` - kubectl get pods -w -o wide -L tm - ``` - -2. wait until app starts. - - ``` - kubectl logs -c app -f tm-0 - ``` - -3. get account's address of the second pod - - ``` - ADDR=`kubectl exec -c app tm-1 -- cat /app/key.json | jq ".address" | tr -d "\""` - ``` - -4. 
send 5 coins to it from the first pod - - ``` - kubectl exec -c app tm-0 -- basecoin tx send --to "0x$ADDR" --amount 5mycoin --from /app/key.json --chain_id chain-tTH4mi - ``` - - -## Clean up - -``` -make destroy -``` diff --git a/tools/mintnet-kubernetes/examples/basecoin/app.yaml b/tools/mintnet-kubernetes/examples/basecoin/app.yaml deleted file mode 100644 index 6206b1cdb..000000000 --- a/tools/mintnet-kubernetes/examples/basecoin/app.yaml +++ /dev/null @@ -1,334 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" - name: basecoin - labels: - app: basecoin -spec: - ports: - - port: 26656 - name: p2p - - port: 26657 - name: rpc - clusterIP: None - selector: - app: tm ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: tm-config -data: - seeds: "tm-0,tm-1,tm-2,tm-3" - validators: "tm-0,tm-1,tm-2,tm-3" - validator.power: "10" - genesis.json: |- - { - "genesis_time": "2016-02-05T06:02:31.526Z", - "chain_id": "chain-tTH4mi", - "validators": [], - "app_hash": "" - } - pub_key_nginx.conf: |- - server { - listen 80 default_server; - listen [::]:80 default_server ipv6only=on; - location /pub_key.json { root /usr/share/nginx/; } - location /app_pub_key.json { root /usr/share/nginx/; } - } ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: app-config -data: - genesis.json: |- - { - "chain_id": "chain-tTH4mi", - "app_options": { - "accounts": [ - { - "pub_key": "tm-0", - "coins": [ - { - "denom": "mycoin", - "amount": 1000000000 - } - ] - }, - { - "pub_key": "tm-1", - "coins": [ - { - "denom": "mycoin", - "amount": 1000000000 - } - ] - }, - { - "pub_key": "tm-2", - "coins": [ - { - "denom": "mycoin", - "amount": 1000000000 - } - ] - }, - { - "pub_key": "tm-3", - "coins": [ - { - "denom": "mycoin", - "amount": 1000000000 - } - ] - } - ] - } - } ---- -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: tm-budget -spec: - selector: - matchLabels: - app: tm - minAvailable: 2 ---- -apiVersion: apps/v1beta1 -kind: StatefulSet -metadata: - name: tm -spec: - serviceName: basecoin - replicas: 4 - template: - metadata: - labels: - app: tm - annotations: - pod.beta.kubernetes.io/init-containers: '[{ - "name": "tm-gen-validator", - "image": "tendermint/tendermint:0.10.0", - "imagePullPolicy": "IfNotPresent", - "command": ["bash", "-c", " - set -ex\n - if [ ! -f /tendermint/priv_validator.json ]; then\n - tendermint gen_validator > /tendermint/priv_validator.json\n - # pub_key.json will be served by pub-key container\n - cat /tendermint/priv_validator.json | jq \".pub_key\" > /tendermint/pub_key.json\n - fi\n - "], - "volumeMounts": [ - {"name": "tmdir", "mountPath": "/tendermint"} - ] - }, - { - "name": "app-gen-key", - "image": "tendermint/basecoin:0.5.1", - "imagePullPolicy": "IfNotPresent", - "command": ["bash", "-c", " - set -ex\n - if [ ! 
-f /app/key.json ]; then\n - basecoin key new > /app/key.json\n - # pub_key.json will be served by app-pub-key container\n - cat /app/key.json | jq \".pub_key\" > /app/pub_key.json\n - fi\n - "], - "volumeMounts": [ - {"name": "appdir", "mountPath": "/app"} - ] - }]' - spec: - containers: - - name: tm - imagePullPolicy: IfNotPresent - image: tendermint/tendermint:0.10.0 - ports: - - containerPort: 26656 - name: p2p - - containerPort: 26657 - name: rpc - env: - - name: SEEDS - valueFrom: - configMapKeyRef: - name: tm-config - key: seeds - - name: VALIDATOR_POWER - valueFrom: - configMapKeyRef: - name: tm-config - key: validator.power - - name: VALIDATORS - valueFrom: - configMapKeyRef: - name: tm-config - key: validators - - name: TMHOME - value: /tendermint - command: - - bash - - "-c" - - | - set -ex - - # copy template - cp /etc/tendermint/genesis.json /tendermint/genesis.json - - # fill genesis file with validators - IFS=',' read -ra VALS_ARR <<< "$VALIDATORS" - fqdn_suffix=$(hostname -f | sed 's#[^.]*\.\(\)#\1#') - for v in "${VALS_ARR[@]}"; do - # wait until validator generates priv/pub key pair - set +e - - curl -s --fail "http://$v.$fqdn_suffix/pub_key.json" > /dev/null - ERR=$? - while [ "$ERR" != 0 ]; do - sleep 5 - curl -s --fail "http://$v.$fqdn_suffix/pub_key.json" > /dev/null - ERR=$? - done - set -e - - # add validator to genesis file along with its pub_key - curl -s "http://$v.$fqdn_suffix/pub_key.json" | jq ". as \$k | {pub_key: \$k, amount: $VALIDATOR_POWER, name: \"$v\"}" > pub_validator.json - cat /tendermint/genesis.json | jq ".validators |= .+ [$(cat pub_validator.json)]" > tmpgenesis && mv tmpgenesis /tendermint/genesis.json - rm pub_validator.json - done - - # construct seeds - IFS=',' read -ra SEEDS_ARR <<< "$SEEDS" - seeds=() - for s in "${SEEDS_ARR[@]}"; do - seeds+=("$s.$fqdn_suffix:26656") - done - seeds=$(IFS=','; echo "${seeds[*]}") - - tendermint node --p2p.seeds="$seeds" --moniker="`hostname`" --proxy_app="unix:///socks/app.sock" - volumeMounts: - - name: tmdir - mountPath: /tendermint - - mountPath: /etc/tendermint/genesis.json - name: tmconfigdir - subPath: genesis.json - - name: socksdir - mountPath: /socks - - - name: app - imagePullPolicy: IfNotPresent - image: tendermint/basecoin:0.5.1 - env: - - name: BCHOME - value: /app - workingDir: /app - command: - - bash - - "-c" - - | - set -ex - - # replace "tm-N" with public keys in genesis file - cp /etc/app/genesis.json genesis.json - fqdn_suffix=$(hostname -f | sed 's#[^.]*\.\(\)#\1#') - # for every "base/account" - i=0 - length=$(cat genesis.json | jq ".app_options.accounts | length") - while [[ $i -lt $length ]]; do - # extract pod name ("tm-0") - pod=$(cat genesis.json | jq -r ".app_options.accounts[$i].pub_key") - - # wait until pod starts to serve its pub_key - set +e - - curl -s --fail "http://$pod.$fqdn_suffix/app_pub_key.json" > /dev/null - ERR=$? - while [ "$ERR" != 0 ]; do - sleep 5 - curl -s --fail "http://$pod.$fqdn_suffix/app_pub_key.json" > /dev/null - ERR=$? - done - set -e - - # get its pub_key - curl -s "http://$pod.$fqdn_suffix/app_pub_key.json" | jq "." 
> k.json - - # replace pod name with it ("tm-0" => "{"type": ..., "data": ...}") - cat genesis.json | jq ".app_options.accounts[$i].pub_key = $(cat k.json | jq '.')" > tmpgenesis && mv tmpgenesis genesis.json - rm -f k.json - - i=$((i+1)) - done - - rm -f /socks/app.sock # remove old socket - - basecoin start --address="unix:///socks/app.sock" --without-tendermint - volumeMounts: - - name: appdir - mountPath: /app - - mountPath: /etc/app/genesis.json - name: appconfigdir - subPath: genesis.json - - name: socksdir - mountPath: /socks - - - name: pub-key - imagePullPolicy: IfNotPresent - image: nginx:latest - ports: - - containerPort: 80 - command: - - bash - - "-c" - - | - set -ex - # fixes 403 Permission Denied (open() "/tendermint/pub_key.json" failed (13: Permission denied)) - # => we cannot serve from /tendermint, so we copy the file - mkdir -p /usr/share/nginx - cp /tendermint/pub_key.json /usr/share/nginx/pub_key.json - cp /app/pub_key.json /usr/share/nginx/app_pub_key.json - nginx -g "daemon off;" - volumeMounts: - - name: tmdir - mountPath: /tendermint - - name: appdir - mountPath: /app - - mountPath: /etc/nginx/conf.d/pub_key.conf - name: tmconfigdir - subPath: pub_key_nginx.conf - - volumes: - - name: tmconfigdir - configMap: - name: tm-config - - name: appconfigdir - configMap: - name: app-config - - name: socksdir - emptyDir: {} - - volumeClaimTemplates: - - metadata: - name: tmdir - annotations: - volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 2Gi - - metadata: - name: appdir - annotations: - volume.alpha.kubernetes.io/storage-class: anything - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 12Mi diff --git a/tools/mintnet-kubernetes/examples/basecoin/lightclient.md b/tools/mintnet-kubernetes/examples/basecoin/lightclient.md deleted file mode 100644 index 11d07af1f..000000000 --- a/tools/mintnet-kubernetes/examples/basecoin/lightclient.md +++ /dev/null @@ -1,100 +0,0 @@ -**OUTDATED** - -# Using with lightclient - -We have an awesome cluster running, let's try to test this out without -relying on executing commands on the cluster. Rather, we can connect to the -rpc interface with the `light-client` package and execute commands locally, -or even proxy our webapp to the kubernetes backend. - -## Setup - -In order to get this working, we need to know a few pieces of info, -the chain id of tendermint, the chain id of basecoin, and an account -with a bit of cash.... - -### Tendermint Chain ID - -`kubectl exec -c tm tm-0 -- curl -s http://tm-1.basecoin:26657/status | json_pp | grep network` - -set TM_CHAIN with the value there - -### Basecoin Chain ID - -`kubectl exec -c app tm-1 -- grep -A1 chainID /app/genesis.json` - -set BC_CHAIN with the value there - -### Expose tendermint rpc - -We need to be able to reach the tendermint rpc interface from our shell. - -`kubectl port-forward tm-0 26657:26657` - -### Start basecoin-proxy - -Using this info, let's connect our proxy and get going - -`proxy-basecoin -tmchain=$TM_CHAIN -chain=$BC_CHAIN -rpc=localhost:26657` - -## Basecoin accounts - -Well, we can connect, but we don't have a registered account yet... -Let's look around, then use the cli to send some money from one of -the validators to our client's address so we can play. - -**TODO** we can add some of our known accounts (from `/keys`) into -the genesis file, so we can skip all the kubectl money fiddling here. -We will want to start with money on some known non-validators. 
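The proxy endpoints used below can also be exercised from code rather than pasted into a shell. A minimal Go sketch, assuming only what this guide already sets up (basecoin-proxy listening on localhost:8108; the address argument is whatever you later store in VAL1_ADDR or DEMO_ADDR) and printing the raw JSON response as-is:

```go
// query_account.go - fetch an account through basecoin-proxy.
// Illustrative sketch only: assumes the proxy from this guide is
// running on localhost:8108 and echoes whatever JSON it returns.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
)

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: query_account <address>")
		os.Exit(1)
	}

	resp, err := http.Get("http://localhost:8108/query/account/" + os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(string(body))
}
```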
- -### Getting validator info (kubectl) - -The basecoin app deployment starts with 1000 "blank" coin in an account of -each validator. Let's get the address of the first validator - -`kubectl exec -c app tm-1 -- grep address /app/key.json` - -Store this info as VAL1_ADDR - -### Querying state (proxy) - -The proxy can read any public info via the tendermint rpc, so let's check -out this account. - -`curl localhost:8108/query/account/$VAL1_ADDR` - -Now, let's make our own account.... - -`curl -XPOST http://localhost:8108/keys/ -d '{"name": "k8demo", "passphrase": "1234567890"}'` - -(or pick your own user and password). Remember the address you get here. You can -always find it out later by calling: - -`curl http://localhost:8108/keys/k8demo` - -and store it in DEMO_ADDR, which is empty at first - -`curl localhost:8108/query/account/$DEMO_ADDR` - - -### "Stealing" validator cash (kubectl) - -Run one command that will be signed, and then we have money - -`kubectl exec -c app tm-0 -- basecoin tx send --to --amount 500` - -### Using our money - -Returning to our remote shell, we have a remote account with some money. -Let's see that. - -`curl localhost:8108/query/account/$DEMO_ADDR` - -Cool. Now we need to send it to a second account. - -`curl -XPOST http://localhost:8108/keys/ -d '{"name": "buddy", "passphrase": "1234567890"}'` - -and store the resulting address in BUDDY_ADDR - -**TODO** finish this - diff --git a/tools/tm-signer-harness/Makefile b/tools/tm-signer-harness/Makefile index 47cd03650..1c404ebf8 100644 --- a/tools/tm-signer-harness/Makefile +++ b/tools/tm-signer-harness/Makefile @@ -2,7 +2,8 @@ TENDERMINT_VERSION?=latest BUILD_TAGS?='tendermint' -BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`" +VERSION := $(shell git describe --always) +BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)" .DEFAULT_GOAL := build diff --git a/tools/tm-signer-harness/internal/test_harness.go b/tools/tm-signer-harness/internal/test_harness.go index f9d48fdcb..28a20caed 100644 --- a/tools/tm-signer-harness/internal/test_harness.go +++ b/tools/tm-signer-harness/internal/test_harness.go @@ -1,6 +1,7 @@ package internal import ( + "bytes" "fmt" "net" "os" @@ -16,6 +17,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" tmos "github.com/tendermint/tendermint/libs/os" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -34,7 +36,7 @@ const ( ErrTestSignVoteFailed // 10 ) -var voteTypes = []types.SignedMsgType{types.PrevoteType, types.PrecommitType} +var voteTypes = []tmproto.SignedMsgType{tmproto.PrevoteType, tmproto.PrecommitType} // TestHarnessError allows us to keep track of which exit code should be used // when exiting the main program. @@ -72,7 +74,7 @@ type TestHarnessConfig struct { ConnDeadline time.Duration AcceptRetries int - SecretConnKey ed25519.PrivKeyEd25519 + SecretConnKey ed25519.PrivKey ExitWhenComplete bool // Whether or not to call os.Exit when the harness has completed. 
} @@ -107,7 +109,7 @@ func NewTestHarness(logger log.Logger, cfg TestHarnessConfig) (*TestHarness, err return nil, newTestHarnessError(ErrFailedToCreateListener, err, "") } - signerClient, err := privval.NewSignerClient(spv) + signerClient, err := privval.NewSignerClient(spv, st.ChainID) if err != nil { return nil, newTestHarnessError(ErrFailedToCreateListener, err, "") } @@ -200,7 +202,7 @@ func (th *TestHarness) TestPublicKey() error { return err } th.logger.Info("Remote", "pubKey", sck) - if fpvk != sck { + if !bytes.Equal(fpvk.Bytes(), sck.Bytes()) { th.logger.Error("FAILED: Local and remote public keys do not match") return newTestHarnessError(ErrTestPublicKeyFailed, nil, "") } @@ -214,24 +216,26 @@ func (th *TestHarness) TestSignProposal() error { // sha256 hash of "hash" hash := tmhash.Sum([]byte("hash")) prop := &types.Proposal{ - Type: types.ProposalType, + Type: tmproto.ProposalType, Height: 100, Round: 0, POLRound: -1, BlockID: types.BlockID{ Hash: hash, - PartsHeader: types.PartSetHeader{ + PartSetHeader: types.PartSetHeader{ Hash: hash, Total: 1000000, }, }, Timestamp: time.Now(), } - propBytes := prop.SignBytes(th.chainID) - if err := th.signerClient.SignProposal(th.chainID, prop); err != nil { + p := prop.ToProto() + propBytes := types.ProposalSignBytes(th.chainID, p) + if err := th.signerClient.SignProposal(th.chainID, p); err != nil { th.logger.Error("FAILED: Signing of proposal", "err", err) return newTestHarnessError(ErrTestSignProposalFailed, err, "") } + prop.Signature = p.Signature th.logger.Debug("Signed proposal", "prop", prop) // first check that it's a basically valid proposal if err := prop.ValidateBasic(); err != nil { @@ -243,7 +247,7 @@ func (th *TestHarness) TestSignProposal() error { return err } // now validate the signature on the proposal - if sck.VerifyBytes(propBytes, prop.Signature) { + if sck.VerifySignature(propBytes, prop.Signature) { th.logger.Info("Successfully validated proposal signature") } else { th.logger.Error("FAILED: Proposal signature validation failed") @@ -265,7 +269,7 @@ func (th *TestHarness) TestSignVote() error { Round: 0, BlockID: types.BlockID{ Hash: hash, - PartsHeader: types.PartSetHeader{ + PartSetHeader: types.PartSetHeader{ Hash: hash, Total: 1000000, }, @@ -274,12 +278,14 @@ func (th *TestHarness) TestSignVote() error { ValidatorAddress: tmhash.SumTruncated([]byte("addr")), Timestamp: time.Now(), } - voteBytes := vote.SignBytes(th.chainID) + v := vote.ToProto() + voteBytes := types.VoteSignBytes(th.chainID, v) // sign the vote - if err := th.signerClient.SignVote(th.chainID, vote); err != nil { + if err := th.signerClient.SignVote(th.chainID, v); err != nil { th.logger.Error("FAILED: Signing of vote", "err", err) return newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf("voteType=%d", voteType)) } + vote.Signature = v.Signature th.logger.Debug("Signed vote", "vote", vote) // validate the contents of the vote if err := vote.ValidateBasic(); err != nil { @@ -292,7 +298,7 @@ func (th *TestHarness) TestSignVote() error { } // now validate the signature on the proposal - if sck.VerifyBytes(voteBytes, vote.Signature) { + if sck.VerifySignature(voteBytes, vote.Signature) { th.logger.Info("Successfully validated vote signature", "type", voteType) } else { th.logger.Error("FAILED: Vote signature validation failed", "type", voteType) diff --git a/tools/tm-signer-harness/internal/test_harness_test.go b/tools/tm-signer-harness/internal/test_harness_test.go index aebd9e341..cf22bc836 100644 --- 
a/tools/tm-signer-harness/internal/test_harness_test.go +++ b/tools/tm-signer-harness/internal/test_harness_test.go @@ -23,7 +23,7 @@ const ( "pub_key": { "type": "tendermint/PubKeyEd25519", "value": "ZCsuTjaczEyon70nmKxwvwu+jqrbq5OH3yQjcK0SFxc=" - }, + }, "priv_key": { "type": "tendermint/PrivKeyEd25519", "value": "8O39AkQsoe1sBQwud/Kdul8lg8K9SFsql9aZvwXQSt1kKy5ONpzMTKifvSeYrHC/C76Oqturk4ffJCNwrRIXFw==" @@ -32,7 +32,7 @@ const ( stateFileContents = `{ "height": "0", - "round": "0", + "round": 0, "step": 0 }` @@ -47,7 +47,8 @@ const ( }, "evidence": { "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000" + "max_age_duration": "172800000000000", + "max_num": 50 }, "validator": { "pub_key_types": [ @@ -159,7 +160,7 @@ func harnessTest(t *testing.T, signerServerMaker func(th *TestHarness) *privval. ss := signerServerMaker(th) require.NoError(t, ss.Start()) assert.True(t, ss.IsRunning()) - defer ss.Stop() + defer ss.Stop() //nolint:errcheck // ignore for tests <-donec assert.Equal(t, expectedExitCode, th.exitCode) diff --git a/tools/tm-signer-harness/main.go b/tools/tm-signer-harness/main.go index bec0d4b39..d624234ae 100644 --- a/tools/tm-signer-harness/main.go +++ b/tools/tm-signer-harness/main.go @@ -135,8 +135,8 @@ func extractKey(tmhome, outputPath string) { keyFile := filepath.Join(internal.ExpandPath(tmhome), "config", "priv_validator_key.json") stateFile := filepath.Join(internal.ExpandPath(tmhome), "data", "priv_validator_state.json") fpv := privval.LoadFilePV(keyFile, stateFile) - pkb := [64]byte(fpv.Key.PrivKey.(ed25519.PrivKeyEd25519)) - if err := ioutil.WriteFile(internal.ExpandPath(outputPath), pkb[:32], 0644); err != nil { + pkb := []byte(fpv.Key.PrivKey.(ed25519.PrivKey)) + if err := ioutil.WriteFile(internal.ExpandPath(outputPath), pkb[:32], 0600); err != nil { logger.Info("Failed to write private key", "output", outputPath, "err", err) os.Exit(1) } @@ -144,7 +144,10 @@ func extractKey(tmhome, outputPath string) { } func main() { - rootCmd.Parse(os.Args[1:]) + if err := rootCmd.Parse(os.Args[1:]); err != nil { + fmt.Printf("Error parsing flags: %v\n", err) + os.Exit(1) + } if rootCmd.NArg() == 0 || (rootCmd.NArg() == 1 && rootCmd.Arg(0) == "help") { rootCmd.Usage() os.Exit(0) @@ -166,13 +169,19 @@ func main() { os.Exit(1) } case "run": - runCmd.Parse(os.Args[2:]) + if err := runCmd.Parse(os.Args[2:]); err != nil { + fmt.Printf("Error parsing flags: %v\n", err) + os.Exit(1) + } runTestHarness(flagAcceptRetries, flagBindAddr, flagTMHome) case "extract_key": - extractKeyCmd.Parse(os.Args[2:]) + if err := extractKeyCmd.Parse(os.Args[2:]); err != nil { + fmt.Printf("Error parsing flags: %v\n", err) + os.Exit(1) + } extractKey(flagTMHome, flagKeyOutputPath) case "version": - fmt.Println(version.Version) + fmt.Println(version.TMCoreSemVer) default: fmt.Printf("Unrecognized command: %s\n", flag.Arg(0)) os.Exit(1) diff --git a/types/block.go b/types/block.go index 61ae6719b..a9b86eee6 100644 --- a/types/block.go +++ b/types/block.go @@ -2,12 +2,13 @@ package types import ( "bytes" + "errors" "fmt" "strings" - "sync" "time" - "github.com/pkg/errors" + "github.com/gogo/protobuf/proto" + gogotypes "github.com/gogo/protobuf/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/merkle" @@ -15,28 +16,33 @@ import ( "github.com/tendermint/tendermint/libs/bits" tmbytes "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" + tmsync "github.com/tendermint/tendermint/libs/sync" + tmproto 
"github.com/tendermint/tendermint/proto/tendermint/types" + tmversion "github.com/tendermint/tendermint/proto/tendermint/version" "github.com/tendermint/tendermint/version" ) const ( - // MaxHeaderBytes is a maximum header size (including amino overhead). - MaxHeaderBytes int64 = 632 + // MaxHeaderBytes is a maximum header size. + // NOTE: Because app hash can be of arbitrary size, the header is therefore not + // capped in size and thus this number should be seen as a soft max + MaxHeaderBytes int64 = 626 - // MaxAminoOverheadForBlock - maximum amino overhead to encode a block (up to + // MaxOverheadForBlock - maximum overhead to encode a block (up to // MaxBlockSizeBytes in size) not including it's parts except Data. // This means it also excludes the overhead for individual transactions. - // To compute individual transactions' overhead use types.ComputeAminoOverhead(tx types.Tx, fieldNum int). // // Uvarint length of MaxBlockSizeBytes: 4 bytes // 2 fields (2 embedded): 2 bytes // Uvarint length of Data.Txs: 4 bytes // Data.Txs field: 1 byte - MaxAminoOverheadForBlock int64 = 11 + MaxOverheadForBlock int64 = 11 ) // Block defines the atomic unit of a Tendermint blockchain. type Block struct { - mtx sync.Mutex + mtx tmsync.Mutex + Header `json:"header"` Data `json:"data"` Evidence EvidenceData `json:"evidence"` @@ -50,94 +56,40 @@ func (b *Block) ValidateBasic() error { if b == nil { return errors.New("nil block") } + b.mtx.Lock() defer b.mtx.Unlock() - if len(b.ChainID) > MaxChainIDLen { - return fmt.Errorf("chainID is too long. Max is %d, got %d", MaxChainIDLen, len(b.ChainID)) - } - - if b.Height < 0 { - return errors.New("negative Header.Height") - } else if b.Height == 0 { - return errors.New("zero Header.Height") - } - - // NOTE: Timestamp validation is subtle and handled elsewhere. - - if err := b.LastBlockID.ValidateBasic(); err != nil { - return fmt.Errorf("wrong Header.LastBlockID: %v", err) + if err := b.Header.ValidateBasic(); err != nil { + return fmt.Errorf("invalid header: %w", err) } // Validate the last commit and its hash. - if b.Header.Height > 1 { - if b.LastCommit == nil { - return errors.New("nil LastCommit") - } - if err := b.LastCommit.ValidateBasic(); err != nil { - return fmt.Errorf("wrong LastCommit: %v", err) - } - } - if err := ValidateHash(b.LastCommitHash); err != nil { - return fmt.Errorf("wrong Header.LastCommitHash: %v", err) + if b.LastCommit == nil { + return errors.New("nil LastCommit") } - if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { - return fmt.Errorf("wrong Header.LastCommitHash. Expected %v, got %v", - b.LastCommit.Hash(), - b.LastCommitHash, - ) + if err := b.LastCommit.ValidateBasic(); err != nil { + return fmt.Errorf("wrong LastCommit: %v", err) } - // Validate the hash of the transactions. - // NOTE: b.Data.Txs may be nil, but b.Data.Hash() - // still works fine - if err := ValidateHash(b.DataHash); err != nil { - return fmt.Errorf("wrong Header.DataHash: %v", err) - } - if !bytes.Equal(b.DataHash, b.Data.Hash()) { - return fmt.Errorf( - "wrong Header.DataHash. Expected %v, got %v", - b.Data.Hash(), - b.DataHash, - ) + if w, g := b.LastCommit.Hash(), b.LastCommitHash; !bytes.Equal(w, g) { + return fmt.Errorf("wrong Header.LastCommitHash. Expected %X, got %X", w, g) } - // Basic validation of hashes related to application data. - // Will validate fully against state in state#ValidateBlock. 
- if err := ValidateHash(b.ValidatorsHash); err != nil { - return fmt.Errorf("wrong Header.ValidatorsHash: %v", err) - } - if err := ValidateHash(b.NextValidatorsHash); err != nil { - return fmt.Errorf("wrong Header.NextValidatorsHash: %v", err) - } - if err := ValidateHash(b.ConsensusHash); err != nil { - return fmt.Errorf("wrong Header.ConsensusHash: %v", err) - } - // NOTE: AppHash is arbitrary length - if err := ValidateHash(b.LastResultsHash); err != nil { - return fmt.Errorf("wrong Header.LastResultsHash: %v", err) + // NOTE: b.Data.Txs may be nil, but b.Data.Hash() still works fine. + if w, g := b.Data.Hash(), b.DataHash; !bytes.Equal(w, g) { + return fmt.Errorf("wrong Header.DataHash. Expected %X, got %X", w, g) } - // Validate evidence and its hash. - if err := ValidateHash(b.EvidenceHash); err != nil { - return fmt.Errorf("wrong Header.EvidenceHash: %v", err) - } // NOTE: b.Evidence.Evidence may be nil, but we're just looping. for i, ev := range b.Evidence.Evidence { if err := ev.ValidateBasic(); err != nil { return fmt.Errorf("invalid evidence (#%d): %v", i, err) } } - if !bytes.Equal(b.EvidenceHash, b.Evidence.Hash()) { - return fmt.Errorf("wrong Header.EvidenceHash. Expected %v, got %v", - b.EvidenceHash, - b.Evidence.Hash(), - ) - } - if len(b.ProposerAddress) != crypto.AddressSize { - return fmt.Errorf("expected len(Header.ProposerAddress) to be %d, got %d", - crypto.AddressSize, len(b.ProposerAddress)) + if w, g := b.Evidence.Hash(), b.EvidenceHash; !bytes.Equal(w, g) { + return fmt.Errorf("wrong Header.EvidenceHash. Expected %X, got %X", w, g) } return nil @@ -175,16 +127,18 @@ func (b *Block) Hash() tmbytes.HexBytes { // MakePartSet returns a PartSet containing parts of a serialized block. // This is the form in which the block is gossipped to peers. // CONTRACT: partSize is greater than zero. -func (b *Block) MakePartSet(partSize int) *PartSet { +func (b *Block) MakePartSet(partSize uint32) *PartSet { if b == nil { return nil } b.mtx.Lock() defer b.mtx.Unlock() - // We prefix the byte length, so that unmarshaling - // can easily happen via a reader. - bz, err := cdc.MarshalBinaryLengthPrefixed(b) + pbb, err := b.ToProto() + if err != nil { + panic(err) + } + bz, err := proto.Marshal(pbb) if err != nil { panic(err) } @@ -205,19 +159,28 @@ func (b *Block) HashesTo(hash []byte) bool { // Size returns size of the block in bytes. func (b *Block) Size() int { - bz, err := cdc.MarshalBinaryBare(b) + pbb, err := b.ToProto() if err != nil { return 0 } - return len(bz) + + return pbb.Size() } // String returns a string representation of the block +// +// See StringIndented. func (b *Block) String() string { return b.StringIndented("") } -// StringIndented returns a string representation of the block +// StringIndented returns an indented String. +// +// Header +// Data +// Evidence +// LastCommit +// Hash func (b *Block) StringIndented(indent string) string { if b == nil { return "nil-Block" @@ -235,34 +198,66 @@ func (b *Block) StringIndented(indent string) string { indent, b.Hash()) } -// StringShort returns a shortened string representation of the block +// StringShort returns a shortened string representation of the block. 
func (b *Block) StringShort() string { if b == nil { return "nil-Block" } - return fmt.Sprintf("Block#%v", b.Hash()) + return fmt.Sprintf("Block#%X", b.Hash()) } -//----------------------------------------------------------- -// These methods are for Protobuf Compatibility +// ToProto converts Block to protobuf +func (b *Block) ToProto() (*tmproto.Block, error) { + if b == nil { + return nil, errors.New("nil Block") + } -// Marshal returns the amino encoding. -func (b *Block) Marshal() ([]byte, error) { - return cdc.MarshalBinaryBare(b) -} + pb := new(tmproto.Block) -// MarshalTo calls Marshal and copies to the given buffer. -func (b *Block) MarshalTo(data []byte) (int, error) { - bs, err := b.Marshal() + pb.Header = *b.Header.ToProto() + pb.LastCommit = b.LastCommit.ToProto() + pb.Data = b.Data.ToProto() + + protoEvidence, err := b.Evidence.ToProto() if err != nil { - return -1, err + return nil, err } - return copy(data, bs), nil + pb.Evidence = *protoEvidence + + return pb, nil } -// Unmarshal deserializes from amino encoded form. -func (b *Block) Unmarshal(bs []byte) error { - return cdc.UnmarshalBinaryBare(bs, b) +// FromProto sets a protobuf Block to the given pointer. +// It returns an error if the block is invalid. +func BlockFromProto(bp *tmproto.Block) (*Block, error) { + if bp == nil { + return nil, errors.New("nil block") + } + + b := new(Block) + h, err := HeaderFromProto(&bp.Header) + if err != nil { + return nil, err + } + b.Header = h + data, err := DataFromProto(&bp.Data) + if err != nil { + return nil, err + } + b.Data = data + if err := b.Evidence.FromProto(&bp.Evidence); err != nil { + return nil, err + } + + if bp.LastCommit != nil { + lc, err := CommitFromProto(bp.LastCommit) + if err != nil { + return nil, err + } + b.LastCommit = lc + } + + return b, b.ValidateBasic() } //----------------------------------------------------------------------------- @@ -270,12 +265,12 @@ func (b *Block) Unmarshal(bs []byte) error { // MaxDataBytes returns the maximum size of block's data. // // XXX: Panics on negative result. -func MaxDataBytes(maxBytes int64, valsCount, evidenceCount int) int64 { +func MaxDataBytes(maxBytes, evidenceBytes int64, valsCount int) int64 { maxDataBytes := maxBytes - - MaxAminoOverheadForBlock - + MaxOverheadForBlock - MaxHeaderBytes - - int64(valsCount)*MaxVoteBytes - - int64(evidenceCount)*MaxEvidenceBytes + MaxCommitBytes(valsCount) - + evidenceBytes if maxDataBytes < 0 { panic(fmt.Sprintf( @@ -286,21 +281,18 @@ func MaxDataBytes(maxBytes int64, valsCount, evidenceCount int) int64 { } return maxDataBytes - } -// MaxDataBytesUnknownEvidence returns the maximum size of block's data when +// MaxDataBytesNoEvidence returns the maximum size of block's data when // evidence count is unknown. MaxEvidencePerBlock will be used for the size // of evidence. // // XXX: Panics on negative result. -func MaxDataBytesUnknownEvidence(maxBytes int64, valsCount int) int64 { - _, maxEvidenceBytes := MaxEvidencePerBlock(maxBytes) +func MaxDataBytesNoEvidence(maxBytes int64, valsCount int) int64 { maxDataBytes := maxBytes - - MaxAminoOverheadForBlock - + MaxOverheadForBlock - MaxHeaderBytes - - int64(valsCount)*MaxVoteBytes - - maxEvidenceBytes + MaxCommitBytes(valsCount) if maxDataBytes < 0 { panic(fmt.Sprintf( @@ -313,6 +305,25 @@ func MaxDataBytesUnknownEvidence(maxBytes int64, valsCount int) int64 { return maxDataBytes } +// MakeBlock returns a new block with an empty header, except what can be +// computed from itself. 
+// It populates the same set of fields validated by ValidateBasic. +func MakeBlock(height int64, txs []Tx, lastCommit *Commit, evidence []Evidence) *Block { + block := &Block{ + Header: Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol, App: 0}, + Height: height, + }, + Data: Data{ + Txs: txs, + }, + Evidence: EvidenceData{Evidence: evidence}, + LastCommit: lastCommit, + } + block.fillHeader() + return block +} + //----------------------------------------------------------------------------- // Header defines the structure of a Tendermint block header. @@ -322,10 +333,10 @@ func MaxDataBytesUnknownEvidence(maxBytes int64, valsCount int) int64 { // - https://github.com/tendermint/spec/blob/master/spec/blockchain/blockchain.md type Header struct { // basic block info - Version version.Consensus `json:"version"` - ChainID string `json:"chain_id"` - Height int64 `json:"height"` - Time time.Time `json:"time"` + Version tmversion.Consensus `json:"version"` + ChainID string `json:"chain_id"` + Height int64 `json:"height"` + Time time.Time `json:"time"` // prev block info LastBlockID BlockID `json:"last_block_id"` @@ -350,7 +361,7 @@ type Header struct { // Populate the Header with state-derived data. // Call this after MakeBlock to complete the Header. func (h *Header) Populate( - version version.Consensus, chainID string, + version tmversion.Consensus, chainID string, timestamp time.Time, lastBlockID BlockID, valHash, nextValHash []byte, consensusHash, appHash, lastResultsHash []byte, @@ -368,6 +379,66 @@ func (h *Header) Populate( h.ProposerAddress = proposerAddress } +// ValidateBasic performs stateless validation on a Header returning an error +// if any validation fails. +// +// NOTE: Timestamp validation is subtle and handled elsewhere. +func (h Header) ValidateBasic() error { + if h.Version.Block != version.BlockProtocol { + return fmt.Errorf("block protocol is incorrect: got: %d, want: %d ", h.Version.Block, version.BlockProtocol) + } + if len(h.ChainID) > MaxChainIDLen { + return fmt.Errorf("chainID is too long; got: %d, max: %d", len(h.ChainID), MaxChainIDLen) + } + + if h.Height < 0 { + return errors.New("negative Height") + } else if h.Height == 0 { + return errors.New("zero Height") + } + + if err := h.LastBlockID.ValidateBasic(); err != nil { + return fmt.Errorf("wrong LastBlockID: %w", err) + } + + if err := ValidateHash(h.LastCommitHash); err != nil { + return fmt.Errorf("wrong LastCommitHash: %v", err) + } + + if err := ValidateHash(h.DataHash); err != nil { + return fmt.Errorf("wrong DataHash: %v", err) + } + + if err := ValidateHash(h.EvidenceHash); err != nil { + return fmt.Errorf("wrong EvidenceHash: %v", err) + } + + if len(h.ProposerAddress) != crypto.AddressSize { + return fmt.Errorf( + "invalid ProposerAddress length; got: %d, expected: %d", + len(h.ProposerAddress), crypto.AddressSize, + ) + } + + // Basic validation of hashes related to application data. + // Will validate fully against state in state#ValidateBlock. 
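+	// (These checks are stateless: ValidateHash treats an empty hash as valid and otherwise requires exactly tmhash.Size bytes.)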
+ if err := ValidateHash(h.ValidatorsHash); err != nil { + return fmt.Errorf("wrong ValidatorsHash: %v", err) + } + if err := ValidateHash(h.NextValidatorsHash); err != nil { + return fmt.Errorf("wrong NextValidatorsHash: %v", err) + } + if err := ValidateHash(h.ConsensusHash); err != nil { + return fmt.Errorf("wrong ConsensusHash: %v", err) + } + // NOTE: AppHash is arbitrary length + if err := ValidateHash(h.LastResultsHash); err != nil { + return fmt.Errorf("wrong LastResultsHash: %v", err) + } + + return nil +} + // Hash returns the hash of the header. // It computes a Merkle tree from the header fields // ordered as they appear in the Header. @@ -378,12 +449,27 @@ func (h *Header) Hash() tmbytes.HexBytes { if h == nil || len(h.ValidatorsHash) == 0 { return nil } - return merkle.SimpleHashFromByteSlices([][]byte{ - cdcEncode(h.Version), + hbz, err := h.Version.Marshal() + if err != nil { + return nil + } + + pbt, err := gogotypes.StdTimeMarshal(h.Time) + if err != nil { + return nil + } + + pbbi := h.LastBlockID.ToProto() + bzbi, err := pbbi.Marshal() + if err != nil { + return nil + } + return merkle.HashFromByteSlices([][]byte{ + hbz, cdcEncode(h.ChainID), cdcEncode(h.Height), - cdcEncode(h.Time), - cdcEncode(h.LastBlockID), + pbt, + bzbi, cdcEncode(h.LastCommitHash), cdcEncode(h.DataHash), cdcEncode(h.ValidatorsHash), @@ -396,7 +482,7 @@ func (h *Header) Hash() tmbytes.HexBytes { }) } -// StringIndented returns a string representation of the header +// StringIndented returns an indented string representation of the header. func (h *Header) StringIndented(indent string) string { if h == nil { return "nil-Header" @@ -434,6 +520,63 @@ func (h *Header) StringIndented(indent string) string { indent, h.Hash()) } +// ToProto converts Header to protobuf +func (h *Header) ToProto() *tmproto.Header { + if h == nil { + return nil + } + + return &tmproto.Header{ + Version: h.Version, + ChainID: h.ChainID, + Height: h.Height, + Time: h.Time, + LastBlockId: h.LastBlockID.ToProto(), + ValidatorsHash: h.ValidatorsHash, + NextValidatorsHash: h.NextValidatorsHash, + ConsensusHash: h.ConsensusHash, + AppHash: h.AppHash, + DataHash: h.DataHash, + EvidenceHash: h.EvidenceHash, + LastResultsHash: h.LastResultsHash, + LastCommitHash: h.LastCommitHash, + ProposerAddress: h.ProposerAddress, + } +} + +// FromProto sets a protobuf Header to the given pointer. +// It returns an error if the header is invalid. +func HeaderFromProto(ph *tmproto.Header) (Header, error) { + if ph == nil { + return Header{}, errors.New("nil Header") + } + + h := new(Header) + + bi, err := BlockIDFromProto(&ph.LastBlockId) + if err != nil { + return Header{}, err + } + + h.Version = ph.Version + h.ChainID = ph.ChainID + h.Height = ph.Height + h.Time = ph.Time + h.LastBlockID = *bi + h.ValidatorsHash = ph.ValidatorsHash + h.NextValidatorsHash = ph.NextValidatorsHash + h.ConsensusHash = ph.ConsensusHash + h.AppHash = ph.AppHash + h.DataHash = ph.DataHash + h.EvidenceHash = ph.EvidenceHash + h.LastResultsHash = ph.LastResultsHash + h.LastCommitHash = ph.LastCommitHash + h.ProposerAddress = ph.ProposerAddress + + return *h, h.ValidateBasic() +} + //------------------------------------- // BlockIDFlag indicates which BlockID the signature is for. @@ -448,6 +591,14 @@ const ( BlockIDFlagNil ) +const ( + // Max size of commit without any commitSigs -> 82 for BlockID, 8 for Height, 4 for Round. 
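+	// (82 + 8 + 4 = 94, the constant below.)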
+ MaxCommitOverheadBytes int64 = 94 + // Commit sig size is made up of 64 bytes for the signature, 20 bytes for the address, + // 1 byte for the flag and 14 bytes for the timestamp + MaxCommitSigBytes int64 = 109 +) + // CommitSig is a part of the Vote included in a Commit. type CommitSig struct { BlockIDFlag BlockIDFlag `json:"block_id_flag"` @@ -466,9 +617,10 @@ func NewCommitSigForBlock(signature []byte, valAddr Address, ts time.Time) Commi } } -// ForBlock returns true if CommitSig is for the block. -func (cs CommitSig) ForBlock() bool { - return cs.BlockIDFlag == BlockIDFlagCommit +func MaxCommitBytes(valCount int) int64 { + // From the repeated commit sig field + var protoEncodingOverhead int64 = 2 + return MaxCommitOverheadBytes + ((MaxCommitSigBytes + protoEncodingOverhead) * int64(valCount)) } // NewCommitSigAbsent returns new CommitSig with BlockIDFlagAbsent. Other @@ -479,11 +631,22 @@ func NewCommitSigAbsent() CommitSig { } } +// ForBlock returns true if CommitSig is for the block. +func (cs CommitSig) ForBlock() bool { + return cs.BlockIDFlag == BlockIDFlagCommit +} + // Absent returns true if CommitSig is absent. func (cs CommitSig) Absent() bool { return cs.BlockIDFlag == BlockIDFlagAbsent } +// String returns a string representation of CommitSig. +// +// 1. first 6 bytes of signature +// 2. first 6 bytes of validator address +// 3. block ID flag +// 4. timestamp func (cs CommitSig) String() string { return fmt.Sprintf("CommitSig{%X by %X on %v @ %s}", tmbytes.Fingerprint(cs.Signature), @@ -549,6 +712,32 @@ func (cs CommitSig) ValidateBasic() error { return nil } +// ToProto converts CommitSig to protobuf +func (cs *CommitSig) ToProto() *tmproto.CommitSig { + if cs == nil { + return nil + } + + return &tmproto.CommitSig{ + BlockIdFlag: tmproto.BlockIDFlag(cs.BlockIDFlag), + ValidatorAddress: cs.ValidatorAddress, + Timestamp: cs.Timestamp, + Signature: cs.Signature, + } +} + +// FromProto sets a protobuf CommitSig to the given pointer. +// It returns an error if the CommitSig is invalid. +func (cs *CommitSig) FromProto(csp tmproto.CommitSig) error { + + cs.BlockIDFlag = BlockIDFlag(csp.BlockIdFlag) + cs.ValidatorAddress = csp.ValidatorAddress + cs.Timestamp = csp.Timestamp + cs.Signature = csp.Signature + + return cs.ValidateBasic() +} + //------------------------------------- // Commit contains the evidence that a block was committed by a set of validators. @@ -559,7 +748,7 @@ type Commit struct { // Any peer with a block can gossip signatures by index with a peer without // recalculating the active ValidatorSet. Height int64 `json:"height"` - Round int `json:"round"` + Round int32 `json:"round"` BlockID BlockID `json:"block_id"` Signatures []CommitSig `json:"signatures"` @@ -571,7 +760,7 @@ type Commit struct { } // NewCommit returns a new Commit. -func NewCommit(height int64, round int, blockID BlockID, commitSigs []CommitSig) *Commit { +func NewCommit(height int64, round int32, blockID BlockID, commitSigs []CommitSig) *Commit { return &Commit{ Height: height, Round: round, @@ -584,12 +773,12 @@ func NewCommit(height int64, round int, blockID BlockID, commitSigs []CommitSig) // Panics if signatures from the commit can't be added to the voteset. // Inverse of VoteSet.MakeCommit(). 
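//
// A minimal usage sketch (illustrative, not part of this change; it assumes
// chainID, commit, and vals are mutually consistent):
//
//	voteSet := CommitToVoteSet(chainID, commit, vals)
//	blockID, ok := voteSet.TwoThirdsMajority() // ok is true for any valid commit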
func CommitToVoteSet(chainID string, commit *Commit, vals *ValidatorSet) *VoteSet { - voteSet := NewVoteSet(chainID, commit.Height, commit.Round, PrecommitType, vals) + voteSet := NewVoteSet(chainID, commit.Height, commit.Round, tmproto.PrecommitType, vals) for idx, commitSig := range commit.Signatures { if commitSig.Absent() { continue // OK, some precommits can be missing. } - added, err := voteSet.AddVote(commit.GetVote(idx)) + added, err := voteSet.AddVote(commit.GetVote(int32(idx))) if !added || err != nil { panic(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err)) } @@ -600,10 +789,10 @@ func CommitToVoteSet(chainID string, commit *Commit, vals *ValidatorSet) *VoteSe // GetVote converts the CommitSig for the given valIdx to a Vote. // Returns nil if the precommit at valIdx is nil. // Panics if valIdx >= commit.Size(). -func (commit *Commit) GetVote(valIdx int) *Vote { +func (commit *Commit) GetVote(valIdx int32) *Vote { commitSig := commit.Signatures[valIdx] return &Vote{ - Type: PrecommitType, + Type: tmproto.PrecommitType, Height: commit.Height, Round: commit.Round, BlockID: commitSig.BlockID(commit.BlockID), @@ -614,18 +803,24 @@ func (commit *Commit) GetVote(valIdx int) *Vote { } } -// VoteSignBytes constructs the SignBytes for the given CommitSig. -// The only unique part of the SignBytes is the Timestamp - all other fields -// signed over are otherwise the same for all validators. +// VoteSignBytes returns the bytes of the Vote corresponding to valIdx for +// signing. +// +// The only unique part is the Timestamp - all other fields signed over are +// otherwise the same for all validators. +// // Panics if valIdx >= commit.Size(). -func (commit *Commit) VoteSignBytes(chainID string, valIdx int) []byte { - return commit.GetVote(valIdx).SignBytes(chainID) +// +// See VoteSignBytes +func (commit *Commit) VoteSignBytes(chainID string, valIdx int32) []byte { + v := commit.GetVote(valIdx).ToProto() + return VoteSignBytes(chainID, v) } // Type returns the vote type of the commit, which is always VoteTypePrecommit // Implements VoteSetReader. func (commit *Commit) Type() byte { - return byte(PrecommitType) + return byte(tmproto.PrecommitType) } // GetHeight returns height of the commit. @@ -636,7 +831,7 @@ func (commit *Commit) GetHeight() int64 { // GetRound returns the round of the commit. // Implements VoteSetReader. -func (commit *Commit) GetRound() int { +func (commit *Commit) GetRound() int32 { return commit.Round } @@ -666,7 +861,7 @@ func (commit *Commit) BitArray() *bits.BitArray { // GetByIndex returns the vote corresponding to a given validator index. // Panics if `index >= commit.Size()`. // Implements VoteSetReader. 
-func (commit *Commit) GetByIndex(valIdx int) *Vote { +func (commit *Commit) GetByIndex(valIdx int32) *Vote { return commit.GetVote(valIdx) } @@ -686,19 +881,20 @@ func (commit *Commit) ValidateBasic() error { return errors.New("negative Round") } - if commit.BlockID.IsZero() { - return errors.New("commit cannot be for nil block") - } + if commit.Height >= 1 { + if commit.BlockID.IsZero() { + return errors.New("commit cannot be for nil block") + } - if len(commit.Signatures) == 0 { - return errors.New("no signatures in commit") - } - for i, commitSig := range commit.Signatures { - if err := commitSig.ValidateBasic(); err != nil { - return fmt.Errorf("wrong CommitSig #%d: %v", i, err) + if len(commit.Signatures) == 0 { + return errors.New("no signatures in commit") + } + for i, commitSig := range commit.Signatures { + if err := commitSig.ValidateBasic(); err != nil { + return fmt.Errorf("wrong CommitSig #%d: %v", i, err) + } } } - return nil } @@ -710,14 +906,20 @@ func (commit *Commit) Hash() tmbytes.HexBytes { if commit.hash == nil { bs := make([][]byte, len(commit.Signatures)) for i, commitSig := range commit.Signatures { - bs[i] = cdcEncode(commitSig) + pbcs := commitSig.ToProto() + bz, err := pbcs.Marshal() + if err != nil { + panic(err) + } + + bs[i] = bz } - commit.hash = merkle.SimpleHashFromByteSlices(bs) + commit.hash = merkle.HashFromByteSlices(bs) } return commit.hash } -// StringIndented returns a string representation of the commit +// StringIndented returns a string representation of the commit. func (commit *Commit) StringIndented(indent string) string { if commit == nil { return "nil-Commit" @@ -741,67 +943,55 @@ func (commit *Commit) StringIndented(indent string) string { indent, commit.hash) } -//----------------------------------------------------------------------------- +// ToProto converts Commit to protobuf +func (commit *Commit) ToProto() *tmproto.Commit { + if commit == nil { + return nil + } -// SignedHeader is a header along with the commits that prove it. -// It is the basis of the lite client. -type SignedHeader struct { - *Header `json:"header"` - Commit *Commit `json:"commit"` -} - -// ValidateBasic does basic consistency checks and makes sure the header -// and commit are consistent. -// NOTE: This does not actually check the cryptographic signatures. Make -// sure to use a Verifier to validate the signatures actually provide a -// significantly strong proof for this header's validity. -func (sh SignedHeader) ValidateBasic(chainID string) error { - // Make sure the header is consistent with the commit. - if sh.Header == nil { - return errors.New("signedHeader missing header") - } - if sh.Commit == nil { - return errors.New("signedHeader missing commit (precommit votes)") - } - - // Check ChainID. - if sh.ChainID != chainID { - return fmt.Errorf("signedHeader belongs to another chain '%s' not '%s'", - sh.ChainID, chainID) - } - // Check Height. - if sh.Commit.Height != sh.Height { - return fmt.Errorf("signedHeader header and commit height mismatch: %v vs %v", - sh.Height, sh.Commit.Height) - } - // Check Hash. - hhash := sh.Hash() - chash := sh.Commit.BlockID.Hash - if !bytes.Equal(hhash, chash) { - return fmt.Errorf("signedHeader commit signs block %X, header is block %X", - chash, hhash) - } - // ValidateBasic on the Commit. 
- err := sh.Commit.ValidateBasic() - if err != nil { - return errors.Wrap(err, "commit.ValidateBasic failed during SignedHeader.ValidateBasic") + c := new(tmproto.Commit) + sigs := make([]tmproto.CommitSig, len(commit.Signatures)) + for i := range commit.Signatures { + sigs[i] = *commit.Signatures[i].ToProto() } - return nil -} + c.Signatures = sigs -func (sh SignedHeader) String() string { - return sh.StringIndented("") + c.Height = commit.Height + c.Round = commit.Round + c.BlockID = commit.BlockID.ToProto() + + return c } -// StringIndented returns a string representation of the SignedHeader. -func (sh SignedHeader) StringIndented(indent string) string { - return fmt.Sprintf(`SignedHeader{ -%s %v -%s %v -%s}`, - indent, sh.Header.StringIndented(indent+" "), - indent, sh.Commit.StringIndented(indent+" "), - indent) +// FromProto sets a protobuf Commit to the given pointer. +// It returns an error if the commit is invalid. +func CommitFromProto(cp *tmproto.Commit) (*Commit, error) { + if cp == nil { + return nil, errors.New("nil Commit") + } + + var ( + commit = new(Commit) + ) + + bi, err := BlockIDFromProto(&cp.BlockID) + if err != nil { + return nil, err + } + + sigs := make([]CommitSig, len(cp.Signatures)) + for i := range cp.Signatures { + if err := sigs[i].FromProto(cp.Signatures[i]); err != nil { + return nil, err + } + } + commit.Signatures = sigs + + commit.Height = cp.Height + commit.Round = cp.Round + commit.BlockID = *bi + + return commit, commit.ValidateBasic() } //----------------------------------------------------------------------------- @@ -829,7 +1019,7 @@ func (data *Data) Hash() tmbytes.HexBytes { return data.hash } -// StringIndented returns a string representation of the transactions +// StringIndented returns an indented string representation of the transactions. func (data *Data) StringIndented(indent string) string { if data == nil { return "nil-Data" @@ -849,14 +1039,51 @@ func (data *Data) StringIndented(indent string) string { indent, data.hash) } +// ToProto converts Data to protobuf +func (data *Data) ToProto() tmproto.Data { + tp := new(tmproto.Data) + + if len(data.Txs) > 0 { + txBzs := make([][]byte, len(data.Txs)) + for i := range data.Txs { + txBzs[i] = data.Txs[i] + } + tp.Txs = txBzs + } + + return *tp +} + +// DataFromProto takes a protobuf representation of Data & +// returns the native type. +func DataFromProto(dp *tmproto.Data) (Data, error) { + if dp == nil { + return Data{}, errors.New("nil data") + } + data := new(Data) + + if len(dp.Txs) > 0 { + txBzs := make(Txs, len(dp.Txs)) + for i := range dp.Txs { + txBzs[i] = Tx(dp.Txs[i]) + } + data.Txs = txBzs + } else { + data.Txs = Txs{} + } + + return *data, nil +} + //----------------------------------------------------------------------------- // EvidenceData contains any evidence of malicious wrong-doing by validators type EvidenceData struct { Evidence EvidenceList `json:"evidence"` - // Volatile - hash tmbytes.HexBytes + // Volatile. Used as cache + hash tmbytes.HexBytes + byteSize int64 } // Hash returns the hash of the data. @@ -867,6 +1094,18 @@ func (data *EvidenceData) Hash() tmbytes.HexBytes { return data.hash } +// ByteSize returns the total byte size of all the evidence +func (data *EvidenceData) ByteSize() int64 { + if data.byteSize == 0 && len(data.Evidence) != 0 { + pb, err := data.ToProto() + if err != nil { + panic(err) + } + data.byteSize = int64(pb.Size()) + } + return data.byteSize +} + // StringIndented returns a string representation of the evidence. 
func (data *EvidenceData) StringIndented(indent string) string { if data == nil { @@ -887,26 +1126,68 @@ func (data *EvidenceData) StringIndented(indent string) string { indent, data.hash) } +// ToProto converts EvidenceData to protobuf +func (data *EvidenceData) ToProto() (*tmproto.EvidenceList, error) { + if data == nil { + return nil, errors.New("nil evidence data") + } + + evi := new(tmproto.EvidenceList) + eviBzs := make([]tmproto.Evidence, len(data.Evidence)) + for i := range data.Evidence { + protoEvi, err := EvidenceToProto(data.Evidence[i]) + if err != nil { + return nil, err + } + eviBzs[i] = *protoEvi + } + evi.Evidence = eviBzs + + return evi, nil +} + +// FromProto sets a protobuf EvidenceData to the given pointer. +func (data *EvidenceData) FromProto(eviData *tmproto.EvidenceList) error { + if eviData == nil { + return errors.New("nil evidenceData") + } + + eviBzs := make(EvidenceList, len(eviData.Evidence)) + for i := range eviData.Evidence { + evi, err := EvidenceFromProto(&eviData.Evidence[i]) + if err != nil { + return err + } + eviBzs[i] = evi + } + data.Evidence = eviBzs + data.byteSize = int64(eviData.Size()) + + return nil +} + //-------------------------------------------------------------------------------- -// BlockID defines the unique ID of a block as its Hash and its PartSetHeader +// BlockID type BlockID struct { - Hash tmbytes.HexBytes `json:"hash"` - PartsHeader PartSetHeader `json:"parts"` + Hash tmbytes.HexBytes `json:"hash"` + PartSetHeader PartSetHeader `json:"part_set_header"` } // Equals returns true if the BlockID matches the given BlockID func (blockID BlockID) Equals(other BlockID) bool { return bytes.Equal(blockID.Hash, other.Hash) && - blockID.PartsHeader.Equals(other.PartsHeader) + blockID.PartSetHeader.Equals(other.PartSetHeader) } // Key returns a machine-readable string representation of the BlockID func (blockID BlockID) Key() string { - bz, err := cdc.MarshalBinaryBare(blockID.PartsHeader) + pbph := blockID.PartSetHeader.ToProto() + bz, err := pbph.Marshal() if err != nil { panic(err) } + return string(blockID.Hash) + string(bz) } @@ -916,8 +1197,8 @@ func (blockID BlockID) ValidateBasic() error { if err := ValidateHash(blockID.Hash); err != nil { return fmt.Errorf("wrong Hash") } - if err := blockID.PartsHeader.ValidateBasic(); err != nil { - return fmt.Errorf("wrong PartsHeader: %v", err) + if err := blockID.PartSetHeader.ValidateBasic(); err != nil { + return fmt.Errorf("wrong PartSetHeader: %v", err) } return nil } @@ -925,17 +1206,53 @@ func (blockID BlockID) ValidateBasic() error { // IsZero returns true if this is the BlockID of a nil block. func (blockID BlockID) IsZero() bool { return len(blockID.Hash) == 0 && - blockID.PartsHeader.IsZero() + blockID.PartSetHeader.IsZero() } // IsComplete returns true if this is a valid BlockID of a non-nil block. func (blockID BlockID) IsComplete() bool { return len(blockID.Hash) == tmhash.Size && - blockID.PartsHeader.Total > 0 && - len(blockID.PartsHeader.Hash) == tmhash.Size + blockID.PartSetHeader.Total > 0 && + len(blockID.PartSetHeader.Hash) == tmhash.Size } -// String returns a human readable string representation of the BlockID +// String returns a human readable string representation of the BlockID. +// +// 1. hash +// 2. 
part set header +// +// See PartSetHeader#String func (blockID BlockID) String() string { - return fmt.Sprintf(`%v:%v`, blockID.Hash, blockID.PartsHeader) + return fmt.Sprintf(`%v:%v`, blockID.Hash, blockID.PartSetHeader) +} + +// ToProto converts BlockID to protobuf +func (blockID *BlockID) ToProto() tmproto.BlockID { + if blockID == nil { + return tmproto.BlockID{} + } + + return tmproto.BlockID{ + Hash: blockID.Hash, + PartSetHeader: blockID.PartSetHeader.ToProto(), + } +} + +// FromProto sets a protobuf BlockID to the given pointer. +// It returns an error if the block id is invalid. +func BlockIDFromProto(bID *tmproto.BlockID) (*BlockID, error) { + if bID == nil { + return nil, errors.New("nil BlockID") + } + + blockID := new(BlockID) + ph, err := PartSetHeaderFromProto(&bID.PartSetHeader) + if err != nil { + return nil, err + } + + blockID.PartSetHeader = *ph + blockID.Hash = bID.Hash + + return blockID, blockID.ValidateBasic() } diff --git a/types/block_meta.go b/types/block_meta.go index fc453c4b4..1ce262d9a 100644 --- a/types/block_meta.go +++ b/types/block_meta.go @@ -2,8 +2,10 @@ package types import ( "bytes" + "errors" + "fmt" - "github.com/pkg/errors" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) // BlockMeta contains meta information. @@ -24,32 +26,43 @@ func NewBlockMeta(block *Block, blockParts *PartSet) *BlockMeta { } } -//----------------------------------------------------------- -// These methods are for Protobuf Compatibility +func (bm *BlockMeta) ToProto() *tmproto.BlockMeta { + if bm == nil { + return nil + } -// Size returns the size of the amino encoding, in bytes. -func (bm *BlockMeta) Size() int { - bs, _ := bm.Marshal() - return len(bs) + pb := &tmproto.BlockMeta{ + BlockID: bm.BlockID.ToProto(), + BlockSize: int64(bm.BlockSize), + Header: *bm.Header.ToProto(), + NumTxs: int64(bm.NumTxs), + } + return pb } -// Marshal returns the amino encoding. -func (bm *BlockMeta) Marshal() ([]byte, error) { - return cdc.MarshalBinaryBare(bm) -} +func BlockMetaFromProto(pb *tmproto.BlockMeta) (*BlockMeta, error) { + if pb == nil { + return nil, errors.New("blockmeta is empty") + } + + bm := new(BlockMeta) -// MarshalTo calls Marshal and copies to the given buffer. -func (bm *BlockMeta) MarshalTo(data []byte) (int, error) { - bs, err := bm.Marshal() + bi, err := BlockIDFromProto(&pb.BlockID) if err != nil { - return -1, err + return nil, err } - return copy(data, bs), nil -} -// Unmarshal deserializes from amino encoded form. -func (bm *BlockMeta) Unmarshal(bs []byte) error { - return cdc.UnmarshalBinaryBare(bs, bm) + h, err := HeaderFromProto(&pb.Header) + if err != nil { + return nil, err + } + + bm.BlockID = *bi + bm.BlockSize = int(pb.BlockSize) + bm.Header = h + bm.NumTxs = int(pb.NumTxs) + + return bm, bm.ValidateBasic() } // ValidateBasic performs basic validation. 
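Every converted type in this change follows the same pattern: ToProto to reach the tmproto type, gogo/protobuf for the wire bytes, and a *FromProto constructor that runs ValidateBasic on the way back in. A minimal round-trip sketch for BlockMeta (a hypothetical helper, not part of this diff; it assumes a populated, valid *types.BlockMeta):

```go
package example // illustrative only

import (
	"github.com/gogo/protobuf/proto"

	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	"github.com/tendermint/tendermint/types"
)

// RoundTrip converts a BlockMeta to its protobuf form, serializes it,
// deserializes it, and converts it back to the native type.
func RoundTrip(bm *types.BlockMeta) (*types.BlockMeta, error) {
	bz, err := proto.Marshal(bm.ToProto())
	if err != nil {
		return nil, err
	}

	var pb tmproto.BlockMeta
	if err := proto.Unmarshal(bz, &pb); err != nil {
		return nil, err
	}

	// BlockMetaFromProto rejects inconsistent metadata via ValidateBasic,
	// so corruption is caught here rather than propagating further.
	return types.BlockMetaFromProto(&pb)
}
```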
@@ -58,7 +71,7 @@ func (bm *BlockMeta) ValidateBasic() error { return err } if !bytes.Equal(bm.BlockID.Hash, bm.Header.Hash()) { - return errors.Errorf("expected BlockID#Hash and Header#Hash to be the same, got %X != %X", + return fmt.Errorf("expected BlockID#Hash and Header#Hash to be the same, got %X != %X", bm.BlockID.Hash, bm.Header.Hash()) } return nil diff --git a/types/block_meta_test.go b/types/block_meta_test.go index 882ff8573..1e29a132a 100644 --- a/types/block_meta_test.go +++ b/types/block_meta_test.go @@ -1,7 +1,95 @@ package types -import "testing" +import ( + "testing" -func TestBlockMetaValidateBasic(t *testing.T) { - // TODO + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto/tmhash" + tmrand "github.com/tendermint/tendermint/libs/rand" +) + +func TestBlockMeta_ToProto(t *testing.T) { + h := makeRandHeader() + bi := BlockID{Hash: h.Hash(), PartSetHeader: PartSetHeader{Total: 123, Hash: tmrand.Bytes(tmhash.Size)}} + + bm := &BlockMeta{ + BlockID: bi, + BlockSize: 200, + Header: h, + NumTxs: 0, + } + + tests := []struct { + testName string + bm *BlockMeta + expErr bool + }{ + {"success", bm, false}, + {"failure nil", nil, true}, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + pb := tt.bm.ToProto() + + bm, err := BlockMetaFromProto(pb) + + if !tt.expErr { + require.NoError(t, err, tt.testName) + require.Equal(t, tt.bm, bm, tt.testName) + } else { + require.Error(t, err, tt.testName) + } + }) + } +} + +func TestBlockMeta_ValidateBasic(t *testing.T) { + h := makeRandHeader() + bi := BlockID{Hash: h.Hash(), PartSetHeader: PartSetHeader{Total: 123, Hash: tmrand.Bytes(tmhash.Size)}} + bi2 := BlockID{Hash: tmrand.Bytes(tmhash.Size), + PartSetHeader: PartSetHeader{Total: 123, Hash: tmrand.Bytes(tmhash.Size)}} + bi3 := BlockID{Hash: []byte("incorrect hash"), + PartSetHeader: PartSetHeader{Total: 123, Hash: []byte("incorrect hash")}} + + bm := &BlockMeta{ + BlockID: bi, + BlockSize: 200, + Header: h, + NumTxs: 0, + } + + bm2 := &BlockMeta{ + BlockID: bi2, + BlockSize: 200, + Header: h, + NumTxs: 0, + } + + bm3 := &BlockMeta{ + BlockID: bi3, + BlockSize: 200, + Header: h, + NumTxs: 0, + } + + tests := []struct { + name string + bm *BlockMeta + wantErr bool + }{ + {"success", bm, false}, + {"failure wrong blockID hash", bm2, true}, + {"failure wrong length blockID hash", bm3, true}, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + if err := tt.bm.ValidateBasic(); (err != nil) != tt.wantErr { + t.Errorf("BlockMeta.ValidateBasic() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } } diff --git a/types/block_test.go b/types/block_test.go index fe3da920c..b553c695e 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + gogotypes "github.com/gogo/protobuf/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -20,13 +21,13 @@ import ( "github.com/tendermint/tendermint/libs/bits" "github.com/tendermint/tendermint/libs/bytes" tmrand "github.com/tendermint/tendermint/libs/rand" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmversion "github.com/tendermint/tendermint/proto/tendermint/version" tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/version" ) func TestMain(m *testing.M) { - RegisterMockEvidences(cdc) - code := m.Run() os.Exit(code) } @@ -36,11 +37,11 @@ func TestBlockAddEvidence(t *testing.T) { lastID := makeBlockIDRandom() h := 
int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) - ev := NewMockEvidence(h, time.Now(), 0, valSet.Validators[0].Address) + ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") evList := []Evidence{ev} block := MakeBlock(h, txs, commit, evList) @@ -56,11 +57,11 @@ func TestBlockValidateBasic(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, valSet, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) - ev := NewMockEvidence(h, time.Now(), 0, valSet.Validators[0].Address) + ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") evList := []Evidence{ev} testCases := []struct { @@ -84,7 +85,20 @@ func TestBlockValidateBasic(t *testing.T) { blk.DataHash = tmrand.Bytes(len(blk.DataHash)) }, true}, {"Tampered EvidenceHash", func(blk *Block) { - blk.EvidenceHash = []byte("something else") + blk.EvidenceHash = tmrand.Bytes(len(blk.EvidenceHash)) + }, true}, + {"Incorrect block protocol version", func(blk *Block) { + blk.Version.Block = 1 + }, true}, + {"Missing LastCommit", func(blk *Block) { + blk.LastCommit = nil + }, true}, + {"Invalid LastCommit", func(blk *Block) { + blk.LastCommit = NewCommit(-1, 0, *voteSet.maj23, nil) + }, true}, + {"Invalid Evidence", func(blk *Block) { + emptyEv := &DuplicateVoteEvidence{} + blk.Evidence = EvidenceData{Evidence: []Evidence{emptyEv}} }, true}, } for i, tc := range testCases { @@ -95,6 +109,7 @@ func TestBlockValidateBasic(t *testing.T) { block.ProposerAddress = valSet.GetProposer().Address tc.malleateBlock(block) err = block.ValidateBasic() + t.Log(err) assert.Equal(t, tc.expErr, err != nil, "#%d: %v", i, err) }) } @@ -110,7 +125,7 @@ func TestBlockMakePartSet(t *testing.T) { partSet := MakeBlock(int64(3), []Tx{Tx("Hello World")}, nil, nil).MakePartSet(1024) assert.NotNil(t, partSet) - assert.Equal(t, 1, partSet.Total()) + assert.EqualValues(t, 1, partSet.Total()) } func TestBlockMakePartSetWithEvidence(t *testing.T) { @@ -119,16 +134,16 @@ func TestBlockMakePartSetWithEvidence(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) - ev := NewMockEvidence(h, time.Now(), 0, valSet.Validators[0].Address) + ev := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") evList := []Evidence{ev} partSet := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList).MakePartSet(512) assert.NotNil(t, partSet) - assert.Equal(t, 3, partSet.Total()) + assert.EqualValues(t, 4, partSet.Total()) } func TestBlockHashesTo(t *testing.T) { @@ -136,11 +151,11 @@ func TestBlockHashesTo(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, valSet, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) - ev := NewMockEvidence(h, time.Now(), 0, valSet.Validators[0].Address) + ev := 
NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain") evList := []Evidence{ev} block := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList) @@ -173,12 +188,12 @@ func makeBlockIDRandom() BlockID { blockHash = make([]byte, tmhash.Size) partSetHash = make([]byte, tmhash.Size) ) - rand.Read(blockHash) //nolint: gosec - rand.Read(partSetHash) //nolint: gosec + rand.Read(blockHash) //nolint: errcheck // ignore errcheck for read + rand.Read(partSetHash) //nolint: errcheck // ignore errcheck for read return BlockID{blockHash, PartSetHeader{123, partSetHash}} } -func makeBlockID(hash []byte, partSetSize int, partSetHash []byte) BlockID { +func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) BlockID { var ( h = make([]byte, tmhash.Size) psH = make([]byte, tmhash.Size) @@ -187,7 +202,7 @@ func makeBlockID(hash []byte, partSetSize int, partSetHash []byte) BlockID { copy(psH, partSetHash) return BlockID{ Hash: h, - PartsHeader: PartSetHeader{ + PartSetHeader: PartSetHeader{ Total: partSetSize, Hash: psH, }, @@ -196,26 +211,31 @@ func makeBlockID(hash []byte, partSetSize int, partSetHash []byte) BlockID { var nilBytes []byte +// This follows RFC-6962, i.e. `echo -n '' | sha256sum` +var emptyBytes = []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, + 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, + 0x78, 0x52, 0xb8, 0x55} + func TestNilHeaderHashDoesntCrash(t *testing.T) { - assert.Equal(t, []byte((*Header)(nil).Hash()), nilBytes) - assert.Equal(t, []byte((new(Header)).Hash()), nilBytes) + assert.Equal(t, nilBytes, []byte((*Header)(nil).Hash())) + assert.Equal(t, nilBytes, []byte((new(Header)).Hash())) } func TestNilDataHashDoesntCrash(t *testing.T) { - assert.Equal(t, []byte((*Data)(nil).Hash()), nilBytes) - assert.Equal(t, []byte(new(Data).Hash()), nilBytes) + assert.Equal(t, emptyBytes, []byte((*Data)(nil).Hash())) + assert.Equal(t, emptyBytes, []byte(new(Data).Hash())) } func TestCommit(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) assert.Equal(t, h-1, commit.Height) - assert.Equal(t, 1, commit.Round) - assert.Equal(t, PrecommitType, SignedMsgType(commit.Type())) + assert.EqualValues(t, 1, commit.Round) + assert.Equal(t, tmproto.PrecommitType, tmproto.SignedMsgType(commit.Type())) if commit.Size() <= 0 { t.Fatalf("commit %v has a zero or negative size: %d", commit, commit.Size()) } @@ -248,6 +268,51 @@ func TestCommitValidateBasic(t *testing.T) { } } +func TestMaxCommitBytes(t *testing.T) { + // time is varint encoded so need to pick the max. 
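+	// Both the seconds and nanos fields of the encoded google.protobuf.Timestamp
+	// are varints, so maximal arguments give the widest possible encoding and the
+	// size assertions below are true upper bounds. For reference, time.Date takes: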
+ // year int, month Month, day, hour, min, sec, nsec int, loc *Location + timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) + + cs := CommitSig{ + BlockIDFlag: BlockIDFlagNil, + ValidatorAddress: crypto.AddressHash([]byte("validator_address")), + Timestamp: timestamp, + Signature: crypto.CRandBytes(MaxSignatureSize), + } + + pbSig := cs.ToProto() + // test that a single commit sig doesn't exceed max commit sig bytes + assert.EqualValues(t, MaxCommitSigBytes, pbSig.Size()) + + // check size with a single commit + commit := &Commit{ + Height: math.MaxInt64, + Round: math.MaxInt32, + BlockID: BlockID{ + Hash: tmhash.Sum([]byte("blockID_hash")), + PartSetHeader: PartSetHeader{ + Total: math.MaxInt32, + Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + }, + }, + Signatures: []CommitSig{cs}, + } + + pb := commit.ToProto() + + assert.EqualValues(t, MaxCommitBytes(1), int64(pb.Size())) + + // check the upper bound of the commit size + for i := 1; i < MaxVotesCount; i++ { + commit.Signatures = append(commit.Signatures, cs) + } + + pb = commit.ToProto() + + assert.EqualValues(t, MaxCommitBytes(MaxVotesCount), int64(pb.Size())) + +} + func TestHeaderHash(t *testing.T) { testCases := []struct { desc string @@ -255,7 +320,7 @@ func TestHeaderHash(t *testing.T) { expectHash bytes.HexBytes }{ {"Generates expected hash", &Header{ - Version: version.Consensus{Block: 1, App: 2}, + Version: tmversion.Consensus{Block: 1, App: 2}, ChainID: "chainId", Height: 3, Time: time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC), @@ -269,10 +334,10 @@ func TestHeaderHash(t *testing.T) { LastResultsHash: tmhash.Sum([]byte("last_results_hash")), EvidenceHash: tmhash.Sum([]byte("evidence_hash")), ProposerAddress: crypto.AddressHash([]byte("proposer_address")), - }, hexBytesFromString("ABDC78921B18A47EE6BEF5E31637BADB0F3E587E3C0F4DB2D1E93E9FF0533862")}, + }, hexBytesFromString("F740121F553B5418C3EFBD343C2DBFE9E007BB67B0D020A0741374BAB65242A4")}, {"nil header yields nil", nil, nil}, {"nil ValidatorsHash yields nil", &Header{ - Version: version.Consensus{Block: 1, App: 2}, + Version: tmversion.Consensus{Block: 1, App: 2}, ChainID: "chainId", Height: 3, Time: time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC), @@ -297,15 +362,36 @@ func TestHeaderHash(t *testing.T) { // fields in the test struct are non-zero. 
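+			// The switch below mirrors how Header.Hash encodes each field: time.Time,
+			// tmversion.Consensus and BlockID are marshaled as protobuf messages, the
+			// remaining scalar fields go through the gogoproto wrapper types via
+			// cdcEncode, and the Merkle root of those encodings must match the hash.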
if tc.header != nil && tc.expectHash != nil { byteSlices := [][]byte{} + s := reflect.ValueOf(*tc.header) for i := 0; i < s.NumField(); i++ { f := s.Field(i) + assert.False(t, f.IsZero(), "Found zero-valued field %v", s.Type().Field(i).Name) - byteSlices = append(byteSlices, cdcEncode(f.Interface())) + + switch f := f.Interface().(type) { + case int64, bytes.HexBytes, string: + byteSlices = append(byteSlices, cdcEncode(f)) + case time.Time: + bz, err := gogotypes.StdTimeMarshal(f) + require.NoError(t, err) + byteSlices = append(byteSlices, bz) + case tmversion.Consensus: + bz, err := f.Marshal() + require.NoError(t, err) + byteSlices = append(byteSlices, bz) + case BlockID: + pbbi := f.ToProto() + bz, err := pbbi.Marshal() + require.NoError(t, err) + byteSlices = append(byteSlices, bz) + default: + t.Errorf("unknown type %T", f) + } } assert.Equal(t, - bytes.HexBytes(merkle.SimpleHashFromByteSlices(byteSlices)), tc.header.Hash()) + bytes.HexBytes(merkle.HashFromByteSlices(byteSlices)), tc.header.Hash()) } }) } @@ -326,11 +412,11 @@ func TestMaxHeaderBytes(t *testing.T) { timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) h := Header{ - Version: version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, + Version: tmversion.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, ChainID: maxChainID, Height: math.MaxInt64, Time: timestamp, - LastBlockID: makeBlockID(make([]byte, tmhash.Size), math.MaxInt64, make([]byte, tmhash.Size)), + LastBlockID: makeBlockID(make([]byte, tmhash.Size), math.MaxInt32, make([]byte, tmhash.Size)), LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), DataHash: tmhash.Sum([]byte("data_hash")), ValidatorsHash: tmhash.Sum([]byte("validators_hash")), @@ -342,7 +428,7 @@ func TestMaxHeaderBytes(t *testing.T) { ProposerAddress: crypto.AddressHash([]byte("proposer_address")), } - bz, err := cdc.MarshalBinaryLengthPrefixed(h) + bz, err := h.ToProto().Marshal() require.NoError(t, err) assert.EqualValues(t, MaxHeaderBytes, int64(len(bz))) @@ -351,7 +437,7 @@ func TestMaxHeaderBytes(t *testing.T) { func randCommit(now time.Time) *Commit { lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, now) if err != nil { panic(err) @@ -371,33 +457,35 @@ func TestBlockMaxDataBytes(t *testing.T) { testCases := []struct { maxBytes int64 valsCount int - evidenceCount int + evidenceBytes int64 panics bool result int64 }{ 0: {-10, 1, 0, true, 0}, 1: {10, 1, 0, true, 0}, - 2: {865, 1, 0, true, 0}, - 3: {866, 1, 0, false, 0}, - 4: {867, 1, 0, false, 1}, + 2: {841, 1, 0, true, 0}, + 3: {842, 1, 0, false, 0}, + 4: {843, 1, 0, false, 1}, + 5: {954, 2, 0, false, 1}, + 6: {1053, 2, 100, false, 0}, } for i, tc := range testCases { tc := tc if tc.panics { assert.Panics(t, func() { - MaxDataBytes(tc.maxBytes, tc.valsCount, tc.evidenceCount) + MaxDataBytes(tc.maxBytes, tc.evidenceBytes, tc.valsCount) }, "#%v", i) } else { assert.Equal(t, tc.result, - MaxDataBytes(tc.maxBytes, tc.valsCount, tc.evidenceCount), + MaxDataBytes(tc.maxBytes, tc.evidenceBytes, tc.valsCount), "#%v", i) } } } -func TestBlockMaxDataBytesUnknownEvidence(t *testing.T) { +func TestBlockMaxDataBytesNoEvidence(t *testing.T) { testCases := []struct { maxBytes int64 valsCount int @@ -406,21 +494,21 @@ func TestBlockMaxDataBytesUnknownEvidence(t *testing.T) { }{ 0: {-10, 1, true, 0}, 1: {10, 1, true, 0}, - 2: {961, 1, 
true, 0}, - 3: {962, 1, false, 0}, - 4: {963, 1, false, 1}, + 2: {841, 1, true, 0}, + 3: {842, 1, false, 0}, + 4: {843, 1, false, 1}, } for i, tc := range testCases { tc := tc if tc.panics { assert.Panics(t, func() { - MaxDataBytesUnknownEvidence(tc.maxBytes, tc.valsCount) + MaxDataBytesNoEvidence(tc.maxBytes, tc.valsCount) }, "#%v", i) } else { assert.Equal(t, tc.result, - MaxDataBytesUnknownEvidence(tc.maxBytes, tc.valsCount), + MaxDataBytesNoEvidence(tc.maxBytes, tc.valsCount), "#%v", i) } } @@ -430,21 +518,24 @@ func TestCommitToVoteSet(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, valSet, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) assert.NoError(t, err) chainID := voteSet.ChainID() voteSet2 := CommitToVoteSet(chainID, commit, valSet) - for i := 0; i < len(vals); i++ { + for i := int32(0); int(i) < len(vals); i++ { vote1 := voteSet.GetByIndex(i) vote2 := voteSet2.GetByIndex(i) vote3 := commit.GetVote(i) - vote1bz := cdc.MustMarshalBinaryBare(vote1) - vote2bz := cdc.MustMarshalBinaryBare(vote2) - vote3bz := cdc.MustMarshalBinaryBare(vote3) + vote1bz, err := vote1.ToProto().Marshal() + require.NoError(t, err) + vote2bz, err := vote2.ToProto().Marshal() + require.NoError(t, err) + vote3bz, err := vote3.ToProto().Marshal() + require.NoError(t, err) assert.Equal(t, vote1bz, vote2bz) assert.Equal(t, vote1bz, vote3bz) } @@ -470,9 +561,9 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { } for _, tc := range testCases { - voteSet, valSet, vals := randVoteSet(height-1, round, PrecommitType, tc.numValidators, 1) + voteSet, valSet, vals := randVoteSet(height-1, round, tmproto.PrecommitType, tc.numValidators, 1) - vi := 0 + vi := int32(0) for n := range tc.blockIDs { for i := 0; i < tc.numVotes[n]; i++ { pubKey, err := vals[vi].GetPubKey() @@ -482,7 +573,7 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { ValidatorIndex: vi, Height: height - 1, Round: round, - Type: PrecommitType, + Type: tmproto.PrecommitType, BlockID: tc.blockIDs[n], Timestamp: tmtime.Now(), } @@ -506,95 +597,712 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { } } -func TestSignedHeaderValidateBasic(t *testing.T) { - commit := randCommit(time.Now()) - chainID := "𠜎" - timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) +func TestBlockIDValidateBasic(t *testing.T) { + validBlockID := BlockID{ + Hash: bytes.HexBytes{}, + PartSetHeader: PartSetHeader{ + Total: 1, + Hash: bytes.HexBytes{}, + }, + } + + invalidBlockID := BlockID{ + Hash: []byte{0}, + PartSetHeader: PartSetHeader{ + Total: 1, + Hash: []byte{0}, + }, + } + + testCases := []struct { + testName string + blockIDHash bytes.HexBytes + blockIDPartSetHeader PartSetHeader + expectErr bool + }{ + {"Valid BlockID", validBlockID.Hash, validBlockID.PartSetHeader, false}, + {"Invalid BlockID", invalidBlockID.Hash, validBlockID.PartSetHeader, true}, + {"Invalid BlockID", validBlockID.Hash, invalidBlockID.PartSetHeader, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + blockID := BlockID{ + Hash: tc.blockIDHash, + PartSetHeader: tc.blockIDPartSetHeader, + } + assert.Equal(t, tc.expectErr, blockID.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestBlockProtoBuf(t *testing.T) { + h := tmrand.Int63() + c1 := randCommit(time.Now()) + b1 := MakeBlock(h, 
[]Tx{Tx([]byte{1})}, &Commit{Signatures: []CommitSig{}}, []Evidence{}) + b1.ProposerAddress = tmrand.Bytes(crypto.AddressSize) + + b2 := MakeBlock(h, []Tx{Tx([]byte{1})}, c1, []Evidence{}) + b2.ProposerAddress = tmrand.Bytes(crypto.AddressSize) + evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) + evi := NewMockDuplicateVoteEvidence(h, evidenceTime, "block-test-chain") + b2.Evidence = EvidenceData{Evidence: EvidenceList{evi}} + b2.EvidenceHash = b2.Evidence.Hash() + + b3 := MakeBlock(h, []Tx{}, c1, []Evidence{}) + b3.ProposerAddress = tmrand.Bytes(crypto.AddressSize) + testCases := []struct { + msg string + b1 *Block + expPass bool + expPass2 bool + }{ + {"nil block", nil, false, false}, + {"b1", b1, true, true}, + {"b2", b2, true, true}, + {"b3", b3, true, true}, + } + for _, tc := range testCases { + pb, err := tc.b1.ToProto() + if tc.expPass { + require.NoError(t, err, tc.msg) + } else { + require.Error(t, err, tc.msg) + } + + block, err := BlockFromProto(pb) + if tc.expPass2 { + require.NoError(t, err, tc.msg) + require.EqualValues(t, tc.b1.Header, block.Header, tc.msg) + require.EqualValues(t, tc.b1.Data, block.Data, tc.msg) + require.EqualValues(t, tc.b1.Evidence.Evidence, block.Evidence.Evidence, tc.msg) + require.EqualValues(t, *tc.b1.LastCommit, *block.LastCommit, tc.msg) + } else { + require.Error(t, err, tc.msg) + } + } +} + +func TestDataProtoBuf(t *testing.T) { + data := &Data{Txs: Txs{Tx([]byte{1}), Tx([]byte{2}), Tx([]byte{3})}} + data2 := &Data{Txs: Txs{}} + testCases := []struct { + msg string + data1 *Data + expPass bool + }{ + {"success", data, true}, + {"success data2", data2, true}, + } + for _, tc := range testCases { + protoData := tc.data1.ToProto() + d, err := DataFromProto(&protoData) + if tc.expPass { + require.NoError(t, err, tc.msg) + require.EqualValues(t, tc.data1, &d, tc.msg) + } else { + require.Error(t, err, tc.msg) + } + } +} + +// TestEvidenceDataProtoBuf ensures parity in converting to and from proto. 
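+// A nil *EvidenceData must fail the round trip in both directions, while an
+// empty EvidenceList must survive it unchanged.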
+func TestEvidenceDataProtoBuf(t *testing.T) { + const chainID = "mychain" + ev := NewMockDuplicateVoteEvidence(math.MaxInt64, time.Now(), chainID) + data := &EvidenceData{Evidence: EvidenceList{ev}} + _ = data.ByteSize() + testCases := []struct { + msg string + data1 *EvidenceData + expPass1 bool + expPass2 bool + }{ + {"success", data, true, true}, + {"empty evidenceData", &EvidenceData{Evidence: EvidenceList{}}, true, true}, + {"fail nil Data", nil, false, false}, + } + + for _, tc := range testCases { + protoData, err := tc.data1.ToProto() + if tc.expPass1 { + require.NoError(t, err, tc.msg) + } else { + require.Error(t, err, tc.msg) + } + + eviD := new(EvidenceData) + err = eviD.FromProto(protoData) + if tc.expPass2 { + require.NoError(t, err, tc.msg) + require.Equal(t, tc.data1, eviD, tc.msg) + } else { + require.Error(t, err, tc.msg) + } + } +} + +func makeRandHeader() Header { + chainID := "test" + t := time.Now() + height := tmrand.Int63() + randBytes := tmrand.Bytes(tmhash.Size) + randAddress := tmrand.Bytes(crypto.AddressSize) h := Header{ - Version: version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, + Version: tmversion.Consensus{Block: version.BlockProtocol, App: 1}, ChainID: chainID, - Height: commit.Height, - Time: timestamp, - LastBlockID: commit.BlockID, - LastCommitHash: commit.Hash(), - DataHash: commit.Hash(), - ValidatorsHash: commit.Hash(), - NextValidatorsHash: commit.Hash(), - ConsensusHash: commit.Hash(), - AppHash: commit.Hash(), - LastResultsHash: commit.Hash(), - EvidenceHash: commit.Hash(), - ProposerAddress: crypto.AddressHash([]byte("proposer_address")), + Height: height, + Time: t, + LastBlockID: BlockID{}, + LastCommitHash: randBytes, + DataHash: randBytes, + ValidatorsHash: randBytes, + NextValidatorsHash: randBytes, + ConsensusHash: randBytes, + AppHash: randBytes, + + LastResultsHash: randBytes, + + EvidenceHash: randBytes, + ProposerAddress: randAddress, + } + + return h +} + +func TestHeaderProto(t *testing.T) { + h1 := makeRandHeader() + tc := []struct { + msg string + h1 *Header + expPass bool + }{ + {"success", &h1, true}, + {"failure empty Header", &Header{}, false}, + } + + for _, tt := range tc { + tt := tt + t.Run(tt.msg, func(t *testing.T) { + pb := tt.h1.ToProto() + h, err := HeaderFromProto(pb) + if tt.expPass { + require.NoError(t, err, tt.msg) + require.Equal(t, tt.h1, &h, tt.msg) + } else { + require.Error(t, err, tt.msg) + } + + }) + } +} + +func TestBlockIDProtoBuf(t *testing.T) { + blockID := makeBlockID([]byte("hash"), 2, []byte("part_set_hash")) + testCases := []struct { + msg string + bid1 *BlockID + expPass bool + }{ + {"success", &blockID, true}, + {"success empty", &BlockID{}, true}, + {"failure BlockID nil", nil, false}, + } + for _, tc := range testCases { + protoBlockID := tc.bid1.ToProto() + + bi, err := BlockIDFromProto(&protoBlockID) + if tc.expPass { + require.NoError(t, err) + require.Equal(t, tc.bid1, bi, tc.msg) + } else { + require.NotEqual(t, tc.bid1, bi, tc.msg) + } } +} + +func TestSignedHeaderProtoBuf(t *testing.T) { + commit := randCommit(time.Now()) + h := makeRandHeader() - validSignedHeader := SignedHeader{Header: &h, Commit: commit} - validSignedHeader.Commit.BlockID.Hash = validSignedHeader.Hash() - invalidSignedHeader := SignedHeader{} + sh := SignedHeader{Header: &h, Commit: commit} testCases := []struct { - testName string - shHeader *Header - shCommit *Commit + msg string + sh1 *SignedHeader + expPass bool + }{ + {"empty SignedHeader 2", &SignedHeader{}, true}, + {"success", &sh, true}, + 
{"failure nil", nil, false}, + } + for _, tc := range testCases { + protoSignedHeader := tc.sh1.ToProto() + + sh, err := SignedHeaderFromProto(protoSignedHeader) + + if tc.expPass { + require.NoError(t, err, tc.msg) + require.Equal(t, tc.sh1, sh, tc.msg) + } else { + require.Error(t, err, tc.msg) + } + } +} + +func TestBlockIDEquals(t *testing.T) { + var ( + blockID = makeBlockID([]byte("hash"), 2, []byte("part_set_hash")) + blockIDDuplicate = makeBlockID([]byte("hash"), 2, []byte("part_set_hash")) + blockIDDifferent = makeBlockID([]byte("different_hash"), 2, []byte("part_set_hash")) + blockIDEmpty = BlockID{} + ) + + assert.True(t, blockID.Equals(blockIDDuplicate)) + assert.False(t, blockID.Equals(blockIDDifferent)) + assert.False(t, blockID.Equals(blockIDEmpty)) + assert.True(t, blockIDEmpty.Equals(blockIDEmpty)) + assert.False(t, blockIDEmpty.Equals(blockIDDifferent)) +} + +func TestCommitSig_ValidateBasic(t *testing.T) { + testCases := []struct { + name string + cs CommitSig expectErr bool + errString string }{ - {"Valid Signed Header", validSignedHeader.Header, validSignedHeader.Commit, false}, - {"Invalid Signed Header", invalidSignedHeader.Header, validSignedHeader.Commit, true}, - {"Invalid Signed Header", validSignedHeader.Header, invalidSignedHeader.Commit, true}, + { + "invalid ID flag", + CommitSig{BlockIDFlag: BlockIDFlag(0xFF)}, + true, "unknown BlockIDFlag", + }, + { + "BlockIDFlagAbsent validator address present", + CommitSig{BlockIDFlag: BlockIDFlagAbsent, ValidatorAddress: crypto.Address("testaddr")}, + true, "validator address is present", + }, + { + "BlockIDFlagAbsent timestamp present", + CommitSig{BlockIDFlag: BlockIDFlagAbsent, Timestamp: time.Now().UTC()}, + true, "time is present", + }, + { + "BlockIDFlagAbsent signatures present", + CommitSig{BlockIDFlag: BlockIDFlagAbsent, Signature: []byte{0xAA}}, + true, "signature is present", + }, + { + "BlockIDFlagAbsent valid BlockIDFlagAbsent", + CommitSig{BlockIDFlag: BlockIDFlagAbsent}, + false, "", + }, + { + "non-BlockIDFlagAbsent invalid validator address", + CommitSig{BlockIDFlag: BlockIDFlagCommit, ValidatorAddress: make([]byte, 1)}, + true, "expected ValidatorAddress size", + }, + { + "non-BlockIDFlagAbsent invalid signature (zero)", + CommitSig{ + BlockIDFlag: BlockIDFlagCommit, + ValidatorAddress: make([]byte, crypto.AddressSize), + Signature: make([]byte, 0), + }, + true, "signature is missing", + }, + { + "non-BlockIDFlagAbsent invalid signature (too large)", + CommitSig{ + BlockIDFlag: BlockIDFlagCommit, + ValidatorAddress: make([]byte, crypto.AddressSize), + Signature: make([]byte, MaxSignatureSize+1), + }, + true, "signature is too big", + }, + { + "non-BlockIDFlagAbsent valid", + CommitSig{ + BlockIDFlag: BlockIDFlagCommit, + ValidatorAddress: make([]byte, crypto.AddressSize), + Signature: make([]byte, MaxSignatureSize), + }, + false, "", + }, } for _, tc := range testCases { tc := tc - t.Run(tc.testName, func(t *testing.T) { - sh := SignedHeader{ - Header: tc.shHeader, - Commit: tc.shCommit, + + t.Run(tc.name, func(t *testing.T) { + err := tc.cs.ValidateBasic() + if tc.expectErr { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errString) + } else { + require.NoError(t, err) } - assert.Equal( - t, - tc.expectErr, - sh.ValidateBasic(validSignedHeader.Header.ChainID) != nil, - "Validate Basic had an unexpected result", - ) }) } } -func TestBlockIDValidateBasic(t *testing.T) { - validBlockID := BlockID{ - Hash: bytes.HexBytes{}, - PartsHeader: PartSetHeader{ - Total: 1, - Hash: bytes.HexBytes{}, 
+func TestHeader_ValidateBasic(t *testing.T) { + testCases := []struct { + name string + header Header + expectErr bool + errString string + }{ + { + "invalid version block", + Header{Version: tmversion.Consensus{Block: version.BlockProtocol + 1}}, + true, "block protocol is incorrect", + }, + { + "invalid chain ID length", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen+1)), + }, + true, "chainID is too long", + }, + { + "invalid height (negative)", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: -1, + }, + true, "negative Height", + }, + { + "invalid height (zero)", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 0, + }, + true, "zero Height", + }, + { + "invalid block ID hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size+1), + }, + }, + true, "wrong Hash", + }, + { + "invalid block ID parts header hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size+1), + }, + }, + }, + true, "wrong PartSetHeader", + }, + { + "invalid last commit hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size+1), + }, + true, "wrong LastCommitHash", + }, + { + "invalid data hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size+1), + }, + true, "wrong DataHash", + }, + { + "invalid evidence hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size), + EvidenceHash: make([]byte, tmhash.Size+1), + }, + true, "wrong EvidenceHash", + }, + { + "invalid proposer address", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size), + EvidenceHash: make([]byte, tmhash.Size), + ProposerAddress: make([]byte, crypto.AddressSize+1), + }, + true, "invalid ProposerAddress length", + }, + { + "invalid validator hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + 
PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size), + EvidenceHash: make([]byte, tmhash.Size), + ProposerAddress: make([]byte, crypto.AddressSize), + ValidatorsHash: make([]byte, tmhash.Size+1), + }, + true, "wrong ValidatorsHash", + }, + { + "invalid next validator hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size), + EvidenceHash: make([]byte, tmhash.Size), + ProposerAddress: make([]byte, crypto.AddressSize), + ValidatorsHash: make([]byte, tmhash.Size), + NextValidatorsHash: make([]byte, tmhash.Size+1), + }, + true, "wrong NextValidatorsHash", + }, + { + "invalid consensus hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size), + EvidenceHash: make([]byte, tmhash.Size), + ProposerAddress: make([]byte, crypto.AddressSize), + ValidatorsHash: make([]byte, tmhash.Size), + NextValidatorsHash: make([]byte, tmhash.Size), + ConsensusHash: make([]byte, tmhash.Size+1), + }, + true, "wrong ConsensusHash", + }, + { + "invalid last results hash", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size), + EvidenceHash: make([]byte, tmhash.Size), + ProposerAddress: make([]byte, crypto.AddressSize), + ValidatorsHash: make([]byte, tmhash.Size), + NextValidatorsHash: make([]byte, tmhash.Size), + ConsensusHash: make([]byte, tmhash.Size), + LastResultsHash: make([]byte, tmhash.Size+1), + }, + true, "wrong LastResultsHash", + }, + { + "valid header", + Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol}, + ChainID: string(make([]byte, MaxChainIDLen)), + Height: 1, + LastBlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + LastCommitHash: make([]byte, tmhash.Size), + DataHash: make([]byte, tmhash.Size), + EvidenceHash: make([]byte, tmhash.Size), + ProposerAddress: make([]byte, crypto.AddressSize), + ValidatorsHash: make([]byte, tmhash.Size), + NextValidatorsHash: make([]byte, tmhash.Size), + ConsensusHash: make([]byte, tmhash.Size), + LastResultsHash: make([]byte, tmhash.Size), + }, + false, "", }, } - invalidBlockID := BlockID{ - Hash: []byte{0}, - PartsHeader: PartSetHeader{ - Total: -1, - Hash: bytes.HexBytes{}, - }, + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + err := tc.header.ValidateBasic() + if tc.expectErr { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errString) + } else { + require.NoError(t, err) + } + }) } +} +func TestCommit_ValidateBasic(t *testing.T) { testCases := []struct { - testName string - blockIDHash bytes.HexBytes - blockIDPartsHeader 
PartSetHeader - expectErr bool + name string + commit *Commit + expectErr bool + errString string }{ - {"Valid BlockID", validBlockID.Hash, validBlockID.PartsHeader, false}, - {"Invalid BlockID", invalidBlockID.Hash, validBlockID.PartsHeader, true}, - {"Invalid BlockID", validBlockID.Hash, invalidBlockID.PartsHeader, true}, + { + "invalid height", + &Commit{Height: -1}, + true, "negative Height", + }, + { + "invalid round", + &Commit{Height: 1, Round: -1}, + true, "negative Round", + }, + { + "invalid block ID", + &Commit{ + Height: 1, + Round: 1, + BlockID: BlockID{}, + }, + true, "commit cannot be for nil block", + }, + { + "no signatures", + &Commit{ + Height: 1, + Round: 1, + BlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + }, + true, "no signatures in commit", + }, + { + "invalid signature", + &Commit{ + Height: 1, + Round: 1, + BlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + Signatures: []CommitSig{ + { + BlockIDFlag: BlockIDFlagCommit, + ValidatorAddress: make([]byte, crypto.AddressSize), + Signature: make([]byte, MaxSignatureSize+1), + }, + }, + }, + true, "wrong CommitSig", + }, + { + "valid commit", + &Commit{ + Height: 1, + Round: 1, + BlockID: BlockID{ + Hash: make([]byte, tmhash.Size), + PartSetHeader: PartSetHeader{ + Hash: make([]byte, tmhash.Size), + }, + }, + Signatures: []CommitSig{ + { + BlockIDFlag: BlockIDFlagCommit, + ValidatorAddress: make([]byte, crypto.AddressSize), + Signature: make([]byte, MaxSignatureSize), + }, + }, + }, + false, "", + }, } for _, tc := range testCases { tc := tc - t.Run(tc.testName, func(t *testing.T) { - blockID := BlockID{ - Hash: tc.blockIDHash, - PartsHeader: tc.blockIDPartsHeader, + + t.Run(tc.name, func(t *testing.T) { + err := tc.commit.ValidateBasic() + if tc.expectErr { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errString) + } else { + require.NoError(t, err) } - assert.Equal(t, tc.expectErr, blockID.ValidateBasic() != nil, "Validate Basic had an unexpected result") }) } } diff --git a/types/canonical.go b/types/canonical.go index 59c52d741..49d98405d 100644 --- a/types/canonical.go +++ b/types/canonical.go @@ -3,7 +3,7 @@ package types import ( "time" - "github.com/tendermint/tendermint/libs/bytes" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" ) @@ -12,69 +12,52 @@ import ( // TimeFormat is used for generating the sigs const TimeFormat = time.RFC3339Nano -type CanonicalBlockID struct { - Hash bytes.HexBytes - PartsHeader CanonicalPartSetHeader -} - -type CanonicalPartSetHeader struct { - Hash bytes.HexBytes - Total int -} - -type CanonicalProposal struct { - Type SignedMsgType // type alias for byte - Height int64 `binary:"fixed64"` - Round int64 `binary:"fixed64"` - POLRound int64 `binary:"fixed64"` - BlockID CanonicalBlockID - Timestamp time.Time - ChainID string -} - -type CanonicalVote struct { - Type SignedMsgType // type alias for byte - Height int64 `binary:"fixed64"` - Round int64 `binary:"fixed64"` - BlockID CanonicalBlockID - Timestamp time.Time - ChainID string -} - //----------------------------------- // Canonicalize the structs -func CanonicalizeBlockID(blockID BlockID) CanonicalBlockID { - return CanonicalBlockID{ - Hash: blockID.Hash, - PartsHeader: CanonicalizePartSetHeader(blockID.PartsHeader), +func CanonicalizeBlockID(bid tmproto.BlockID) 
*tmproto.CanonicalBlockID { + rbid, err := BlockIDFromProto(&bid) + if err != nil { + panic(err) + } + var cbid *tmproto.CanonicalBlockID + if rbid == nil || rbid.IsZero() { + cbid = nil + } else { + cbid = &tmproto.CanonicalBlockID{ + Hash: bid.Hash, + PartSetHeader: CanonicalizePartSetHeader(bid.PartSetHeader), + } + } + + return cbid } -func CanonicalizePartSetHeader(psh PartSetHeader) CanonicalPartSetHeader { - return CanonicalPartSetHeader{ - psh.Hash, - psh.Total, - } +// CanonicalizePartSetHeader transforms the given PartSetHeader to a CanonicalPartSetHeader. +func CanonicalizePartSetHeader(psh tmproto.PartSetHeader) tmproto.CanonicalPartSetHeader { + return tmproto.CanonicalPartSetHeader(psh) } -func CanonicalizeProposal(chainID string, proposal *Proposal) CanonicalProposal { - return CanonicalProposal{ - Type: ProposalType, - Height: proposal.Height, - Round: int64(proposal.Round), // cast int->int64 to make amino encode it fixed64 (does not work for int) - POLRound: int64(proposal.POLRound), +// CanonicalizeProposal transforms the given Proposal to a CanonicalProposal. +func CanonicalizeProposal(chainID string, proposal *tmproto.Proposal) tmproto.CanonicalProposal { + return tmproto.CanonicalProposal{ + Type: tmproto.ProposalType, + Height: proposal.Height, // encoded as sfixed64 + Round: int64(proposal.Round), // encoded as sfixed64 + POLRound: int64(proposal.PolRound), BlockID: CanonicalizeBlockID(proposal.BlockID), Timestamp: proposal.Timestamp, ChainID: chainID, } } -func CanonicalizeVote(chainID string, vote *Vote) CanonicalVote { - return CanonicalVote{ +// CanonicalizeVote transforms the given Vote to a CanonicalVote, which does +// not contain ValidatorIndex and ValidatorAddress fields. +func CanonicalizeVote(chainID string, vote *tmproto.Vote) tmproto.CanonicalVote { + return tmproto.CanonicalVote{ Type: vote.Type, - Height: vote.Height, - Round: int64(vote.Round), // cast int->int64 to make amino encode it fixed64 (does not work for int) + Height: vote.Height, // encoded as sfixed64 + Round: int64(vote.Round), // encoded as sfixed64 BlockID: CanonicalizeBlockID(vote.BlockID), Timestamp: vote.Timestamp, ChainID: chainID, diff --git a/types/canonical_test.go b/types/canonical_test.go new file mode 100644 index 000000000..53a8ea52f --- /dev/null +++ b/types/canonical_test.go @@ -0,0 +1,39 @@ +package types + +import ( + "reflect" + "testing" + + "github.com/tendermint/tendermint/crypto/tmhash" + tmrand "github.com/tendermint/tendermint/libs/rand" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" +) + +func TestCanonicalizeBlockID(t *testing.T) { + randhash := tmrand.Bytes(tmhash.Size) + block1 := tmproto.BlockID{Hash: randhash, + PartSetHeader: tmproto.PartSetHeader{Total: 5, Hash: randhash}} + block2 := tmproto.BlockID{Hash: randhash, + PartSetHeader: tmproto.PartSetHeader{Total: 10, Hash: randhash}} + cblock1 := tmproto.CanonicalBlockID{Hash: randhash, + PartSetHeader: tmproto.CanonicalPartSetHeader{Total: 5, Hash: randhash}} + cblock2 := tmproto.CanonicalBlockID{Hash: randhash, + PartSetHeader: tmproto.CanonicalPartSetHeader{Total: 10, Hash: randhash}} + + tests := []struct { + name string + args tmproto.BlockID + want *tmproto.CanonicalBlockID + }{ + {"first", block1, &cblock1}, + {"second", block2, &cblock2}, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + if got := CanonicalizeBlockID(tt.args); !reflect.DeepEqual(got, tt.want) { + t.Errorf("CanonicalizeBlockID() = %v, want %v", got, tt.want) + } + }) + } +} diff --git
a/types/codec.go b/types/codec.go deleted file mode 100644 index b4989d267..000000000 --- a/types/codec.go +++ /dev/null @@ -1,28 +0,0 @@ -package types - -import ( - amino "github.com/tendermint/go-amino" - - cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" -) - -var cdc = amino.NewCodec() - -func init() { - RegisterBlockAmino(cdc) -} - -func RegisterBlockAmino(cdc *amino.Codec) { - cryptoamino.RegisterAmino(cdc) - RegisterEvidences(cdc) -} - -// GetCodec returns a codec used by the package. For testing purposes only. -func GetCodec() *amino.Codec { - return cdc -} - -// For testing purposes only -func RegisterMockEvidencesGlobal() { - RegisterMockEvidences(cdc) -} diff --git a/types/encoding_helper.go b/types/encoding_helper.go index a5c278938..630b088ce 100644 --- a/types/encoding_helper.go +++ b/types/encoding_helper.go @@ -1,10 +1,47 @@ package types +import ( + gogotypes "github.com/gogo/protobuf/types" + + "github.com/tendermint/tendermint/libs/bytes" +) + // cdcEncode returns nil if the input is nil, otherwise returns -// cdc.MustMarshalBinaryBare(item) +// proto.Marshal(Value{Value: item}) func cdcEncode(item interface{}) []byte { if item != nil && !isTypedNil(item) && !isEmpty(item) { - return cdc.MustMarshalBinaryBare(item) + switch item := item.(type) { + case string: + i := gogotypes.StringValue{ + Value: item, + } + bz, err := i.Marshal() + if err != nil { + return nil + } + return bz + case int64: + i := gogotypes.Int64Value{ + Value: item, + } + bz, err := i.Marshal() + if err != nil { + return nil + } + return bz + case bytes.HexBytes: + i := gogotypes.BytesValue{ + Value: item, + } + bz, err := i.Marshal() + if err != nil { + return nil + } + return bz + default: + return nil + } } + return nil } diff --git a/types/event_bus.go b/types/event_bus.go index 1c838d13b..72ba5e3d6 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -59,7 +59,9 @@ func (b *EventBus) OnStart() error { } func (b *EventBus) OnStop() { - b.pubsub.Stop() + if err := b.pubsub.Stop(); err != nil { + b.pubsub.Logger.Error("error trying to stop eventBus", "error", err) + } } func (b *EventBus) NumClients() int { @@ -156,6 +158,10 @@ func (b *EventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) erro return b.pubsub.PublishWithEvents(ctx, data, events) } +func (b *EventBus) PublishEventNewEvidence(evidence EventDataNewEvidence) error { + return b.Publish(EventNewEvidence, evidence) +} + func (b *EventBus) PublishEventVote(data EventDataVote) error { return b.Publish(EventVote, data) } @@ -175,7 +181,7 @@ func (b *EventBus) PublishEventTx(data EventDataTx) error { // add predefined compositeKeys events[EventTypeKey] = append(events[EventTypeKey], EventTx) - events[TxHashKey] = append(events[TxHashKey], fmt.Sprintf("%X", data.Tx.Hash())) + events[TxHashKey] = append(events[TxHashKey], fmt.Sprintf("%X", Tx(data.Tx).Hash())) events[TxHeightKey] = append(events[TxHeightKey], fmt.Sprintf("%d", data.Height)) return b.pubsub.PublishWithEvents(ctx, data, events) @@ -249,6 +255,10 @@ func (NopEventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) erro return nil } +func (NopEventBus) PublishEventNewEvidence(evidence EventDataNewEvidence) error { + return nil +} + func (NopEventBus) PublishEventVote(data EventDataVote) error { return nil } diff --git a/types/event_bus_test.go b/types/event_bus_test.go index 14b6d96e0..a0a2e2e5f 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/require" abci 
"github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/kv" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -21,13 +20,17 @@ func TestEventBusPublishEventTx(t *testing.T) { eventBus := NewEventBus() err := eventBus.Start() require.NoError(t, err) - defer eventBus.Stop() + t.Cleanup(func() { + if err := eventBus.Stop(); err != nil { + t.Error(err) + } + }) tx := Tx("foo") result := abci.ResponseDeliverTx{ Data: []byte("bar"), Events: []abci.Event{ - {Type: "testType", Attributes: []kv.Pair{{Key: []byte("baz"), Value: []byte("1")}}}, + {Type: "testType", Attributes: []abci.EventAttribute{{Key: []byte("baz"), Value: []byte("1")}}}, }, } @@ -42,12 +45,12 @@ func TestEventBusPublishEventTx(t *testing.T) { edt := msg.Data().(EventDataTx) assert.Equal(t, int64(1), edt.Height) assert.Equal(t, uint32(0), edt.Index) - assert.Equal(t, tx, edt.Tx) + assert.EqualValues(t, tx, edt.Tx) assert.Equal(t, result, edt.Result) close(done) }() - err = eventBus.PublishEventTx(EventDataTx{TxResult{ + err = eventBus.PublishEventTx(EventDataTx{abci.TxResult{ Height: 1, Index: 0, Tx: tx, @@ -66,17 +69,21 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { eventBus := NewEventBus() err := eventBus.Start() require.NoError(t, err) - defer eventBus.Stop() + t.Cleanup(func() { + if err := eventBus.Stop(); err != nil { + t.Error(err) + } + }) block := MakeBlock(0, []Tx{}, nil, []Evidence{}) resultBeginBlock := abci.ResponseBeginBlock{ Events: []abci.Event{ - {Type: "testType", Attributes: []kv.Pair{{Key: []byte("baz"), Value: []byte("1")}}}, + {Type: "testType", Attributes: []abci.EventAttribute{{Key: []byte("baz"), Value: []byte("1")}}}, }, } resultEndBlock := abci.ResponseEndBlock{ Events: []abci.Event{ - {Type: "testType", Attributes: []kv.Pair{{Key: []byte("foz"), Value: []byte("2")}}}, + {Type: "testType", Attributes: []abci.EventAttribute{{Key: []byte("foz"), Value: []byte("2")}}}, }, } @@ -113,7 +120,11 @@ func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { eventBus := NewEventBus() err := eventBus.Start() require.NoError(t, err) - defer eventBus.Stop() + t.Cleanup(func() { + if err := eventBus.Stop(); err != nil { + t.Error(err) + } + }) tx := Tx("foo") result := abci.ResponseDeliverTx{ @@ -121,7 +132,7 @@ func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { Events: []abci.Event{ { Type: "transfer", - Attributes: []kv.Pair{ + Attributes: []abci.EventAttribute{ {Key: []byte("sender"), Value: []byte("foo")}, {Key: []byte("recipient"), Value: []byte("bar")}, {Key: []byte("amount"), Value: []byte("5")}, @@ -129,7 +140,7 @@ func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { }, { Type: "transfer", - Attributes: []kv.Pair{ + Attributes: []abci.EventAttribute{ {Key: []byte("sender"), Value: []byte("baz")}, {Key: []byte("recipient"), Value: []byte("cat")}, {Key: []byte("amount"), Value: []byte("13")}, @@ -137,7 +148,7 @@ func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { }, { Type: "withdraw.rewards", - Attributes: []kv.Pair{ + Attributes: []abci.EventAttribute{ {Key: []byte("address"), Value: []byte("bar")}, {Key: []byte("source"), Value: []byte("iceman")}, {Key: []byte("amount"), Value: []byte("33")}, @@ -179,16 +190,20 @@ func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { done := make(chan struct{}) go func() { - msg := <-sub.Out() - data := msg.Data().(EventDataTx) - assert.Equal(t, int64(1), data.Height) - 
assert.Equal(t, uint32(0), data.Index) - assert.Equal(t, tx, data.Tx) - assert.Equal(t, result, data.Result) - close(done) + select { + case msg := <-sub.Out(): + data := msg.Data().(EventDataTx) + assert.Equal(t, int64(1), data.Height) + assert.Equal(t, uint32(0), data.Index) + assert.EqualValues(t, tx, data.Tx) + assert.Equal(t, result, data.Result) + close(done) + case <-time.After(1 * time.Second): + return + } }() - err = eventBus.PublishEventTx(EventDataTx{TxResult{ + err = eventBus.PublishEventTx(EventDataTx{abci.TxResult{ Height: 1, Index: 0, Tx: tx, @@ -213,17 +228,21 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { eventBus := NewEventBus() err := eventBus.Start() require.NoError(t, err) - defer eventBus.Stop() + t.Cleanup(func() { + if err := eventBus.Stop(); err != nil { + t.Error(err) + } + }) block := MakeBlock(0, []Tx{}, nil, []Evidence{}) resultBeginBlock := abci.ResponseBeginBlock{ Events: []abci.Event{ - {Type: "testType", Attributes: []kv.Pair{{Key: []byte("baz"), Value: []byte("1")}}}, + {Type: "testType", Attributes: []abci.EventAttribute{{Key: []byte("baz"), Value: []byte("1")}}}, }, } resultEndBlock := abci.ResponseEndBlock{ Events: []abci.Event{ - {Type: "testType", Attributes: []kv.Pair{{Key: []byte("foz"), Value: []byte("2")}}}, + {Type: "testType", Attributes: []abci.EventAttribute{{Key: []byte("foz"), Value: []byte("2")}}}, }, } @@ -256,11 +275,53 @@ } } +func TestEventBusPublishEventNewEvidence(t *testing.T) { + eventBus := NewEventBus() + err := eventBus.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := eventBus.Stop(); err != nil { + t.Error(err) + } + }) + + ev := NewMockDuplicateVoteEvidence(1, time.Now(), "test-chain-id") + + query := "tm.event='NewEvidence'" + evSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query)) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + msg := <-evSub.Out() + edt := msg.Data().(EventDataNewEvidence) + assert.Equal(t, ev, edt.Evidence) + assert.Equal(t, int64(4), edt.Height) + close(done) + }() + + err = eventBus.PublishEventNewEvidence(EventDataNewEvidence{ + Evidence: ev, + Height: 4, + }) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive evidence after 1 sec.") + } +} + func TestEventBusPublish(t *testing.T) { eventBus := NewEventBus() err := eventBus.Start() require.NoError(t, err) - defer eventBus.Stop() + t.Cleanup(func() { + if err := eventBus.Stop(); err != nil { + t.Error(err) + } + }) const numEventsExpected = 14 @@ -352,8 +413,15 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes rand.Seed(time.Now().Unix()) eventBus := NewEventBusWithBufferCapacity(0) // set buffer capacity to 0 so we are not testing cache - eventBus.Start() - defer eventBus.Stop() + err := eventBus.Start() + if err != nil { + b.Error(err) + } + b.Cleanup(func() { + if err := eventBus.Stop(); err != nil { + b.Error(err) + } + }) ctx := context.Background() q := EventQueryNewBlock @@ -386,7 +454,10 @@ func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *tes eventType = randEvent() } - eventBus.Publish(eventType, EventDataString("Gamora")) + err := eventBus.Publish(eventType, EventDataString("Gamora")) + if err != nil { + b.Error(err) + } } } diff --git a/types/events.go b/types/events.go index c257ba328..38b356983 100644 --- a/types/events.go +++ b/types/events.go @@ -3,9 +3,8
@@ package types import ( "fmt" - amino "github.com/tendermint/go-amino" - abci "github.com/tendermint/tendermint/abci/types" + tmjson "github.com/tendermint/tendermint/libs/json" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" ) @@ -19,6 +18,7 @@ const ( // All of this data can be fetched through the rpc. EventNewBlock = "NewBlock" EventNewBlockHeader = "NewBlockHeader" + EventNewEvidence = "NewEvidence" EventTx = "Tx" EventValidatorSetUpdates = "ValidatorSetUpdates" @@ -38,26 +38,24 @@ const ( EventVote = "Vote" ) -/////////////////////////////////////////////////////////////////////////////// // ENCODING / DECODING -/////////////////////////////////////////////////////////////////////////////// // TMEventData implements events.EventData. type TMEventData interface { // empty interface } -func RegisterEventDatas(cdc *amino.Codec) { - cdc.RegisterInterface((*TMEventData)(nil), nil) - cdc.RegisterConcrete(EventDataNewBlock{}, "tendermint/event/NewBlock", nil) - cdc.RegisterConcrete(EventDataNewBlockHeader{}, "tendermint/event/NewBlockHeader", nil) - cdc.RegisterConcrete(EventDataTx{}, "tendermint/event/Tx", nil) - cdc.RegisterConcrete(EventDataRoundState{}, "tendermint/event/RoundState", nil) - cdc.RegisterConcrete(EventDataNewRound{}, "tendermint/event/NewRound", nil) - cdc.RegisterConcrete(EventDataCompleteProposal{}, "tendermint/event/CompleteProposal", nil) - cdc.RegisterConcrete(EventDataVote{}, "tendermint/event/Vote", nil) - cdc.RegisterConcrete(EventDataValidatorSetUpdates{}, "tendermint/event/ValidatorSetUpdates", nil) - cdc.RegisterConcrete(EventDataString(""), "tendermint/event/ProposalString", nil) +func init() { + tmjson.RegisterType(EventDataNewBlock{}, "tendermint/event/NewBlock") + tmjson.RegisterType(EventDataNewBlockHeader{}, "tendermint/event/NewBlockHeader") + tmjson.RegisterType(EventDataNewEvidence{}, "tendermint/event/NewEvidence") + tmjson.RegisterType(EventDataTx{}, "tendermint/event/Tx") + tmjson.RegisterType(EventDataRoundState{}, "tendermint/event/RoundState") + tmjson.RegisterType(EventDataNewRound{}, "tendermint/event/NewRound") + tmjson.RegisterType(EventDataCompleteProposal{}, "tendermint/event/CompleteProposal") + tmjson.RegisterType(EventDataVote{}, "tendermint/event/Vote") + tmjson.RegisterType(EventDataValidatorSetUpdates{}, "tendermint/event/ValidatorSetUpdates") + tmjson.RegisterType(EventDataString(""), "tendermint/event/ProposalString") } // Most event messages are basic types (a block, a transaction) @@ -78,26 +76,32 @@ type EventDataNewBlockHeader struct { ResultEndBlock abci.ResponseEndBlock `json:"result_end_block"` } +type EventDataNewEvidence struct { + Evidence Evidence `json:"evidence"` + + Height int64 `json:"height"` +} + // All txs fire EventDataTx type EventDataTx struct { - TxResult + abci.TxResult } // NOTE: This goes into the replay WAL type EventDataRoundState struct { Height int64 `json:"height"` - Round int `json:"round"` + Round int32 `json:"round"` Step string `json:"step"` } type ValidatorInfo struct { Address Address `json:"address"` - Index int `json:"index"` + Index int32 `json:"index"` } type EventDataNewRound struct { Height int64 `json:"height"` - Round int `json:"round"` + Round int32 `json:"round"` Step string `json:"step"` Proposer ValidatorInfo `json:"proposer"` @@ -105,7 +109,7 @@ type EventDataNewRound struct { type EventDataCompleteProposal struct { Height int64 `json:"height"` - Round int `json:"round"` + Round int32 `json:"round"` Step string 
`json:"step"` BlockID BlockID `json:"block_id"` @@ -121,9 +125,7 @@ type EventDataValidatorSetUpdates struct { ValidatorUpdates []*Validator `json:"validator_updates"` } -/////////////////////////////////////////////////////////////////////////////// // PUBSUB -/////////////////////////////////////////////////////////////////////////////// const ( // EventTypeKey is a reserved composite key for event name. @@ -141,6 +143,7 @@ var ( EventQueryLock = QueryForEvent(EventLock) EventQueryNewBlock = QueryForEvent(EventNewBlock) EventQueryNewBlockHeader = QueryForEvent(EventNewBlockHeader) + EventQueryNewEvidence = QueryForEvent(EventNewEvidence) EventQueryNewRound = QueryForEvent(EventNewRound) EventQueryNewRoundStep = QueryForEvent(EventNewRoundStep) EventQueryPolka = QueryForEvent(EventPolka) @@ -166,6 +169,7 @@ func QueryForEvent(eventType string) tmpubsub.Query { type BlockEventPublisher interface { PublishEventNewBlock(block EventDataNewBlock) error PublishEventNewBlockHeader(header EventDataNewBlockHeader) error + PublishEventNewEvidence(evidence EventDataNewEvidence) error PublishEventTx(EventDataTx) error PublishEventValidatorSetUpdates(EventDataValidatorSetUpdates) error } diff --git a/types/events_test.go b/types/events_test.go index a4b71d922..12f75b74d 100644 --- a/types/events_test.go +++ b/types/events_test.go @@ -20,4 +20,8 @@ func TestQueryForEvent(t *testing.T) { "tm.event='NewBlock'", QueryForEvent(EventNewBlock).String(), ) + assert.Equal(t, + "tm.event='NewEvidence'", + QueryForEvent(EventNewEvidence).String(), + ) } diff --git a/types/evidence.go b/types/evidence.go index 244244f9e..8007763b7 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -2,117 +2,60 @@ package types import ( "bytes" + "encoding/binary" + "errors" "fmt" + "sort" "strings" "time" - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/crypto/tmhash" - - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/crypto" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" + tmjson "github.com/tendermint/tendermint/libs/json" + tmrand "github.com/tendermint/tendermint/libs/rand" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) -const ( - // MaxEvidenceBytes is a maximum size of any evidence (including amino overhead). - MaxEvidenceBytes int64 = 484 -) - -// ErrEvidenceInvalid wraps a piece of evidence and the error denoting how or why it is invalid. -type ErrEvidenceInvalid struct { - Evidence Evidence - ErrorValue error -} - -// NewErrEvidenceInvalid returns a new EvidenceInvalid with the given err. -func NewErrEvidenceInvalid(ev Evidence, err error) *ErrEvidenceInvalid { - return &ErrEvidenceInvalid{ev, err} -} - -// Error returns a string representation of the error. -func (err *ErrEvidenceInvalid) Error() string { - return fmt.Sprintf("Invalid evidence: %v. Evidence: %v", err.ErrorValue, err.Evidence) -} - -// ErrEvidenceOverflow is for when there is too much evidence in a block. -type ErrEvidenceOverflow struct { - MaxNum int64 - GotNum int64 -} - -// NewErrEvidenceOverflow returns a new ErrEvidenceOverflow where got > max. -func NewErrEvidenceOverflow(max, got int64) *ErrEvidenceOverflow { - return &ErrEvidenceOverflow{max, got} -} - -// Error returns a string representation of the error. 
-func (err *ErrEvidenceOverflow) Error() string { - return fmt.Sprintf("Too much evidence: Max %d, got %d", err.MaxNum, err.GotNum) -} - -//------------------------------------------- - -// Evidence represents any provable malicious activity by a validator +// Evidence represents any provable malicious activity by a validator. +// Verification logic for each evidence is part of the evidence module. type Evidence interface { - Height() int64 // height of the equivocation - Time() time.Time // time of the equivocation - Address() []byte // address of the equivocating validator - Bytes() []byte // bytes which comprise the evidence - Hash() []byte // hash of the evidence - Verify(chainID string, pubKey crypto.PubKey) error // verify the evidence - Equal(Evidence) bool // check equality of evidence - - ValidateBasic() error - String() string -} - -func RegisterEvidences(cdc *amino.Codec) { - cdc.RegisterInterface((*Evidence)(nil), nil) - cdc.RegisterConcrete(&DuplicateVoteEvidence{}, "tendermint/DuplicateVoteEvidence", nil) -} - -func RegisterMockEvidences(cdc *amino.Codec) { - cdc.RegisterConcrete(MockEvidence{}, "tendermint/MockEvidence", nil) - cdc.RegisterConcrete(MockRandomEvidence{}, "tendermint/MockRandomEvidence", nil) -} - -const ( - MaxEvidenceBytesDenominator = 10 -) - -// MaxEvidencePerBlock returns the maximum number of evidences -// allowed in the block and their maximum total size (limitted to 1/10th -// of the maximum block size). -// TODO: change to a constant, or to a fraction of the validator set size. -// See https://github.com/tendermint/tendermint/issues/2590 -func MaxEvidencePerBlock(blockMaxBytes int64) (int64, int64) { - maxBytes := blockMaxBytes / MaxEvidenceBytesDenominator - maxNum := maxBytes / MaxEvidenceBytes - return maxNum, maxBytes + ABCI() []abci.Evidence // forms individual evidence to be sent to the application + Bytes() []byte // bytes which comprise the evidence + Hash() []byte // hash of the evidence + Height() int64 // height of the infraction + String() string // string format of the evidence + Time() time.Time // time of the infraction + ValidateBasic() error // basic consistency check } -//------------------------------------------- +//-------------------------------------------------------------------------------------- -// DuplicateVoteEvidence contains evidence a validator signed two conflicting -// votes. +// DuplicateVoteEvidence contains evidence of a single validator signing two conflicting votes. type DuplicateVoteEvidence struct { - PubKey crypto.PubKey - VoteA *Vote - VoteB *Vote + VoteA *Vote `json:"vote_a"` + VoteB *Vote `json:"vote_b"` + + // abci specific information + TotalVotingPower int64 + ValidatorPower int64 + Timestamp time.Time } var _ Evidence = &DuplicateVoteEvidence{} // NewDuplicateVoteEvidence creates DuplicateVoteEvidence with right ordering given // two conflicting votes. 
If one of the votes is nil, evidence returned is nil as well -func NewDuplicateVoteEvidence(pubkey crypto.PubKey, vote1 *Vote, vote2 *Vote) *DuplicateVoteEvidence { +func NewDuplicateVoteEvidence(vote1, vote2 *Vote, blockTime time.Time, valSet *ValidatorSet) *DuplicateVoteEvidence { var voteA, voteB *Vote - if vote1 == nil || vote2 == nil { + if vote1 == nil || vote2 == nil || valSet == nil { + return nil + } + idx, val := valSet.GetByAddress(vote1.ValidatorAddress) + if idx == -1 { return nil } + if strings.Compare(vote1.BlockID.Key(), vote2.BlockID.Key()) == -1 { voteA = vote1 voteB = vote2 @@ -121,194 +64,352 @@ func NewDuplicateVoteEvidence(pubkey crypto.PubKey, vote1 *Vote, vote2 *Vote) *D voteB = vote1 } return &DuplicateVoteEvidence{ - PubKey: pubkey, - VoteA: voteA, - VoteB: voteB, + VoteA: voteA, + VoteB: voteB, + TotalVotingPower: valSet.TotalVotingPower(), + ValidatorPower: val.VotingPower, + Timestamp: blockTime, } } -// String returns a string representation of the evidence. -func (dve *DuplicateVoteEvidence) String() string { - return fmt.Sprintf("VoteA: %v; VoteB: %v", dve.VoteA, dve.VoteB) - +// ABCI returns the application relevant representation of the evidence +func (dve *DuplicateVoteEvidence) ABCI() []abci.Evidence { + return []abci.Evidence{{ + Type: abci.EvidenceType_DUPLICATE_VOTE, + Validator: abci.Validator{ + Address: dve.VoteA.ValidatorAddress, + Power: dve.ValidatorPower, + }, + Height: dve.VoteA.Height, + Time: dve.Timestamp, + TotalVotingPower: dve.TotalVotingPower, + }} } -// Height returns the height this evidence refers to. -func (dve *DuplicateVoteEvidence) Height() int64 { - return dve.VoteA.Height +// Bytes returns the proto-encoded evidence as a byte array. +func (dve *DuplicateVoteEvidence) Bytes() []byte { + pbe := dve.ToProto() + bz, err := pbe.Marshal() + if err != nil { + panic(err) + } + + return bz } -// Time return the time the evidence was created -func (dve *DuplicateVoteEvidence) Time() time.Time { - return dve.VoteA.Timestamp +// Hash returns the hash of the evidence. +func (dve *DuplicateVoteEvidence) Hash() []byte { + return tmhash.Sum(dve.Bytes()) } -// Address returns the address of the validator. -func (dve *DuplicateVoteEvidence) Address() []byte { - return dve.PubKey.Address() +// Height returns the height of the infraction +func (dve *DuplicateVoteEvidence) Height() int64 { + return dve.VoteA.Height } -// Hash returns the hash of the evidence. -func (dve *DuplicateVoteEvidence) Bytes() []byte { - return cdcEncode(dve) +// String returns a string representation of the evidence. +func (dve *DuplicateVoteEvidence) String() string { + return fmt.Sprintf("DuplicateVoteEvidence{VoteA: %v, VoteB: %v}", dve.VoteA, dve.VoteB) } -// Hash returns the hash of the evidence. -func (dve *DuplicateVoteEvidence) Hash() []byte { - return tmhash.Sum(cdcEncode(dve)) +// Time returns the time of the infraction +func (dve *DuplicateVoteEvidence) Time() time.Time { + return dve.Timestamp } -// Verify returns an error if the two votes aren't conflicting. -// To be conflicting, they must be from the same validator, for the same H/R/S, but for different blocks. -func (dve *DuplicateVoteEvidence) Verify(chainID string, pubKey crypto.PubKey) error { - // H/R/S must be the same - if dve.VoteA.Height != dve.VoteB.Height || - dve.VoteA.Round != dve.VoteB.Round || - dve.VoteA.Type != dve.VoteB.Type { - return fmt.Errorf("duplicateVoteEvidence Error: H/R/S does not match. Got %v and %v", dve.VoteA, dve.VoteB) +// ValidateBasic performs basic validation. 
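+// It checks that both votes are present and individually valid, and that they
+// are ordered lexicographically by BlockID key, i.e. the ordering enforced by
+// NewDuplicateVoteEvidence.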
+func (dve *DuplicateVoteEvidence) ValidateBasic() error { + if dve == nil { + return errors.New("empty duplicate vote evidence") } - // Address must be the same - if !bytes.Equal(dve.VoteA.ValidatorAddress, dve.VoteB.ValidatorAddress) { - return fmt.Errorf( - "duplicateVoteEvidence Error: Validator addresses do not match. Got %X and %X", - dve.VoteA.ValidatorAddress, - dve.VoteB.ValidatorAddress, - ) + if dve.VoteA == nil || dve.VoteB == nil { + return fmt.Errorf("one or both of the votes are empty %v, %v", dve.VoteA, dve.VoteB) + } + if err := dve.VoteA.ValidateBasic(); err != nil { + return fmt.Errorf("invalid VoteA: %w", err) + } + if err := dve.VoteB.ValidateBasic(); err != nil { + return fmt.Errorf("invalid VoteB: %w", err) + } + // Enforce Votes are lexicographically sorted on blockID + if strings.Compare(dve.VoteA.BlockID.Key(), dve.VoteB.BlockID.Key()) >= 0 { + return errors.New("duplicate votes in invalid order") } + return nil +} - // Index must be the same - if dve.VoteA.ValidatorIndex != dve.VoteB.ValidatorIndex { - return fmt.Errorf( - "duplicateVoteEvidence Error: Validator indices do not match. Got %d and %d", - dve.VoteA.ValidatorIndex, - dve.VoteB.ValidatorIndex, - ) +// ToProto encodes DuplicateVoteEvidence to protobuf +func (dve *DuplicateVoteEvidence) ToProto() *tmproto.DuplicateVoteEvidence { + voteB := dve.VoteB.ToProto() + voteA := dve.VoteA.ToProto() + tp := tmproto.DuplicateVoteEvidence{ + VoteA: voteA, + VoteB: voteB, + TotalVotingPower: dve.TotalVotingPower, + ValidatorPower: dve.ValidatorPower, + Timestamp: dve.Timestamp, } + return &tp +} - // BlockIDs must be different - if dve.VoteA.BlockID.Equals(dve.VoteB.BlockID) { - return fmt.Errorf( - "duplicateVoteEvidence Error: BlockIDs are the same (%v) - not a real duplicate vote", - dve.VoteA.BlockID, - ) +// DuplicateVoteEvidenceFromProto decodes protobuf into DuplicateVoteEvidence +func DuplicateVoteEvidenceFromProto(pb *tmproto.DuplicateVoteEvidence) (*DuplicateVoteEvidence, error) { + if pb == nil { + return nil, errors.New("nil duplicate vote evidence") } - // pubkey must match address (this should already be true, sanity check) - addr := dve.VoteA.ValidatorAddress - if !bytes.Equal(pubKey.Address(), addr) { - return fmt.Errorf("duplicateVoteEvidence FAILED SANITY CHECK - address (%X) doesn't match pubkey (%v - %X)", - addr, pubKey, pubKey.Address()) + vA, err := VoteFromProto(pb.VoteA) + if err != nil { + return nil, err } - // Signatures must be valid - if !pubKey.VerifyBytes(dve.VoteA.SignBytes(chainID), dve.VoteA.Signature) { - return fmt.Errorf("duplicateVoteEvidence Error verifying VoteA: %v", ErrVoteInvalidSignature) + vB, err := VoteFromProto(pb.VoteB) + if err != nil { + return nil, err } - if !pubKey.VerifyBytes(dve.VoteB.SignBytes(chainID), dve.VoteB.Signature) { - return fmt.Errorf("duplicateVoteEvidence Error verifying VoteB: %v", ErrVoteInvalidSignature) + + dve := &DuplicateVoteEvidence{ + VoteA: vA, + VoteB: vB, + TotalVotingPower: pb.TotalVotingPower, + ValidatorPower: pb.ValidatorPower, + Timestamp: pb.Timestamp, } - return nil + return dve, dve.ValidateBasic() } -// Equal checks if two pieces of evidence are equal. 
-func (dve *DuplicateVoteEvidence) Equal(ev Evidence) bool { - if _, ok := ev.(*DuplicateVoteEvidence); !ok { - return false - } +//------------------------------------ LIGHT EVIDENCE -------------------------------------- + +// LightClientAttackEvidence is a generalized evidence that captures all forms of known attacks on +// a light client such that a full node can verify, propose and commit the evidence on-chain for +// punishment of the malicious validators. There are three forms of attacks: Lunatic, Equivocation +// and Amnesia. These attacks are exhaustive. You can find a more detailed overview of this at +// tendermint/docs/architecture/adr-047-handling-evidence-from-light-client.md +type LightClientAttackEvidence struct { + ConflictingBlock *LightBlock + CommonHeight int64 - // just check their hashes - dveHash := tmhash.Sum(cdcEncode(dve)) - evHash := tmhash.Sum(cdcEncode(ev)) - return bytes.Equal(dveHash, evHash) + // abci specific information + ByzantineValidators []*Validator // validators in the validator set that misbehaved in creating the conflicting block + TotalVotingPower int64 // total voting power of the validator set at the common height + Timestamp time.Time // timestamp of the block at the common height } -// ValidateBasic performs basic validation. -func (dve *DuplicateVoteEvidence) ValidateBasic() error { - if len(dve.PubKey.Bytes()) == 0 { - return errors.New("empty PubKey") - } - if dve.VoteA == nil || dve.VoteB == nil { - return fmt.Errorf("one or both of the votes are empty %v, %v", dve.VoteA, dve.VoteB) +var _ Evidence = &LightClientAttackEvidence{} + +// ABCI forms an array of abci evidence for each byzantine validator +func (l *LightClientAttackEvidence) ABCI() []abci.Evidence { + abciEv := make([]abci.Evidence, len(l.ByzantineValidators)) + for idx, val := range l.ByzantineValidators { + abciEv[idx] = abci.Evidence{ + Type: abci.EvidenceType_LIGHT_CLIENT_ATTACK, + Validator: TM2PB.Validator(val), + Height: l.Height(), + Time: l.Timestamp, + TotalVotingPower: l.TotalVotingPower, + } } - if err := dve.VoteA.ValidateBasic(); err != nil { - return fmt.Errorf("invalid VoteA: %v", err) + return abciEv +} + +// Bytes returns the proto-encoded evidence as a byte array +func (l *LightClientAttackEvidence) Bytes() []byte { + pbe, err := l.ToProto() + if err != nil { + panic(err) } - if err := dve.VoteB.ValidateBasic(); err != nil { - return fmt.Errorf("invalid VoteB: %v", err) + bz, err := pbe.Marshal() + if err != nil { + panic(err) } - // Enforce Votes are lexicographically sorted on blockID - if strings.Compare(dve.VoteA.BlockID.Key(), dve.VoteB.BlockID.Key()) >= 0 { - return errors.New("duplicate votes in invalid order") + return bz +} + +// GetByzantineValidators finds out what style of attack LightClientAttackEvidence was and then works out who +// the malicious validators were and returns them. This is used both for forming the ByzantineValidators +// field and for validating that it is correct. Validators are ordered based on validator power +func (l *LightClientAttackEvidence) GetByzantineValidators(commonVals *ValidatorSet, + trusted *SignedHeader) []*Validator { + var validators []*Validator + // First check if the header is invalid. 
This means that it is a lunatic attack and therefore we take the
+ // validators who are in the commonVals and voted for the lunatic header
+ if l.ConflictingHeaderIsInvalid(trusted.Header) {
+  for _, commitSig := range l.ConflictingBlock.Commit.Signatures {
+   if !commitSig.ForBlock() {
+    continue
+   }
+
+   _, val := commonVals.GetByAddress(commitSig.ValidatorAddress)
+   if val == nil {
+    // validator wasn't in the common validator set
+    continue
+   }
+   validators = append(validators, val)
+  }
+  sort.Sort(ValidatorsByVotingPower(validators))
+  return validators
+ } else if trusted.Commit.Round == l.ConflictingBlock.Commit.Round {
+  // This is an equivocation attack as both commits are in the same round. We then find the validators
+  // from the conflicting light block validator set that voted in both headers.
+  // Validator hashes are the same, therefore the indexing order of validators is the same and thus we
+  // only need a single loop to find the validators that voted twice.
+  for i := 0; i < len(l.ConflictingBlock.Commit.Signatures); i++ {
+   sigA := l.ConflictingBlock.Commit.Signatures[i]
+   if sigA.Absent() {
+    continue
+   }
+
+   sigB := trusted.Commit.Signatures[i]
+   if sigB.Absent() {
+    continue
+   }
+
+   _, val := l.ConflictingBlock.ValidatorSet.GetByAddress(sigA.ValidatorAddress)
+   validators = append(validators, val)
+  }
+  sort.Sort(ValidatorsByVotingPower(validators))
+  return validators
 }
- return nil
+ // if the rounds are different then this is an amnesia attack. Unfortunately, given the nature of the attack,
+ // we aren't yet able to deduce which validators are malicious and which are not, hence we return an
+ // empty validator set.
+ return validators
}

-//-----------------------------------------------------------------
+// ConflictingHeaderIsInvalid takes a trusted header and matches it against a conflicting header
+// to determine whether the conflicting header was the product of a valid state transition
+// or not. If it is, then all the deterministic fields of the header should be the same.
+// If not, it is an invalid header and constitutes a lunatic attack.
+func (l *LightClientAttackEvidence) ConflictingHeaderIsInvalid(trustedHeader *Header) bool {
+ return !bytes.Equal(trustedHeader.ValidatorsHash, l.ConflictingBlock.ValidatorsHash) ||
+  !bytes.Equal(trustedHeader.NextValidatorsHash, l.ConflictingBlock.NextValidatorsHash) ||
+  !bytes.Equal(trustedHeader.ConsensusHash, l.ConflictingBlock.ConsensusHash) ||
+  !bytes.Equal(trustedHeader.AppHash, l.ConflictingBlock.AppHash) ||
+  !bytes.Equal(trustedHeader.LastResultsHash, l.ConflictingBlock.LastResultsHash)

-// UNSTABLE
-type MockRandomEvidence struct {
- MockEvidence
- randBytes []byte
}

-var _ Evidence = &MockRandomEvidence{}
+// Hash returns the hash of the header and the commonHeight. This is designed to cause hash collisions
+// with evidence that has the same conflicting header and common height but different permutations
+// of validator commit signatures. The reason for this is that we don't want to allow several
+// permutations of the same evidence to be committed on chain. Ideally we commit the header with the
+// most commit signatures (captures the most byzantine validators) but anything greater than 1/3 is sufficient.
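+// Note that only the conflicting block's hash and the common height feed into this
+// hash; the byzantine validators and other ABCI-specific metadata are deliberately
+// excluded from it.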
+func (l *LightClientAttackEvidence) Hash() []byte {
+ buf := make([]byte, binary.MaxVarintLen64)
+ n := binary.PutVarint(buf, l.CommonHeight)
+ bz := make([]byte, tmhash.Size+n)
+ copy(bz[:tmhash.Size], l.ConflictingBlock.Hash().Bytes())
+ copy(bz[tmhash.Size:], buf)
+ return tmhash.Sum(bz)
+}

-// UNSTABLE
-func NewMockRandomEvidence(height int64, eTime time.Time, address []byte, randBytes []byte) MockRandomEvidence {
- return MockRandomEvidence{
-  MockEvidence{
-   EvidenceHeight: height,
-   EvidenceTime: eTime,
-   EvidenceAddress: address}, randBytes,
- }
+// Height returns the last height at which the primary provider and witness provider had the same header.
+// We use this as the height of the infraction rather than the actual conflicting header because we know
+// that the malicious validators were bonded at this height, which is important for evidence expiry
+func (l *LightClientAttackEvidence) Height() int64 {
+ return l.CommonHeight
}

-func (e MockRandomEvidence) Hash() []byte {
- return []byte(fmt.Sprintf("%d-%x", e.EvidenceHeight, e.randBytes))
+// String returns a string representation of LightClientAttackEvidence
+func (l *LightClientAttackEvidence) String() string {
+ return fmt.Sprintf("LightClientAttackEvidence{ConflictingBlock: %v, CommonHeight: %d}",
+  l.ConflictingBlock.String(), l.CommonHeight)
}

-// UNSTABLE
-type MockEvidence struct {
- EvidenceHeight int64
- EvidenceTime time.Time
- EvidenceAddress []byte
+// Time returns the time of the common block that the infraction leveraged off of.
+func (l *LightClientAttackEvidence) Time() time.Time {
+ return l.Timestamp
}

-var _ Evidence = &MockEvidence{}
+// ValidateBasic performs basic validation such that the evidence is consistent and can now be used for verification.
+func (l *LightClientAttackEvidence) ValidateBasic() error {
+ if l.ConflictingBlock == nil {
+  return errors.New("conflicting block is nil")
+ }
-
-// UNSTABLE
-func NewMockEvidence(height int64, eTime time.Time, idx int, address []byte) MockEvidence {
- return MockEvidence{
-  EvidenceHeight: height,
-  EvidenceTime: eTime,
-  EvidenceAddress: address}
-}
+ // this check needs to be done before we can run validate basic
+ if l.ConflictingBlock.Header == nil {
+  return errors.New("conflicting block missing header")
+ }

-func (e MockEvidence) Height() int64 { return e.EvidenceHeight }
-func (e MockEvidence) Time() time.Time { return e.EvidenceTime }
-func (e MockEvidence) Address() []byte { return e.EvidenceAddress }
-func (e MockEvidence) Hash() []byte {
- return []byte(fmt.Sprintf("%d-%x-%s",
-  e.EvidenceHeight, e.EvidenceAddress, e.EvidenceTime))
-}
-func (e MockEvidence) Bytes() []byte {
- return []byte(fmt.Sprintf("%d-%x-%s",
-  e.EvidenceHeight, e.EvidenceAddress, e.EvidenceTime))
+ if err := l.ConflictingBlock.ValidateBasic(l.ConflictingBlock.ChainID); err != nil {
+  return fmt.Errorf("invalid conflicting light block: %w", err)
+ }
+
+ if l.CommonHeight <= 0 {
+  return errors.New("negative or zero common height")
+ }
+
+ // check that common height isn't ahead of the height of the conflicting block. It
+ // is possible that they are the same height if the light node witnesses either an
+ // amnesia or an equivocation attack.
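+ // (e.g. an equivocation attack produces two conflicting blocks at the same
+ // height, so CommonHeight == ConflictingBlock.Height is allowed here)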
+ if l.CommonHeight > l.ConflictingBlock.Height { + return fmt.Errorf("common height is ahead of the conflicting block height (%d > %d)", + l.CommonHeight, l.ConflictingBlock.Height) + } + + return nil } -func (e MockEvidence) Verify(chainID string, pubKey crypto.PubKey) error { return nil } -func (e MockEvidence) Equal(ev Evidence) bool { - e2 := ev.(MockEvidence) - return e.EvidenceHeight == e2.EvidenceHeight && - bytes.Equal(e.EvidenceAddress, e2.EvidenceAddress) + +// ToProto encodes LightClientAttackEvidence to protobuf +func (l *LightClientAttackEvidence) ToProto() (*tmproto.LightClientAttackEvidence, error) { + conflictingBlock, err := l.ConflictingBlock.ToProto() + if err != nil { + return nil, err + } + + byzVals := make([]*tmproto.Validator, len(l.ByzantineValidators)) + for idx, val := range l.ByzantineValidators { + valpb, err := val.ToProto() + if err != nil { + return nil, err + } + byzVals[idx] = valpb + } + + return &tmproto.LightClientAttackEvidence{ + ConflictingBlock: conflictingBlock, + CommonHeight: l.CommonHeight, + ByzantineValidators: byzVals, + TotalVotingPower: l.TotalVotingPower, + Timestamp: l.Timestamp, + }, nil } -func (e MockEvidence) ValidateBasic() error { return nil } -func (e MockEvidence) String() string { - return fmt.Sprintf("Evidence: %d/%s/%s", e.EvidenceHeight, e.Time(), e.EvidenceAddress) + +// LightClientAttackEvidenceFromProto decodes protobuf +func LightClientAttackEvidenceFromProto(lpb *tmproto.LightClientAttackEvidence) (*LightClientAttackEvidence, error) { + if lpb == nil { + return nil, errors.New("empty light client attack evidence") + } + + conflictingBlock, err := LightBlockFromProto(lpb.ConflictingBlock) + if err != nil { + return nil, err + } + + byzVals := make([]*Validator, len(lpb.ByzantineValidators)) + for idx, valpb := range lpb.ByzantineValidators { + val, err := ValidatorFromProto(valpb) + if err != nil { + return nil, err + } + byzVals[idx] = val + } + + l := &LightClientAttackEvidence{ + ConflictingBlock: conflictingBlock, + CommonHeight: lpb.CommonHeight, + ByzantineValidators: byzVals, + TotalVotingPower: lpb.TotalVotingPower, + Timestamp: lpb.Timestamp, + } + + return l, l.ValidateBasic() } -//------------------------------------------- +//------------------------------------------------------------------------------------------ // EvidenceList is a list of Evidence. Evidences is not a word. type EvidenceList []Evidence @@ -322,7 +423,7 @@ func (evl EvidenceList) Hash() []byte { for i := 0; i < len(evl); i++ { evidenceBzs[i] = evl[i].Bytes() } - return merkle.SimpleHashFromByteSlices(evidenceBzs) + return merkle.HashFromByteSlices(evidenceBzs) } func (evl EvidenceList) String() string { @@ -336,9 +437,148 @@ func (evl EvidenceList) String() string { // Has returns true if the evidence is in the EvidenceList. 
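+// Evidence is compared by hash.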
func (evl EvidenceList) Has(evidence Evidence) bool {
 for _, ev := range evl {
-  if ev.Equal(evidence) {
+  if bytes.Equal(evidence.Hash(), ev.Hash()) {
   return true
  }
 }
 return false
}
+
+//------------------------------------------ PROTO --------------------------------------
+
+// EvidenceToProto is a generalized function for encoding evidence that conforms to the
+// evidence interface to protobuf
+func EvidenceToProto(evidence Evidence) (*tmproto.Evidence, error) {
+ if evidence == nil {
+  return nil, errors.New("nil evidence")
+ }
+
+ switch evi := evidence.(type) {
+ case *DuplicateVoteEvidence:
+  pbev := evi.ToProto()
+  return &tmproto.Evidence{
+   Sum: &tmproto.Evidence_DuplicateVoteEvidence{
+    DuplicateVoteEvidence: pbev,
+   },
+  }, nil
+
+ case *LightClientAttackEvidence:
+  pbev, err := evi.ToProto()
+  if err != nil {
+   return nil, err
+  }
+  return &tmproto.Evidence{
+   Sum: &tmproto.Evidence_LightClientAttackEvidence{
+    LightClientAttackEvidence: pbev,
+   },
+  }, nil
+
+ default:
+  return nil, fmt.Errorf("toproto: evidence is not recognized: %T", evi)
+ }
+}
+
+// EvidenceFromProto is a generalized function for decoding protobuf into the
+// evidence interface
+func EvidenceFromProto(evidence *tmproto.Evidence) (Evidence, error) {
+ if evidence == nil {
+  return nil, errors.New("nil evidence")
+ }
+
+ switch evi := evidence.Sum.(type) {
+ case *tmproto.Evidence_DuplicateVoteEvidence:
+  return DuplicateVoteEvidenceFromProto(evi.DuplicateVoteEvidence)
+ case *tmproto.Evidence_LightClientAttackEvidence:
+  return LightClientAttackEvidenceFromProto(evi.LightClientAttackEvidence)
+ default:
+  return nil, errors.New("evidence is not recognized")
+ }
+}
+
+func init() {
+ tmjson.RegisterType(&DuplicateVoteEvidence{}, "tendermint/DuplicateVoteEvidence")
+ tmjson.RegisterType(&LightClientAttackEvidence{}, "tendermint/LightClientAttackEvidence")
+}
+
+//-------------------------------------------- ERRORS --------------------------------------
+
+// ErrInvalidEvidence wraps a piece of evidence and the error denoting how or why it is invalid.
+type ErrInvalidEvidence struct {
+ Evidence Evidence
+ Reason error
+}
+
+// NewErrInvalidEvidence returns a new ErrInvalidEvidence with the given err.
+func NewErrInvalidEvidence(ev Evidence, err error) *ErrInvalidEvidence {
+ return &ErrInvalidEvidence{ev, err}
+}
+
+// Error returns a string representation of the error.
+func (err *ErrInvalidEvidence) Error() string {
+ return fmt.Sprintf("Invalid evidence: %v. Evidence: %v", err.Reason, err.Evidence)
+}
+
+// ErrEvidenceOverflow is for when the amount of evidence exceeds the max bytes.
+type ErrEvidenceOverflow struct {
+ Max int64
+ Got int64
+}
+
+// NewErrEvidenceOverflow returns a new ErrEvidenceOverflow where got > max.
+func NewErrEvidenceOverflow(max, got int64) *ErrEvidenceOverflow {
+ return &ErrEvidenceOverflow{max, got}
+}
+
+// Error returns a string representation of the error.
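+// Max and Got are sizes in bytes.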
+func (err *ErrEvidenceOverflow) Error() string { + return fmt.Sprintf("Too much evidence: Max %d, got %d", err.Max, err.Got) +} + +//-------------------------------------------- MOCKING -------------------------------------- + +// unstable - use only for testing + +// assumes the round to be 0 and the validator index to be 0 +func NewMockDuplicateVoteEvidence(height int64, time time.Time, chainID string) *DuplicateVoteEvidence { + val := NewMockPV() + return NewMockDuplicateVoteEvidenceWithValidator(height, time, val, chainID) +} + +// assumes voting power to be 10 and validator to be the only one in the set +func NewMockDuplicateVoteEvidenceWithValidator(height int64, time time.Time, + pv PrivValidator, chainID string) *DuplicateVoteEvidence { + pubKey, _ := pv.GetPubKey() + val := NewValidator(pubKey, 10) + voteA := makeMockVote(height, 0, 0, pubKey.Address(), randBlockID(), time) + vA := voteA.ToProto() + _ = pv.SignVote(chainID, vA) + voteA.Signature = vA.Signature + voteB := makeMockVote(height, 0, 0, pubKey.Address(), randBlockID(), time) + vB := voteB.ToProto() + _ = pv.SignVote(chainID, vB) + voteB.Signature = vB.Signature + return NewDuplicateVoteEvidence(voteA, voteB, time, NewValidatorSet([]*Validator{val})) +} + +func makeMockVote(height int64, round, index int32, addr Address, + blockID BlockID, time time.Time) *Vote { + return &Vote{ + Type: tmproto.SignedMsgType(2), + Height: height, + Round: round, + BlockID: blockID, + Timestamp: time, + ValidatorAddress: addr, + ValidatorIndex: index, + } +} + +func randBlockID() BlockID { + return BlockID{ + Hash: tmrand.Bytes(tmhash.Size), + PartSetHeader: PartSetHeader{ + Total: 1, + Hash: tmrand.Bytes(tmhash.Size), + }, + } +} diff --git a/types/evidence_test.go b/types/evidence_test.go index 40e096fcd..2e61f6a9c 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -8,90 +8,18 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/secp256k1" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" + tmrand "github.com/tendermint/tendermint/libs/rand" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmversion "github.com/tendermint/tendermint/proto/tendermint/version" + "github.com/tendermint/tendermint/version" ) -type voteData struct { - vote1 *Vote - vote2 *Vote - valid bool -} - -func makeVote( - t *testing.T, val PrivValidator, chainID string, valIndex int, height int64, round, step int, blockID BlockID, -) *Vote { - pubKey, err := val.GetPubKey() - require.NoError(t, err) - v := &Vote{ - ValidatorAddress: pubKey.Address(), - ValidatorIndex: valIndex, - Height: height, - Round: round, - Type: SignedMsgType(step), - BlockID: blockID, - } - err = val.SignVote(chainID, v) - if err != nil { - panic(err) - } - return v -} - -func TestEvidence(t *testing.T) { - val := NewMockPV() - val2 := NewMockPV() - - blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) - blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) - blockID3 := makeBlockID([]byte("blockhash"), 10000, []byte("partshash")) - blockID4 := makeBlockID([]byte("blockhash"), 10000, []byte("partshash2")) - - const chainID = "mychain" - - vote1 := makeVote(t, val, chainID, 0, 10, 2, 1, blockID) - badVote := makeVote(t, val, chainID, 0, 10, 2, 1, blockID) - err := val2.SignVote(chainID, badVote) - assert.NoError(t, err) - - cases := []voteData{ - {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, 
blockID2), true}, // different block ids - {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID3), true}, - {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID4), true}, - {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID), false}, // wrong block id - {vote1, makeVote(t, val, "mychain2", 0, 10, 2, 1, blockID2), false}, // wrong chain id - {vote1, makeVote(t, val, chainID, 1, 10, 2, 1, blockID2), false}, // wrong val index - {vote1, makeVote(t, val, chainID, 0, 11, 2, 1, blockID2), false}, // wrong height - {vote1, makeVote(t, val, chainID, 0, 10, 3, 1, blockID2), false}, // wrong round - {vote1, makeVote(t, val, chainID, 0, 10, 2, 2, blockID2), false}, // wrong step - {vote1, makeVote(t, val2, chainID, 0, 10, 2, 1, blockID), false}, // wrong validator - {vote1, badVote, false}, // signed by wrong key - } - - pubKey, err := val.GetPubKey() - require.NoError(t, err) - for _, c := range cases { - ev := &DuplicateVoteEvidence{ - VoteA: c.vote1, - VoteB: c.vote2, - } - if c.valid { - assert.Nil(t, ev.Verify(chainID, pubKey), "evidence should be valid") - } else { - assert.NotNil(t, ev.Verify(chainID, pubKey), "evidence should be invalid") - } - } -} - -func TestDuplicatedVoteEvidence(t *testing.T) { - ev := randomDuplicatedVoteEvidence(t) - - assert.True(t, ev.Equal(ev)) - assert.False(t, ev.Equal(&DuplicateVoteEvidence{})) -} +var defaultVoteTime = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) func TestEvidenceList(t *testing.T) { - ev := randomDuplicatedVoteEvidence(t) + ev := randomDuplicateVoteEvidence(t) evl := EvidenceList([]Evidence{ev}) assert.NotNil(t, evl.Hash()) @@ -99,38 +27,32 @@ func TestEvidenceList(t *testing.T) { assert.False(t, evl.Has(&DuplicateVoteEvidence{})) } -func TestMaxEvidenceBytes(t *testing.T) { - val := NewMockPV() - blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt64, tmhash.Sum([]byte("partshash"))) - blockID2 := makeBlockID(tmhash.Sum([]byte("blockhash2")), math.MaxInt64, tmhash.Sum([]byte("partshash"))) - const chainID = "mychain" - ev := &DuplicateVoteEvidence{ - PubKey: secp256k1.GenPrivKey().PubKey(), // use secp because it's pubkey is longer - VoteA: makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID), - VoteB: makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID2), - } - - bz, err := cdc.MarshalBinaryLengthPrefixed(ev) - require.NoError(t, err) - - assert.EqualValues(t, MaxEvidenceBytes, len(bz)) -} - -func randomDuplicatedVoteEvidence(t *testing.T) *DuplicateVoteEvidence { +func randomDuplicateVoteEvidence(t *testing.T) *DuplicateVoteEvidence { val := NewMockPV() blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) const chainID = "mychain" return &DuplicateVoteEvidence{ - VoteA: makeVote(t, val, chainID, 0, 10, 2, 1, blockID), - VoteB: makeVote(t, val, chainID, 0, 10, 2, 1, blockID2), + VoteA: makeVote(t, val, chainID, 0, 10, 2, 1, blockID, defaultVoteTime), + VoteB: makeVote(t, val, chainID, 0, 10, 2, 1, blockID2, defaultVoteTime.Add(1*time.Minute)), + TotalVotingPower: 30, + ValidatorPower: 10, + Timestamp: defaultVoteTime, } } +func TestDuplicateVoteEvidence(t *testing.T) { + const height = int64(13) + ev := NewMockDuplicateVoteEvidence(height, time.Now(), "mock-chain-id") + assert.Equal(t, ev.Hash(), tmhash.Sum(ev.Bytes())) + assert.NotNil(t, ev.String()) + assert.Equal(t, ev.Height(), height) +} + func TestDuplicateVoteEvidenceValidation(t *testing.T) 
{ val := NewMockPV() - blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt64, tmhash.Sum([]byte("partshash"))) - blockID2 := makeBlockID(tmhash.Sum([]byte("blockhash2")), math.MaxInt64, tmhash.Sum([]byte("partshash"))) + blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) + blockID2 := makeBlockID(tmhash.Sum([]byte("blockhash2")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) const chainID = "mychain" testCases := []struct { @@ -146,7 +68,7 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { ev.VoteB = nil }, true}, {"Invalid vote type", func(ev *DuplicateVoteEvidence) { - ev.VoteA = makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0, blockID2) + ev.VoteA = makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0, blockID2, defaultVoteTime) }, true}, {"Invalid vote order", func(ev *DuplicateVoteEvidence) { swap := ev.VoteA.Copy() @@ -157,22 +79,241 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.testName, func(t *testing.T) { - pk := secp256k1.GenPrivKey().PubKey() - vote1 := makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0x02, blockID) - vote2 := makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0x02, blockID2) - ev := NewDuplicateVoteEvidence(pk, vote1, vote2) + vote1 := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0x02, blockID, defaultVoteTime) + vote2 := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0x02, blockID2, defaultVoteTime) + valSet := NewValidatorSet([]*Validator{val.ExtractIntoValidator(10)}) + ev := NewDuplicateVoteEvidence(vote1, vote2, defaultVoteTime, valSet) tc.malleateEvidence(ev) assert.Equal(t, tc.expectErr, ev.ValidateBasic() != nil, "Validate Basic had an unexpected result") }) } } -func TestMockGoodEvidenceValidateBasic(t *testing.T) { - goodEvidence := NewMockEvidence(int64(1), time.Now(), 1, []byte{1}) +func TestLightClientAttackEvidence(t *testing.T) { + height := int64(5) + voteSet, valSet, privVals := randVoteSet(height, 1, tmproto.PrecommitType, 10, 1) + header := makeHeaderRandom() + header.Height = height + blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) + commit, err := MakeCommit(blockID, height, 1, voteSet, privVals, defaultVoteTime) + require.NoError(t, err) + lcae := &LightClientAttackEvidence{ + ConflictingBlock: &LightBlock{ + SignedHeader: &SignedHeader{ + Header: header, + Commit: commit, + }, + ValidatorSet: valSet, + }, + CommonHeight: height - 1, + } + assert.NotNil(t, lcae.String()) + assert.NotNil(t, lcae.Hash()) + // only 7 validators sign + differentCommit, err := MakeCommit(blockID, height, 1, voteSet, privVals[:7], defaultVoteTime) + require.NoError(t, err) + differentEv := &LightClientAttackEvidence{ + ConflictingBlock: &LightBlock{ + SignedHeader: &SignedHeader{ + Header: header, + Commit: differentCommit, + }, + ValidatorSet: valSet, + }, + CommonHeight: height - 1, + } + assert.Equal(t, lcae.Hash(), differentEv.Hash()) + // different header hash + differentHeader := makeHeaderRandom() + differentEv = &LightClientAttackEvidence{ + ConflictingBlock: &LightBlock{ + SignedHeader: &SignedHeader{ + Header: differentHeader, + Commit: differentCommit, + }, + ValidatorSet: valSet, + }, + CommonHeight: height - 1, + } + assert.NotEqual(t, lcae.Hash(), differentEv.Hash()) + // different common height should produce a 
different hash
+ differentEv = &LightClientAttackEvidence{
+  ConflictingBlock: &LightBlock{
+   SignedHeader: &SignedHeader{
+    Header: header,
+    Commit: differentCommit,
+   },
+   ValidatorSet: valSet,
+  },
+  CommonHeight: height - 2,
+ }
+ assert.NotEqual(t, lcae.Hash(), differentEv.Hash())
+ assert.Equal(t, lcae.Height(), int64(4)) // Height should be the common Height
+ assert.NotNil(t, lcae.Bytes())
+}
+
+func TestLightClientAttackEvidenceValidation(t *testing.T) {
+ height := int64(5)
+ voteSet, valSet, privVals := randVoteSet(height, 1, tmproto.PrecommitType, 10, 1)
+ header := makeHeaderRandom()
+ header.Height = height
+ header.ValidatorsHash = valSet.Hash()
+ blockID := makeBlockID(header.Hash(), math.MaxInt32, tmhash.Sum([]byte("partshash")))
+ commit, err := MakeCommit(blockID, height, 1, voteSet, privVals, time.Now())
+ require.NoError(t, err)
+ lcae := &LightClientAttackEvidence{
+  ConflictingBlock: &LightBlock{
+   SignedHeader: &SignedHeader{
+    Header: header,
+    Commit: commit,
+   },
+   ValidatorSet: valSet,
+  },
+  CommonHeight: height - 1,
+ }
+ assert.NoError(t, lcae.ValidateBasic())
+
+ testCases := []struct {
+  testName string
+  malleateEvidence func(*LightClientAttackEvidence)
+  expectErr bool
+ }{
+  {"Good LightClientAttackEvidence", func(ev *LightClientAttackEvidence) {}, false},
+  {"Negative height", func(ev *LightClientAttackEvidence) { ev.CommonHeight = -10 }, true},
+  {"Height is greater than divergent block", func(ev *LightClientAttackEvidence) {
+   ev.CommonHeight = height + 1
+  }, true},
+  {"Nil conflicting header", func(ev *LightClientAttackEvidence) { ev.ConflictingBlock.Header = nil }, true},
+  {"Nil conflicting block", func(ev *LightClientAttackEvidence) { ev.ConflictingBlock = nil }, true},
+  {"Nil validator set", func(ev *LightClientAttackEvidence) {
+   ev.ConflictingBlock.ValidatorSet = &ValidatorSet{}
+  }, true},
+ }
+ for _, tc := range testCases {
+  tc := tc
+  t.Run(tc.testName, func(t *testing.T) {
+   lcae := &LightClientAttackEvidence{
+    ConflictingBlock: &LightBlock{
+     SignedHeader: &SignedHeader{
+      Header: header,
+      Commit: commit,
+     },
+     ValidatorSet: valSet,
+    },
+    CommonHeight: height - 1,
+   }
+   tc.malleateEvidence(lcae)
+   if tc.expectErr {
+    assert.Error(t, lcae.ValidateBasic(), tc.testName)
+   } else {
+    assert.NoError(t, lcae.ValidateBasic(), tc.testName)
+   }
+  })
+ }
+
+}
+
+func TestMockEvidenceValidateBasic(t *testing.T) {
+ goodEvidence := NewMockDuplicateVoteEvidence(int64(1), time.Now(), "mock-chain-id")
 assert.Nil(t, goodEvidence.ValidateBasic())
}

-func TestMockBadEvidenceValidateBasic(t *testing.T) {
- badEvidence := NewMockEvidence(int64(1), time.Now(), 1, []byte{1})
- assert.Nil(t, badEvidence.ValidateBasic())
+func makeVote(
+ t *testing.T, val PrivValidator, chainID string, valIndex int32, height int64, round int32, step int, blockID BlockID,
+ time time.Time) *Vote {
+ pubKey, err := val.GetPubKey()
+ require.NoError(t, err)
+ v := &Vote{
+  ValidatorAddress: pubKey.Address(),
+  ValidatorIndex: valIndex,
+  Height: height,
+  Round: round,
+  Type: tmproto.SignedMsgType(step),
+  BlockID: blockID,
+  Timestamp: time,
+ }
+
+ vpb := v.ToProto()
+ err = val.SignVote(chainID, vpb)
+ if err != nil {
+  panic(err)
+ }
+ v.Signature = vpb.Signature
+ return v
+}
+
+func makeHeaderRandom() *Header {
+ return &Header{
+  Version: tmversion.Consensus{Block: version.BlockProtocol, App: 1},
+  ChainID: tmrand.Str(12),
+  Height: int64(tmrand.Uint16()) + 1,
+  Time: time.Now(),
+  LastBlockID: makeBlockIDRandom(),
+  LastCommitHash: crypto.CRandBytes(tmhash.Size),
+
DataHash: crypto.CRandBytes(tmhash.Size), + ValidatorsHash: crypto.CRandBytes(tmhash.Size), + NextValidatorsHash: crypto.CRandBytes(tmhash.Size), + ConsensusHash: crypto.CRandBytes(tmhash.Size), + AppHash: crypto.CRandBytes(tmhash.Size), + LastResultsHash: crypto.CRandBytes(tmhash.Size), + EvidenceHash: crypto.CRandBytes(tmhash.Size), + ProposerAddress: crypto.CRandBytes(crypto.AddressSize), + } +} + +func TestEvidenceProto(t *testing.T) { + // -------- Votes -------- + val := NewMockPV() + blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) + blockID2 := makeBlockID(tmhash.Sum([]byte("blockhash2")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) + const chainID = "mychain" + v := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, 1, 0x01, blockID, defaultVoteTime) + v2 := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, 2, 0x01, blockID2, defaultVoteTime) + + // -------- SignedHeaders -------- + const height int64 = 37 + + var ( + header1 = makeHeaderRandom() + header2 = makeHeaderRandom() + ) + + header1.Height = height + header1.LastBlockID = blockID + header1.ChainID = chainID + + header2.Height = height + header2.LastBlockID = blockID + header2.ChainID = chainID + + tests := []struct { + testName string + evidence Evidence + toProtoErr bool + fromProtoErr bool + }{ + {"nil fail", nil, true, true}, + {"DuplicateVoteEvidence empty fail", &DuplicateVoteEvidence{}, false, true}, + {"DuplicateVoteEvidence nil voteB", &DuplicateVoteEvidence{VoteA: v, VoteB: nil}, false, true}, + {"DuplicateVoteEvidence nil voteA", &DuplicateVoteEvidence{VoteA: nil, VoteB: v}, false, true}, + {"DuplicateVoteEvidence success", &DuplicateVoteEvidence{VoteA: v2, VoteB: v}, false, false}, + } + for _, tt := range tests { + tt := tt + t.Run(tt.testName, func(t *testing.T) { + pb, err := EvidenceToProto(tt.evidence) + if tt.toProtoErr { + assert.Error(t, err, tt.testName) + return + } + assert.NoError(t, err, tt.testName) + + evi, err := EvidenceFromProto(pb) + if tt.fromProtoErr { + assert.Error(t, err, tt.testName) + return + } + require.Equal(t, tt.evidence, evi, tt.testName) + }) + } } diff --git a/types/genesis.go b/types/genesis.go index 73a7847d8..964df923e 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -3,15 +3,15 @@ package types import ( "bytes" "encoding/json" + "errors" "fmt" "io/ioutil" "time" - "github.com/pkg/errors" - "github.com/tendermint/tendermint/crypto" tmbytes "github.com/tendermint/tendermint/libs/bytes" - tmos "github.com/tendermint/tendermint/libs/os" + tmjson "github.com/tendermint/tendermint/libs/json" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" ) @@ -36,21 +36,23 @@ type GenesisValidator struct { // GenesisDoc defines the initial conditions for a tendermint blockchain, in particular its validator set. 
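+// An InitialHeight of 0 is defaulted to 1 by ValidateAndComplete.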
type GenesisDoc struct {
- GenesisTime time.Time `json:"genesis_time"`
- ChainID string `json:"chain_id"`
- ConsensusParams *ConsensusParams `json:"consensus_params,omitempty"`
- Validators []GenesisValidator `json:"validators,omitempty"`
- AppHash tmbytes.HexBytes `json:"app_hash"`
- AppState json.RawMessage `json:"app_state,omitempty"`
+ GenesisTime time.Time `json:"genesis_time"`
+ ChainID string `json:"chain_id"`
+ InitialHeight int64 `json:"initial_height"`
+ ConsensusParams *tmproto.ConsensusParams `json:"consensus_params,omitempty"`
+ Validators []GenesisValidator `json:"validators,omitempty"`
+ AppHash tmbytes.HexBytes `json:"app_hash"`
+ AppState json.RawMessage `json:"app_state,omitempty"`
}

// SaveAs is a utility method for saving GenesisDoc as a JSON file.
func (genDoc *GenesisDoc) SaveAs(file string) error {
- genDocBytes, err := cdc.MarshalJSONIndent(genDoc, "", "  ")
+ genDocBytes, err := tmjson.MarshalIndent(genDoc, "", "  ")
 if err != nil {
  return err
 }
- return tmos.WriteFile(file, genDocBytes, 0644)
+
+ return ioutil.WriteFile(file, genDocBytes, 0644) // nolint:gosec
}

// ValidatorHash returns the hash of the validator set contained in the GenesisDoc
@@ -70,21 +72,27 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error {
  return errors.New("genesis doc must include non-empty chain_id")
 }
 if len(genDoc.ChainID) > MaxChainIDLen {
-  return errors.Errorf("chain_id in genesis doc is too long (max: %d)", MaxChainIDLen)
+  return fmt.Errorf("chain_id in genesis doc is too long (max: %d)", MaxChainIDLen)
+ }
+ if genDoc.InitialHeight < 0 {
+  return fmt.Errorf("initial_height cannot be negative (got %v)", genDoc.InitialHeight)
+ }
+ if genDoc.InitialHeight == 0 {
+  genDoc.InitialHeight = 1
 }

 if genDoc.ConsensusParams == nil {
  genDoc.ConsensusParams = DefaultConsensusParams()
- } else if err := genDoc.ConsensusParams.Validate(); err != nil {
+ } else if err := ValidateConsensusParams(*genDoc.ConsensusParams); err != nil {
  return err
 }

 for i, v := range genDoc.Validators {
  if v.Power == 0 {
-   return errors.Errorf("the genesis file cannot contain validators with no voting power: %v", v)
+   return fmt.Errorf("the genesis file cannot contain validators with no voting power: %v", v)
  }
  if len(v.Address) > 0 && !bytes.Equal(v.PubKey.Address(), v.Address) {
-   return errors.Errorf("incorrect address for validator %v in the genesis file, should be %v", v, v.PubKey.Address())
+   return fmt.Errorf("incorrect address for validator %v in the genesis file, should be %v", v, v.PubKey.Address())
  }
  if len(v.Address) == 0 {
   genDoc.Validators[i].Address = v.PubKey.Address()
@@ -104,7 +112,7 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error {

// GenesisDocFromJSON unmarshals JSON data into a GenesisDoc.
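+// It also runs ValidateAndComplete on the decoded document.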
func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) { genDoc := GenesisDoc{} - err := cdc.UnmarshalJSON(jsonBlob, &genDoc) + err := tmjson.Unmarshal(jsonBlob, &genDoc) if err != nil { return nil, err } @@ -120,11 +128,11 @@ func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) { func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) { jsonBlob, err := ioutil.ReadFile(genDocFile) if err != nil { - return nil, errors.Wrap(err, "Couldn't read GenesisDoc file") + return nil, fmt.Errorf("couldn't read GenesisDoc file: %w", err) } genDoc, err := GenesisDocFromJSON(jsonBlob) if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("Error reading GenesisDoc at %v", genDocFile)) + return nil, fmt.Errorf("error reading GenesisDoc at %s: %w", genDocFile, err) } return genDoc, nil } diff --git a/types/genesis_test.go b/types/genesis_test.go index ee713a6e7..fa579a8d0 100644 --- a/types/genesis_test.go +++ b/types/genesis_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/ed25519" + tmjson "github.com/tendermint/tendermint/libs/json" tmtime "github.com/tendermint/tendermint/types/time" ) @@ -18,7 +19,8 @@ func TestGenesisBad(t *testing.T) { {}, // empty {1, 1, 1, 1, 1}, // junk []byte(`{}`), // empty - []byte(`{"chain_id":"mychain","validators":[{}]}`), // invalid validator + []byte(`{"chain_id":"mychain","validators":[{}]}`), // invalid validator + []byte(`{"chain_id":"chain","initial_height":"-1"}`), // negative initial height // missing pub_key type []byte( `{"validators":[{"pub_key":{"value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},"power":"10","name":""}]}`, @@ -58,11 +60,19 @@ func TestGenesisBad(t *testing.T) { func TestGenesisGood(t *testing.T) { // test a good one by raw json genDocBytes := []byte( - `{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"test-chain-QDKdJr","consensus_params":null,"validators":[` + - `{"pub_key":{` + - `"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="` + - `},"power":"10","name":""}` + - `],"app_hash":"","app_state":{"account_owner": "Bob"}}`, + `{ + "genesis_time": "0001-01-01T00:00:00Z", + "chain_id": "test-chain-QDKdJr", + "initial_height": "1000", + "consensus_params": null, + "validators": [{ + "pub_key":{"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="}, + "power":"10", + "name":"" + }], + "app_hash":"", + "app_state":{"account_owner": "Bob"} + }`, ) _, err := GenesisDocFromJSON(genDocBytes) assert.NoError(t, err, "expected no error for good genDoc json") @@ -73,7 +83,7 @@ func TestGenesisGood(t *testing.T) { ChainID: "abc", Validators: []GenesisValidator{{pubkey.Address(), pubkey, 10, "myval"}}, } - genDocBytes, err = cdc.MarshalJSON(baseGenDoc) + genDocBytes, err = tmjson.Marshal(baseGenDoc) assert.NoError(t, err, "error marshalling genDoc") // test base gendoc and check consensus params were filled @@ -85,14 +95,14 @@ func TestGenesisGood(t *testing.T) { assert.NotNil(t, genDoc.Validators[0].Address, "expected validator's address to be filled in") // create json with consensus params filled - genDocBytes, err = cdc.MarshalJSON(genDoc) + genDocBytes, err = tmjson.Marshal(genDoc) assert.NoError(t, err, "error marshalling genDoc") genDoc, err = GenesisDocFromJSON(genDocBytes) assert.NoError(t, err, "expected no error for valid genDoc json") // test with invalid consensus params genDoc.ConsensusParams.Block.MaxBytes = 0 - genDocBytes, err = cdc.MarshalJSON(genDoc) + genDocBytes, err = 
tmjson.Marshal(genDoc)
 assert.NoError(t, err, "error marshalling genDoc")
 _, err = GenesisDocFromJSON(genDocBytes)
 assert.Error(t, err, "expected error for genDoc json with block size of 0")
@@ -119,7 +129,8 @@ func TestGenesisSaveAs(t *testing.T) {
 genDoc := randomGenesisDoc()

 // save
- genDoc.SaveAs(tmpfile.Name())
+ err = genDoc.SaveAs(tmpfile.Name())
+ require.NoError(t, err)
 stat, err := tmpfile.Stat()
 require.NoError(t, err)
 if err != nil && stat.Size() <= 0 {
@@ -132,9 +143,7 @@ func TestGenesisSaveAs(t *testing.T) {
 // load
 genDoc2, err := GenesisDocFromFile(tmpfile.Name())
 require.NoError(t, err)
-
- // fails to unknown reason
- // assert.EqualValues(t, genDoc2, genDoc)
+ assert.EqualValues(t, genDoc2, genDoc)
 assert.Equal(t, genDoc2.Validators, genDoc.Validators)
}

@@ -148,7 +157,9 @@ func randomGenesisDoc() *GenesisDoc {
 return &GenesisDoc{
  GenesisTime: tmtime.Now(),
  ChainID: "abc",
+  InitialHeight: 1000,
  Validators: []GenesisValidator{{pubkey.Address(), pubkey, 10, "myval"}},
  ConsensusParams: DefaultConsensusParams(),
+  AppHash: []byte{1, 2, 3},
 }
}
diff --git a/types/light.go b/types/light.go
new file mode 100644
index 000000000..8f09d8205
--- /dev/null
+++ b/types/light.go
@@ -0,0 +1,221 @@
+package types
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+
+ tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+)
+
+// LightBlock is a SignedHeader and a ValidatorSet.
+// It is the basis of the light client
+type LightBlock struct {
+ *SignedHeader `json:"signed_header"`
+ ValidatorSet *ValidatorSet `json:"validator_set"`
+}
+
+// ValidateBasic checks that the data is correct and consistent
+//
+// This does no verification of the signatures
+func (lb LightBlock) ValidateBasic(chainID string) error {
+ if lb.SignedHeader == nil {
+  return errors.New("missing signed header")
+ }
+ if lb.ValidatorSet == nil {
+  return errors.New("missing validator set")
+ }
+
+ if err := lb.SignedHeader.ValidateBasic(chainID); err != nil {
+  return fmt.Errorf("invalid signed header: %w", err)
+ }
+ if err := lb.ValidatorSet.ValidateBasic(); err != nil {
+  return fmt.Errorf("invalid validator set: %w", err)
+ }
+
+ // make sure the validator set is consistent with the header
+ if valSetHash := lb.ValidatorSet.Hash(); !bytes.Equal(lb.SignedHeader.ValidatorsHash, valSetHash) {
+  return fmt.Errorf("expected validator hash of header to match validator set hash (%X != %X)",
+   lb.SignedHeader.ValidatorsHash, valSetHash,
+  )
+ }
+
+ return nil
+}
+
+// String returns a string representation of the LightBlock
+func (lb LightBlock) String() string {
+ return lb.StringIndented("")
+}
+
+// StringIndented returns an indented string representation of the LightBlock
+//
+// SignedHeader
+// ValidatorSet
+func (lb LightBlock) StringIndented(indent string) string {
+ return fmt.Sprintf(`LightBlock{
+%s  %v
+%s  %v
+%s}`,
+  indent, lb.SignedHeader.StringIndented(indent+"  "),
+  indent, lb.ValidatorSet.StringIndented(indent+"  "),
+  indent)
}
+
+// ToProto converts the LightBlock to protobuf
+func (lb *LightBlock) ToProto() (*tmproto.LightBlock, error) {
+ if lb == nil {
+  return nil, nil
+ }
+
+ lbp := new(tmproto.LightBlock)
+ var err error
+ if lb.SignedHeader != nil {
+  lbp.SignedHeader = lb.SignedHeader.ToProto()
+ }
+ if lb.ValidatorSet != nil {
+  lbp.ValidatorSet, err = lb.ValidatorSet.ToProto()
+  if err != nil {
+   return nil, err
+  }
+ }
+
+ return lbp, nil
+}
+
+// LightBlockFromProto converts from protobuf back into the LightBlock.
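+// A nil SignedHeader or ValidatorSet in the input is left unset rather than rejected.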
+// An error is returned if either the validator set or signed header is invalid
+func LightBlockFromProto(pb *tmproto.LightBlock) (*LightBlock, error) {
+ if pb == nil {
+  return nil, errors.New("nil light block")
+ }
+
+ lb := new(LightBlock)
+
+ if pb.SignedHeader != nil {
+  sh, err := SignedHeaderFromProto(pb.SignedHeader)
+  if err != nil {
+   return nil, err
+  }
+  lb.SignedHeader = sh
+ }
+
+ if pb.ValidatorSet != nil {
+  vals, err := ValidatorSetFromProto(pb.ValidatorSet)
+  if err != nil {
+   return nil, err
+  }
+  lb.ValidatorSet = vals
+ }
+
+ return lb, nil
+}
+
+//-----------------------------------------------------------------------------
+
+// SignedHeader is a header along with the commits that prove it.
+type SignedHeader struct {
+ *Header `json:"header"`
+
+ Commit *Commit `json:"commit"`
+}
+
+// ValidateBasic does basic consistency checks and makes sure the header
+// and commit are consistent.
+//
+// NOTE: This does not actually check the cryptographic signatures. Make sure
+// to use a Verifier to validate the signatures actually provide a
+// sufficiently strong proof for this header's validity.
+func (sh SignedHeader) ValidateBasic(chainID string) error {
+ if sh.Header == nil {
+  return errors.New("missing header")
+ }
+ if sh.Commit == nil {
+  return errors.New("missing commit")
+ }
+
+ if err := sh.Header.ValidateBasic(); err != nil {
+  return fmt.Errorf("invalid header: %w", err)
+ }
+ if err := sh.Commit.ValidateBasic(); err != nil {
+  return fmt.Errorf("invalid commit: %w", err)
+ }
+
+ if sh.ChainID != chainID {
+  return fmt.Errorf("header belongs to another chain %q, not %q", sh.ChainID, chainID)
+ }
+
+ // Make sure the header is consistent with the commit.
+ if sh.Commit.Height != sh.Height {
+  return fmt.Errorf("header and commit height mismatch: %d vs %d", sh.Height, sh.Commit.Height)
+ }
+ if hhash, chash := sh.Hash(), sh.Commit.BlockID.Hash; !bytes.Equal(hhash, chash) {
+  return fmt.Errorf("commit signs block %X, header is block %X", chash, hhash)
+ }
+
+ return nil
+}
+
+// String returns a string representation of SignedHeader.
+func (sh SignedHeader) String() string {
+ return sh.StringIndented("")
+}
+
+// StringIndented returns an indented string representation of SignedHeader.
+//
+// Header
+// Commit
+func (sh SignedHeader) StringIndented(indent string) string {
+ return fmt.Sprintf(`SignedHeader{
+%s  %v
+%s  %v
+%s}`,
+  indent, sh.Header.StringIndented(indent+"  "),
+  indent, sh.Commit.StringIndented(indent+"  "),
+  indent)
+}
+
+// ToProto converts SignedHeader to protobuf
+func (sh *SignedHeader) ToProto() *tmproto.SignedHeader {
+ if sh == nil {
+  return nil
+ }
+
+ psh := new(tmproto.SignedHeader)
+ if sh.Header != nil {
+  psh.Header = sh.Header.ToProto()
+ }
+ if sh.Commit != nil {
+  psh.Commit = sh.Commit.ToProto()
+ }
+
+ return psh
+}
+
+// SignedHeaderFromProto converts a protobuf SignedHeader back into the native type.
+// It returns an error if the header or the commit is invalid.
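+// A nil Header or Commit in the input is left unset on the result.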
+func SignedHeaderFromProto(shp *tmproto.SignedHeader) (*SignedHeader, error) { + if shp == nil { + return nil, errors.New("nil SignedHeader") + } + + sh := new(SignedHeader) + + if shp.Header != nil { + h, err := HeaderFromProto(shp.Header) + if err != nil { + return nil, err + } + sh.Header = &h + } + + if shp.Commit != nil { + c, err := CommitFromProto(shp.Commit) + if err != nil { + return nil, err + } + sh.Commit = c + } + + return sh, nil +} diff --git a/types/light_test.go b/types/light_test.go new file mode 100644 index 000000000..fa04cd4cf --- /dev/null +++ b/types/light_test.go @@ -0,0 +1,164 @@ +package types + +import ( + "math" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/tendermint/tendermint/crypto" + tmversion "github.com/tendermint/tendermint/proto/tendermint/version" + "github.com/tendermint/tendermint/version" +) + +func TestLightBlockValidateBasic(t *testing.T) { + header := makeRandHeader() + commit := randCommit(time.Now()) + vals, _ := RandValidatorSet(5, 1) + header.Height = commit.Height + header.LastBlockID = commit.BlockID + header.ValidatorsHash = vals.Hash() + header.Version.Block = version.BlockProtocol + vals2, _ := RandValidatorSet(3, 1) + vals3 := vals.Copy() + vals3.Proposer = &Validator{} + commit.BlockID.Hash = header.Hash() + + sh := &SignedHeader{ + Header: &header, + Commit: commit, + } + + testCases := []struct { + name string + sh *SignedHeader + vals *ValidatorSet + expectErr bool + }{ + {"valid light block", sh, vals, false}, + {"hashes don't match", sh, vals2, true}, + {"invalid validator set", sh, vals3, true}, + {"invalid signed header", &SignedHeader{Header: &header, Commit: randCommit(time.Now())}, vals, true}, + } + + for _, tc := range testCases { + lightBlock := LightBlock{ + SignedHeader: tc.sh, + ValidatorSet: tc.vals, + } + err := lightBlock.ValidateBasic(header.ChainID) + if tc.expectErr { + assert.Error(t, err, tc.name) + } else { + assert.NoError(t, err, tc.name) + } + } + +} + +func TestLightBlockProtobuf(t *testing.T) { + header := makeRandHeader() + commit := randCommit(time.Now()) + vals, _ := RandValidatorSet(5, 1) + header.Height = commit.Height + header.LastBlockID = commit.BlockID + header.Version.Block = version.BlockProtocol + header.ValidatorsHash = vals.Hash() + vals3 := vals.Copy() + vals3.Proposer = &Validator{} + commit.BlockID.Hash = header.Hash() + + sh := &SignedHeader{ + Header: &header, + Commit: commit, + } + + testCases := []struct { + name string + sh *SignedHeader + vals *ValidatorSet + toProtoErr bool + toBlockErr bool + }{ + {"valid light block", sh, vals, false, false}, + {"empty signed header", &SignedHeader{}, vals, false, false}, + {"empty validator set", sh, &ValidatorSet{}, false, true}, + {"empty light block", &SignedHeader{}, &ValidatorSet{}, false, true}, + } + + for _, tc := range testCases { + lightBlock := &LightBlock{ + SignedHeader: tc.sh, + ValidatorSet: tc.vals, + } + lbp, err := lightBlock.ToProto() + if tc.toProtoErr { + assert.Error(t, err, tc.name) + } else { + assert.NoError(t, err, tc.name) + } + + lb, err := LightBlockFromProto(lbp) + if tc.toBlockErr { + assert.Error(t, err, tc.name) + } else { + assert.NoError(t, err, tc.name) + assert.Equal(t, lightBlock, lb) + } + } + +} + +func TestSignedHeaderValidateBasic(t *testing.T) { + commit := randCommit(time.Now()) + chainID := "𠜎" + timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) + h := Header{ + Version: tmversion.Consensus{Block: version.BlockProtocol, App: math.MaxInt64}, 
+ ChainID: chainID, + Height: commit.Height, + Time: timestamp, + LastBlockID: commit.BlockID, + LastCommitHash: commit.Hash(), + DataHash: commit.Hash(), + ValidatorsHash: commit.Hash(), + NextValidatorsHash: commit.Hash(), + ConsensusHash: commit.Hash(), + AppHash: commit.Hash(), + LastResultsHash: commit.Hash(), + EvidenceHash: commit.Hash(), + ProposerAddress: crypto.AddressHash([]byte("proposer_address")), + } + + validSignedHeader := SignedHeader{Header: &h, Commit: commit} + validSignedHeader.Commit.BlockID.Hash = validSignedHeader.Hash() + invalidSignedHeader := SignedHeader{} + + testCases := []struct { + testName string + shHeader *Header + shCommit *Commit + expectErr bool + }{ + {"Valid Signed Header", validSignedHeader.Header, validSignedHeader.Commit, false}, + {"Invalid Signed Header", invalidSignedHeader.Header, validSignedHeader.Commit, true}, + {"Invalid Signed Header", validSignedHeader.Header, invalidSignedHeader.Commit, true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + sh := SignedHeader{ + Header: tc.shHeader, + Commit: tc.shCommit, + } + assert.Equal( + t, + tc.expectErr, + sh.ValidateBasic(validSignedHeader.Header.ChainID) != nil, + "Validate Basic had an unexpected result", + ) + }) + } +} diff --git a/types/params.go b/types/params.go index 538bbbd6d..16c85aa55 100644 --- a/types/params.go +++ b/types/params.go @@ -1,13 +1,13 @@ package types import ( + "errors" + "fmt" "time" - "github.com/pkg/errors" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/tmhash" - tmstrings "github.com/tendermint/tendermint/libs/strings" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) const ( @@ -15,83 +15,55 @@ const ( MaxBlockSizeBytes = 104857600 // 100MB // BlockPartSizeBytes is the size of one block part. - BlockPartSizeBytes = 65536 // 64kB + BlockPartSizeBytes uint32 = 65536 // 64kB // MaxBlockPartsCount is the maximum number of block parts. MaxBlockPartsCount = (MaxBlockSizeBytes / BlockPartSizeBytes) + 1 ) -// ConsensusParams contains consensus critical parameters that determine the -// validity of blocks. -type ConsensusParams struct { - Block BlockParams `json:"block"` - Evidence EvidenceParams `json:"evidence"` - Validator ValidatorParams `json:"validator"` -} - -// HashedParams is a subset of ConsensusParams. -// It is amino encoded and hashed into -// the Header.ConsensusHash. -type HashedParams struct { - BlockMaxBytes int64 - BlockMaxGas int64 -} - -// BlockParams define limits on the block size and gas plus minimum time -// between blocks. -type BlockParams struct { - MaxBytes int64 `json:"max_bytes"` - MaxGas int64 `json:"max_gas"` - // Minimum time increment between consecutive blocks (in milliseconds) - // Not exposed to the application. - TimeIotaMs int64 `json:"time_iota_ms"` -} - -// EvidenceParams determine how we handle evidence of malfeasance. -type EvidenceParams struct { - MaxAgeNumBlocks int64 `json:"max_age_num_blocks"` // only accept new evidence more recent than this - MaxAgeDuration time.Duration `json:"max_age_duration"` -} - -// ValidatorParams restrict the public key types validators can use. -// NOTE: uses ABCI pubkey naming, not Amino names. -type ValidatorParams struct { - PubKeyTypes []string `json:"pub_key_types"` -} - // DefaultConsensusParams returns a default ConsensusParams. 
-func DefaultConsensusParams() *ConsensusParams { - return &ConsensusParams{ - DefaultBlockParams(), - DefaultEvidenceParams(), - DefaultValidatorParams(), +func DefaultConsensusParams() *tmproto.ConsensusParams { + return &tmproto.ConsensusParams{ + Block: DefaultBlockParams(), + Evidence: DefaultEvidenceParams(), + Validator: DefaultValidatorParams(), + Version: DefaultVersionParams(), } } // DefaultBlockParams returns a default BlockParams. -func DefaultBlockParams() BlockParams { - return BlockParams{ +func DefaultBlockParams() tmproto.BlockParams { + return tmproto.BlockParams{ MaxBytes: 22020096, // 21MB MaxGas: -1, TimeIotaMs: 1000, // 1s } } -// DefaultEvidenceParams Params returns a default EvidenceParams. -func DefaultEvidenceParams() EvidenceParams { - return EvidenceParams{ +// DefaultEvidenceParams returns a default EvidenceParams. +func DefaultEvidenceParams() tmproto.EvidenceParams { + return tmproto.EvidenceParams{ MaxAgeNumBlocks: 100000, // 27.8 hrs at 1block/s MaxAgeDuration: 48 * time.Hour, + MaxBytes: 1048576, // 1MB } } // DefaultValidatorParams returns a default ValidatorParams, which allows // only ed25519 pubkeys. -func DefaultValidatorParams() ValidatorParams { - return ValidatorParams{[]string{ABCIPubKeyTypeEd25519}} +func DefaultValidatorParams() tmproto.ValidatorParams { + return tmproto.ValidatorParams{ + PubKeyTypes: []string{ABCIPubKeyTypeEd25519}, + } +} + +func DefaultVersionParams() tmproto.VersionParams { + return tmproto.VersionParams{ + AppVersion: 0, + } } -func (params *ValidatorParams) IsValidPubkeyType(pubkeyType string) bool { +func IsValidPubkeyType(params tmproto.ValidatorParams, pubkeyType string) bool { for i := 0; i < len(params.PubKeyTypes); i++ { if params.PubKeyTypes[i] == pubkeyType { return true @@ -102,36 +74,46 @@ func (params *ValidatorParams) IsValidPubkeyType(pubkeyType string) bool { // Validate validates the ConsensusParams to ensure all values are within their // allowed limits, and returns an error if they are not. -func (params *ConsensusParams) Validate() error { +func ValidateConsensusParams(params tmproto.ConsensusParams) error { if params.Block.MaxBytes <= 0 { - return errors.Errorf("block.MaxBytes must be greater than 0. Got %d", + return fmt.Errorf("block.MaxBytes must be greater than 0. Got %d", params.Block.MaxBytes) } if params.Block.MaxBytes > MaxBlockSizeBytes { - return errors.Errorf("block.MaxBytes is too big. %d > %d", + return fmt.Errorf("block.MaxBytes is too big. %d > %d", params.Block.MaxBytes, MaxBlockSizeBytes) } if params.Block.MaxGas < -1 { - return errors.Errorf("block.MaxGas must be greater or equal to -1. Got %d", + return fmt.Errorf("block.MaxGas must be greater or equal to -1. Got %d", params.Block.MaxGas) } if params.Block.TimeIotaMs <= 0 { - return errors.Errorf("block.TimeIotaMs must be greater than 0. Got %v", + return fmt.Errorf("block.TimeIotaMs must be greater than 0. Got %v", params.Block.TimeIotaMs) } if params.Evidence.MaxAgeNumBlocks <= 0 { - return errors.Errorf("evidenceParams.MaxAgeNumBlocks must be greater than 0. Got %d", + return fmt.Errorf("evidence.MaxAgeNumBlocks must be greater than 0. 
Got %d", params.Evidence.MaxAgeNumBlocks) } if params.Evidence.MaxAgeDuration <= 0 { - return errors.Errorf("evidenceParams.MaxAgeDuration must be grater than 0 if provided, Got %v", + return fmt.Errorf("evidence.MaxAgeDuration must be grater than 0 if provided, Got %v", params.Evidence.MaxAgeDuration) } + if params.Evidence.MaxBytes > params.Block.MaxBytes { + return fmt.Errorf("evidence.MaxBytesEvidence is greater than upper bound, %d > %d", + params.Evidence.MaxBytes, params.Block.MaxBytes) + } + + if params.Evidence.MaxBytes < 0 { + return fmt.Errorf("evidence.MaxBytes must be non negative. Got: %d", + params.Evidence.MaxBytes) + } + if len(params.Validator.PubKeyTypes) == 0 { return errors.New("len(Validator.PubKeyTypes) must be greater than 0") } @@ -139,8 +121,8 @@ func (params *ConsensusParams) Validate() error { // Check if keyType is a known ABCIPubKeyType for i := 0; i < len(params.Validator.PubKeyTypes); i++ { keyType := params.Validator.PubKeyTypes[i] - if _, ok := ABCIPubKeyTypesToAminoNames[keyType]; !ok { - return errors.Errorf("params.Validator.PubKeyTypes[%d], %s, is an unknown pubkey type", + if _, ok := ABCIPubKeyTypesToNames[keyType]; !ok { + return fmt.Errorf("params.Validator.PubKeyTypes[%d], %s, is an unknown pubkey type", i, keyType) } } @@ -152,28 +134,29 @@ func (params *ConsensusParams) Validate() error { // Only the Block.MaxBytes and Block.MaxGas are included in the hash. // This allows the ConsensusParams to evolve more without breaking the block // protocol. No need for a Merkle tree here, just a small struct to hash. -func (params *ConsensusParams) Hash() []byte { +func HashConsensusParams(params tmproto.ConsensusParams) []byte { hasher := tmhash.New() - bz := cdcEncode(HashedParams{ - params.Block.MaxBytes, - params.Block.MaxGas, - }) - if bz == nil { - panic("cannot fail to encode ConsensusParams") - } - hasher.Write(bz) - return hasher.Sum(nil) -} -func (params *ConsensusParams) Equals(params2 *ConsensusParams) bool { - return params.Block == params2.Block && - params.Evidence == params2.Evidence && - tmstrings.StringSliceEqual(params.Validator.PubKeyTypes, params2.Validator.PubKeyTypes) + hp := tmproto.HashedParams{ + BlockMaxBytes: params.Block.MaxBytes, + BlockMaxGas: params.Block.MaxGas, + } + + bz, err := hp.Marshal() + if err != nil { + panic(err) + } + + _, err = hasher.Write(bz) + if err != nil { + panic(err) + } + return hasher.Sum(nil) } // Update returns a copy of the params with updates from the non-zero fields of p2. // NOTE: note: must not modify the original -func (params ConsensusParams) Update(params2 *abci.ConsensusParams) ConsensusParams { +func UpdateConsensusParams(params tmproto.ConsensusParams, params2 *abci.ConsensusParams) tmproto.ConsensusParams { res := params // explicit copy if params2 == nil { @@ -188,11 +171,15 @@ func (params ConsensusParams) Update(params2 *abci.ConsensusParams) ConsensusPar if params2.Evidence != nil { res.Evidence.MaxAgeNumBlocks = params2.Evidence.MaxAgeNumBlocks res.Evidence.MaxAgeDuration = params2.Evidence.MaxAgeDuration + res.Evidence.MaxBytes = params2.Evidence.MaxBytes } if params2.Validator != nil { // Copy params2.Validator.PubkeyTypes, and set result's value to the copy. // This avoids having to initialize the slice to 0 values, and then write to it again. res.Validator.PubKeyTypes = append([]string{}, params2.Validator.PubKeyTypes...) 
} + if params2.Version != nil { + res.Version.AppVersion = params2.Version.AppVersion + } return res } diff --git a/types/params_test.go b/types/params_test.go index b446bda33..f3a71ca50 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" abci "github.com/tendermint/tendermint/abci/types" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) var ( @@ -18,32 +19,34 @@ var ( func TestConsensusParamsValidation(t *testing.T) { testCases := []struct { - params ConsensusParams + params tmproto.ConsensusParams valid bool }{ // test block params - 0: {makeParams(1, 0, 10, 1, valEd25519), true}, - 1: {makeParams(0, 0, 10, 1, valEd25519), false}, - 2: {makeParams(47*1024*1024, 0, 10, 1, valEd25519), true}, - 3: {makeParams(10, 0, 10, 1, valEd25519), true}, - 4: {makeParams(100*1024*1024, 0, 10, 1, valEd25519), true}, - 5: {makeParams(101*1024*1024, 0, 10, 1, valEd25519), false}, - 6: {makeParams(1024*1024*1024, 0, 10, 1, valEd25519), false}, - 7: {makeParams(1024*1024*1024, 0, 10, -1, valEd25519), false}, - 8: {makeParams(1, 0, -10, 1, valEd25519), false}, + 0: {makeParams(1, 0, 10, 2, 0, valEd25519), true}, + 1: {makeParams(0, 0, 10, 2, 0, valEd25519), false}, + 2: {makeParams(47*1024*1024, 0, 10, 2, 0, valEd25519), true}, + 3: {makeParams(10, 0, 10, 2, 0, valEd25519), true}, + 4: {makeParams(100*1024*1024, 0, 10, 2, 0, valEd25519), true}, + 5: {makeParams(101*1024*1024, 0, 10, 2, 0, valEd25519), false}, + 6: {makeParams(1024*1024*1024, 0, 10, 2, 0, valEd25519), false}, + 7: {makeParams(1024*1024*1024, 0, 10, -1, 0, valEd25519), false}, + 8: {makeParams(1, 0, -10, 2, 0, valEd25519), false}, // test evidence params - 9: {makeParams(1, 0, 10, 0, valEd25519), false}, - 10: {makeParams(1, 0, 10, -1, valEd25519), false}, + 9: {makeParams(1, 0, 10, 0, 0, valEd25519), false}, + 10: {makeParams(1, 0, 10, 2, 2, valEd25519), false}, + 11: {makeParams(1000, 0, 10, 2, 1, valEd25519), true}, + 12: {makeParams(1, 0, 10, -1, 0, valEd25519), false}, // test no pubkey type provided - 11: {makeParams(1, 0, 10, 1, []string{}), false}, + 13: {makeParams(1, 0, 10, 2, 0, []string{}), false}, // test invalid pubkey type provided - 12: {makeParams(1, 0, 10, 1, []string{"potatoes make good pubkeys"}), false}, + 14: {makeParams(1, 0, 10, 2, 0, []string{"potatoes make good pubkeys"}), false}, } for i, tc := range testCases { if tc.valid { - assert.NoErrorf(t, tc.params.Validate(), "expected no error for valid params (#%d)", i) + assert.NoErrorf(t, ValidateConsensusParams(tc.params), "expected no error for valid params (#%d)", i) } else { - assert.Errorf(t, tc.params.Validate(), "expected error for non valid params (#%d)", i) + assert.Errorf(t, ValidateConsensusParams(tc.params), "expected error for non valid params (#%d)", i) } } } @@ -52,39 +55,41 @@ func makeParams( blockBytes, blockGas int64, blockTimeIotaMs int64, evidenceAge int64, + maxEvidenceBytes int64, pubkeyTypes []string, -) ConsensusParams { - return ConsensusParams{ - Block: BlockParams{ +) tmproto.ConsensusParams { + return tmproto.ConsensusParams{ + Block: tmproto.BlockParams{ MaxBytes: blockBytes, MaxGas: blockGas, TimeIotaMs: blockTimeIotaMs, }, - Evidence: EvidenceParams{ + Evidence: tmproto.EvidenceParams{ MaxAgeNumBlocks: evidenceAge, MaxAgeDuration: time.Duration(evidenceAge), + MaxBytes: maxEvidenceBytes, }, - Validator: ValidatorParams{ + Validator: tmproto.ValidatorParams{ PubKeyTypes: pubkeyTypes, }, } } func TestConsensusParamsHash(t *testing.T) { - params := 
[]ConsensusParams{ - makeParams(4, 2, 10, 3, valEd25519), - makeParams(1, 4, 10, 3, valEd25519), - makeParams(1, 2, 10, 4, valEd25519), - makeParams(2, 5, 10, 7, valEd25519), - makeParams(1, 7, 10, 6, valEd25519), - makeParams(9, 5, 10, 4, valEd25519), - makeParams(7, 8, 10, 9, valEd25519), - makeParams(4, 6, 10, 5, valEd25519), + params := []tmproto.ConsensusParams{ + makeParams(4, 2, 10, 3, 1, valEd25519), + makeParams(1, 4, 10, 3, 1, valEd25519), + makeParams(1, 2, 10, 4, 1, valEd25519), + makeParams(2, 5, 10, 7, 1, valEd25519), + makeParams(1, 7, 10, 6, 1, valEd25519), + makeParams(9, 5, 10, 4, 1, valEd25519), + makeParams(7, 8, 10, 9, 1, valEd25519), + makeParams(4, 6, 10, 5, 1, valEd25519), } hashes := make([][]byte, len(params)) for i := range params { - hashes[i] = params[i].Hash() + hashes[i] = HashConsensusParams(params[i]) } // make sure there are no duplicates... @@ -99,36 +104,48 @@ func TestConsensusParamsHash(t *testing.T) { func TestConsensusParamsUpdate(t *testing.T) { testCases := []struct { - params ConsensusParams + params tmproto.ConsensusParams updates *abci.ConsensusParams - updatedParams ConsensusParams + updatedParams tmproto.ConsensusParams }{ // empty updates { - makeParams(1, 2, 10, 3, valEd25519), + makeParams(1, 2, 10, 3, 0, valEd25519), &abci.ConsensusParams{}, - makeParams(1, 2, 10, 3, valEd25519), + makeParams(1, 2, 10, 3, 0, valEd25519), }, // fine updates { - makeParams(1, 2, 10, 3, valEd25519), + makeParams(1, 2, 10, 3, 0, valEd25519), &abci.ConsensusParams{ Block: &abci.BlockParams{ MaxBytes: 100, MaxGas: 200, }, - Evidence: &abci.EvidenceParams{ + Evidence: &tmproto.EvidenceParams{ MaxAgeNumBlocks: 300, MaxAgeDuration: time.Duration(300), + MaxBytes: 50, }, - Validator: &abci.ValidatorParams{ + Validator: &tmproto.ValidatorParams{ PubKeyTypes: valSecp256k1, }, }, - makeParams(100, 200, 10, 300, valSecp256k1), + makeParams(100, 200, 10, 300, 50, valSecp256k1), }, } for _, tc := range testCases { - assert.Equal(t, tc.updatedParams, tc.params.Update(tc.updates)) + assert.Equal(t, tc.updatedParams, UpdateConsensusParams(tc.params, tc.updates)) } } + +func TestConsensusParamsUpdate_AppVersion(t *testing.T) { + params := makeParams(1, 2, 10, 3, 0, valEd25519) + + assert.EqualValues(t, 0, params.Version.AppVersion) + + updated := UpdateConsensusParams(params, + &abci.ConsensusParams{Version: &tmproto.VersionParams{AppVersion: 1}}) + + assert.EqualValues(t, 1, updated.Version.AppVersion) +} diff --git a/types/part_set.go b/types/part_set.go index 51af767b8..5e76b57fa 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -2,16 +2,17 @@ package types import ( "bytes" + "errors" "fmt" "io" - "sync" - - "github.com/pkg/errors" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/libs/bits" tmbytes "github.com/tendermint/tendermint/libs/bytes" + tmjson "github.com/tendermint/tendermint/libs/json" tmmath "github.com/tendermint/tendermint/libs/math" + tmsync "github.com/tendermint/tendermint/libs/sync" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) var ( @@ -20,29 +21,32 @@ var ( ) type Part struct { - Index int `json:"index"` - Bytes tmbytes.HexBytes `json:"bytes"` - Proof merkle.SimpleProof `json:"proof"` + Index uint32 `json:"index"` + Bytes tmbytes.HexBytes `json:"bytes"` + Proof merkle.Proof `json:"proof"` } // ValidateBasic performs basic validation. 
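Part.Index switching from int to uint32 is why the "negative Index" branch disappears from ValidateBasic, which follows next: the invalid state becomes unrepresentable rather than merely checked-for. A minimal sketch of the trade-off, standard library only:

    package main

    import "fmt"

    func main() {
    	// An unsigned index can never be negative, so the old
    	// `if part.Index < 0` guard is dead code. The flip side: arithmetic
    	// that would have gone negative now wraps around instead.
    	var index uint32
    	index--
    	fmt.Println(index) // 4294967295, not -1

    	// The size check survives, but needs an explicit conversion since
    	// BlockPartSizeBytes is now typed uint32 while len() yields int.
    	const blockPartSizeBytes uint32 = 65536
    	payload := make([]byte, 70000)
    	fmt.Println(len(payload) > int(blockPartSizeBytes)) // true: rejected
    }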
func (part *Part) ValidateBasic() error { - if part.Index < 0 { - return errors.New("negative Index") - } - if len(part.Bytes) > BlockPartSizeBytes { - return errors.Errorf("too big: %d bytes, max: %d", len(part.Bytes), BlockPartSizeBytes) + if len(part.Bytes) > int(BlockPartSizeBytes) { + return fmt.Errorf("too big: %d bytes, max: %d", len(part.Bytes), BlockPartSizeBytes) } if err := part.Proof.ValidateBasic(); err != nil { - return errors.Wrap(err, "wrong Proof") + return fmt.Errorf("wrong Proof: %w", err) } return nil } +// String returns a string representation of Part. +// +// See StringIndented. func (part *Part) String() string { return part.StringIndented("") } +// StringIndented returns an indented Part. +// +// See merkle.Proof#StringIndented func (part *Part) StringIndented(indent string) string { return fmt.Sprintf(`Part{#%v %s Bytes: %X... @@ -54,13 +58,48 @@ func (part *Part) StringIndented(indent string) string { indent) } +func (part *Part) ToProto() (*tmproto.Part, error) { + if part == nil { + return nil, errors.New("nil part") + } + pb := new(tmproto.Part) + proof := part.Proof.ToProto() + + pb.Index = part.Index + pb.Bytes = part.Bytes + pb.Proof = *proof + + return pb, nil +} + +func PartFromProto(pb *tmproto.Part) (*Part, error) { + if pb == nil { + return nil, errors.New("nil part") + } + + part := new(Part) + proof, err := merkle.ProofFromProto(&pb.Proof) + if err != nil { + return nil, err + } + part.Index = pb.Index + part.Bytes = pb.Bytes + part.Proof = *proof + + return part, part.ValidateBasic() +} + //------------------------------------- type PartSetHeader struct { - Total int `json:"total"` + Total uint32 `json:"total"` Hash tmbytes.HexBytes `json:"hash"` } +// String returns a string representation of PartSetHeader. +// +// 1. total number of parts +// 2. first 6 bytes of the hash func (psh PartSetHeader) String() string { return fmt.Sprintf("%v:%X", psh.Total, tmbytes.Fingerprint(psh.Hash)) } @@ -75,48 +114,73 @@ func (psh PartSetHeader) Equals(other PartSetHeader) bool { // ValidateBasic performs basic validation. func (psh PartSetHeader) ValidateBasic() error { - if psh.Total < 0 { - return errors.New("negative Total") - } - // Hash can be empty in case of POLBlockID.PartsHeader in Proposal. + // Hash can be empty in case of POLBlockID.PartSetHeader in Proposal. if err := ValidateHash(psh.Hash); err != nil { - return errors.Wrap(err, "Wrong Hash") + return fmt.Errorf("wrong Hash: %w", err) } return nil } +// ToProto converts PartSetHeader to protobuf +func (psh *PartSetHeader) ToProto() tmproto.PartSetHeader { + if psh == nil { + return tmproto.PartSetHeader{} + } + + return tmproto.PartSetHeader{ + Total: psh.Total, + Hash: psh.Hash, + } +} + +// FromProto sets a protobuf PartSetHeader to the given pointer +func PartSetHeaderFromProto(ppsh *tmproto.PartSetHeader) (*PartSetHeader, error) { + if ppsh == nil { + return nil, errors.New("nil PartSetHeader") + } + psh := new(PartSetHeader) + psh.Total = ppsh.Total + psh.Hash = ppsh.Hash + + return psh, psh.ValidateBasic() +} + //------------------------------------- type PartSet struct { - total int + total uint32 hash []byte - mtx sync.Mutex + mtx tmsync.Mutex parts []*Part partsBitArray *bits.BitArray - count int + count uint32 + // a count of the total size (in bytes). Used to ensure that the + // part set doesn't exceed the maximum block bytes + byteSize int64 } // Returns an immutable, full PartSet from the data bytes. // The data bytes are split into "partSize" chunks, and merkle tree computed. 
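The chunking described above sizes the part slice with the round-up division idiom (len(data) + partSize - 1) / partSize — an exact ceiling, unlike the simpler +1 upper bound used for MaxBlockPartsCount. A standalone check of its edge cases before NewPartSetFromData itself:

    package main

    import "fmt"

    // ceilDiv mirrors the expression used to compute `total` in
    // NewPartSetFromData. Note n+k-1 could overflow uint32 for n near the
    // maximum; block sizes are capped at 100MB, far below that.
    func ceilDiv(n, k uint32) uint32 { return (n + k - 1) / k }

    func main() {
    	const partSize uint32 = 65536 // BlockPartSizeBytes
    	for _, n := range []uint32{0, 1, 65536, 65537, 131072} {
    		fmt.Printf("%6d bytes -> %d part(s)\n", n, ceilDiv(n, partSize))
    	}
    	// 0 -> 0, 1 -> 1, 65536 -> 1, 65537 -> 2, 131072 -> 2
    }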
-func NewPartSetFromData(data []byte, partSize int) *PartSet { +// CONTRACT: partSize is greater than zero. +func NewPartSetFromData(data []byte, partSize uint32) *PartSet { // divide data into 4kb parts. - total := (len(data) + partSize - 1) / partSize + total := (uint32(len(data)) + partSize - 1) / partSize parts := make([]*Part, total) partsBytes := make([][]byte, total) - partsBitArray := bits.NewBitArray(total) - for i := 0; i < total; i++ { + partsBitArray := bits.NewBitArray(int(total)) + for i := uint32(0); i < total; i++ { part := &Part{ Index: i, - Bytes: data[i*partSize : tmmath.MinInt(len(data), (i+1)*partSize)], + Bytes: data[i*partSize : tmmath.MinInt(len(data), int((i+1)*partSize))], } parts[i] = part partsBytes[i] = part.Bytes - partsBitArray.SetIndex(i, true) + partsBitArray.SetIndex(int(i), true) } // Compute merkle proofs - root, proofs := merkle.SimpleProofsFromByteSlices(partsBytes) - for i := 0; i < total; i++ { + root, proofs := merkle.ProofsFromByteSlices(partsBytes) + for i := uint32(0); i < total; i++ { parts[i].Proof = *proofs[i] } return &PartSet{ @@ -125,6 +189,7 @@ func NewPartSetFromData(data []byte, partSize int) *PartSet { parts: parts, partsBitArray: partsBitArray, count: total, + byteSize: int64(len(data)), } } @@ -134,8 +199,9 @@ func NewPartSetFromHeader(header PartSetHeader) *PartSet { total: header.Total, hash: header.Hash, parts: make([]*Part, header.Total), - partsBitArray: bits.NewBitArray(header.Total), + partsBitArray: bits.NewBitArray(int(header.Total)), count: 0, + byteSize: 0, } } @@ -164,7 +230,7 @@ func (ps *PartSet) BitArray() *bits.BitArray { func (ps *PartSet) Hash() []byte { if ps == nil { - return nil + return merkle.HashFromByteSlices(nil) } return ps.hash } @@ -176,14 +242,21 @@ func (ps *PartSet) HashesTo(hash []byte) bool { return bytes.Equal(ps.hash, hash) } -func (ps *PartSet) Count() int { +func (ps *PartSet) Count() uint32 { if ps == nil { return 0 } return ps.count } -func (ps *PartSet) Total() int { +func (ps *PartSet) ByteSize() int64 { + if ps == nil { + return 0 + } + return ps.byteSize +} + +func (ps *PartSet) Total() uint32 { if ps == nil { return 0 } @@ -214,8 +287,9 @@ func (ps *PartSet) AddPart(part *Part) (bool, error) { // Add part ps.parts[part.Index] = part - ps.partsBitArray.SetIndex(part.Index, true) + ps.partsBitArray.SetIndex(int(part.Index), true) ps.count++ + ps.byteSize += int64(len(part.Bytes)) return true, nil } @@ -271,6 +345,9 @@ func (psr *PartSetReader) Read(p []byte) (n int, err error) { return psr.Read(p) } +// StringShort returns a short version of String. 
+// +// (Count of Total) func (ps *PartSet) StringShort() string { if ps == nil { return "nil-PartSet" @@ -288,7 +365,7 @@ func (ps *PartSet) MarshalJSON() ([]byte, error) { ps.mtx.Lock() defer ps.mtx.Unlock() - return cdc.MarshalJSON(struct { + return tmjson.Marshal(struct { CountTotal string `json:"count/total"` PartsBitArray *bits.BitArray `json:"parts_bit_array"` }{ diff --git a/types/part_set_test.go b/types/part_set_test.go index 854848a44..c6ea0f452 100644 --- a/types/part_set_test.go +++ b/types/part_set_test.go @@ -17,23 +17,25 @@ const ( func TestBasicPartSet(t *testing.T) { // Construct random data of size partSize * 100 - data := tmrand.Bytes(testPartSize * 100) + nParts := 100 + data := tmrand.Bytes(testPartSize * nParts) partSet := NewPartSetFromData(data, testPartSize) assert.NotEmpty(t, partSet.Hash()) - assert.Equal(t, 100, partSet.Total()) - assert.Equal(t, 100, partSet.BitArray().Size()) + assert.EqualValues(t, nParts, partSet.Total()) + assert.Equal(t, nParts, partSet.BitArray().Size()) assert.True(t, partSet.HashesTo(partSet.Hash())) assert.True(t, partSet.IsComplete()) - assert.Equal(t, 100, partSet.Count()) + assert.EqualValues(t, nParts, partSet.Count()) + assert.EqualValues(t, testPartSize*nParts, partSet.ByteSize()) // Test adding parts to a new partSet. partSet2 := NewPartSetFromHeader(partSet.Header()) assert.True(t, partSet2.HasHeader(partSet.Header())) - for i := 0; i < partSet.Total(); i++ { + for i := 0; i < int(partSet.Total()); i++ { part := partSet.GetPart(i) - //t.Logf("\n%v", part) + // t.Logf("\n%v", part) added, err := partSet2.AddPart(part) if !added || err != nil { t.Errorf("failed to add part %v, error: %v", i, err) @@ -49,7 +51,8 @@ func TestBasicPartSet(t *testing.T) { assert.Nil(t, err) assert.Equal(t, partSet.Hash(), partSet2.Hash()) - assert.Equal(t, 100, partSet2.Total()) + assert.EqualValues(t, nParts, partSet2.Total()) + assert.EqualValues(t, nParts*testPartSize, partSet.ByteSize()) assert.True(t, partSet2.IsComplete()) // Reconstruct data, assert that they are equal. 
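The ByteSize assertions in the test above reduce to simple bookkeeping: each AddPart adds len(part.Bytes) to a running total, which must land back at len(data) once the set is complete. A stand-in with plain byte slices (no merkle proofs, no real PartSet):

    package main

    import (
    	"bytes"
    	"fmt"
    )

    func main() {
    	const partSize = 4
    	data := []byte("June_15_2020_amino_was_removed")

    	// Split into fixed-size chunks, the last one possibly short.
    	var parts [][]byte
    	for i := 0; i < len(data); i += partSize {
    		end := i + partSize
    		if end > len(data) {
    			end = len(data)
    		}
    		parts = append(parts, data[i:end])
    	}

    	// Reassemble, tracking the running size the way AddPart does.
    	var byteSize int64
    	var buf bytes.Buffer
    	for _, p := range parts {
    		byteSize += int64(len(p)) // cf. ps.byteSize += int64(len(part.Bytes))
    		buf.Write(p)
    	}

    	fmt.Println(byteSize == int64(len(data)))   // true
    	fmt.Println(bytes.Equal(buf.Bytes(), data)) // true
    }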
@@ -92,7 +95,6 @@ func TestPartSetHeaderValidateBasic(t *testing.T) { expectErr bool }{ {"Good PartSet", func(psHeader *PartSetHeader) {}, false}, - {"Negative Total", func(psHeader *PartSetHeader) { psHeader.Total = -2 }, true}, {"Invalid Hash", func(psHeader *PartSetHeader) { psHeader.Hash = make([]byte, 1) }, true}, } for _, tc := range testCases { @@ -114,10 +116,9 @@ func TestPartValidateBasic(t *testing.T) { expectErr bool }{ {"Good Part", func(pt *Part) {}, false}, - {"Negative index", func(pt *Part) { pt.Index = -1 }, true}, {"Too big part", func(pt *Part) { pt.Bytes = make([]byte, BlockPartSizeBytes+1) }, true}, {"Too big proof", func(pt *Part) { - pt.Proof = merkle.SimpleProof{ + pt.Proof = merkle.Proof{ Total: 1, Index: 1, LeafHash: make([]byte, 1024*1024), @@ -136,3 +137,58 @@ func TestPartValidateBasic(t *testing.T) { }) } } + +func TestPartSetHeaderProtoBuf(t *testing.T) { + testCases := []struct { + msg string + ps1 *PartSetHeader + expPass bool + }{ + {"success empty", &PartSetHeader{}, true}, + {"success", + &PartSetHeader{Total: 1, Hash: []byte("hash")}, true}, + } + + for _, tc := range testCases { + protoPSH := tc.ps1.ToProto() + + psh, err := PartSetHeaderFromProto(&protoPSH) + if tc.expPass { + require.Equal(t, tc.ps1, psh, tc.msg) + } else { + require.Error(t, err, tc.msg) + } + } +} + +func TestPartProtoBuf(t *testing.T) { + + proof := merkle.Proof{ + Total: 1, + Index: 1, + LeafHash: tmrand.Bytes(32), + } + testCases := []struct { + msg string + ps1 *Part + expPass bool + }{ + {"failure empty", &Part{}, false}, + {"failure nil", nil, false}, + {"success", + &Part{Index: 1, Bytes: tmrand.Bytes(32), Proof: proof}, true}, + } + + for _, tc := range testCases { + proto, err := tc.ps1.ToProto() + if tc.expPass { + require.NoError(t, err, tc.msg) + } + + p, err := PartFromProto(proto) + if tc.expPass { + require.NoError(t, err) + require.Equal(t, tc.ps1, p, tc.msg) + } + } +} diff --git a/types/priv_validator.go b/types/priv_validator.go index fbe8cebf0..49211773a 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -7,6 +7,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) // PrivValidator defines the functionality of a local Tendermint validator @@ -14,13 +15,10 @@ import ( type PrivValidator interface { GetPubKey() (crypto.PubKey, error) - SignVote(chainID string, vote *Vote) error - SignProposal(chainID string, proposal *Proposal) error + SignVote(chainID string, vote *tmproto.Vote) error + SignProposal(chainID string, proposal *tmproto.Proposal) error } -//---------------------------------------- -// Misc. - type PrivValidatorsByAddress []PrivValidator func (pvs PrivValidatorsByAddress) Len() int { @@ -41,9 +39,7 @@ func (pvs PrivValidatorsByAddress) Less(i, j int) bool { } func (pvs PrivValidatorsByAddress) Swap(i, j int) { - it := pvs[i] - pvs[i] = pvs[j] - pvs[j] = it + pvs[i], pvs[j] = pvs[j], pvs[i] } //---------------------------------------- @@ -74,12 +70,13 @@ func (pv MockPV) GetPubKey() (crypto.PubKey, error) { } // Implements PrivValidator. 
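MockPV's SignVote and SignProposal, rewritten just below, reduce to: derive canonical sign bytes for a chain ID, sign them, attach the signature to the proto message. The essence with the standard library's ed25519 — the concatenated "sign bytes" here are a stand-in, not tendermint's actual canonical proto encoding:

    package main

    import (
    	"crypto/ed25519"
    	"crypto/rand"
    	"fmt"
    )

    func main() {
    	pub, priv, err := ed25519.GenerateKey(rand.Reader)
    	if err != nil {
    		panic(err)
    	}

    	// Stand-in sign bytes; VoteSignBytes would derive these from the
    	// canonicalized vote plus the chain ID.
    	signBytes := []byte("test_chain_id|vote-payload")
    	sig := ed25519.Sign(priv, signBytes)
    	fmt.Println(ed25519.Verify(pub, signBytes, sig)) // true

    	// Signing under the wrong chain ID (cf. the breakVoteSigning path
    	// below) yields a signature that fails against the honest bytes.
    	badSig := ed25519.Sign(priv, []byte("incorrect-chain-id|vote-payload"))
    	fmt.Println(ed25519.Verify(pub, signBytes, badSig)) // false
    }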
-func (pv MockPV) SignVote(chainID string, vote *Vote) error { +func (pv MockPV) SignVote(chainID string, vote *tmproto.Vote) error { useChainID := chainID if pv.breakVoteSigning { useChainID = "incorrect-chain-id" } - signBytes := vote.SignBytes(useChainID) + + signBytes := VoteSignBytes(useChainID, vote) sig, err := pv.PrivKey.Sign(signBytes) if err != nil { return err @@ -89,12 +86,13 @@ func (pv MockPV) SignVote(chainID string, vote *Vote) error { } // Implements PrivValidator. -func (pv MockPV) SignProposal(chainID string, proposal *Proposal) error { +func (pv MockPV) SignProposal(chainID string, proposal *tmproto.Proposal) error { useChainID := chainID if pv.breakProposalSigning { useChainID = "incorrect-chain-id" } - signBytes := proposal.SignBytes(useChainID) + + signBytes := ProposalSignBytes(useChainID, proposal) sig, err := pv.PrivKey.Sign(signBytes) if err != nil { return err @@ -103,6 +101,15 @@ func (pv MockPV) SignProposal(chainID string, proposal *Proposal) error { return nil } +func (pv MockPV) ExtractIntoValidator(votingPower int64) *Validator { + pubKey, _ := pv.GetPubKey() + return &Validator{ + Address: pubKey.Address(), + PubKey: pubKey, + VotingPower: votingPower, + } +} + // String returns a string representation of the MockPV. func (pv MockPV) String() string { mpv, _ := pv.GetPubKey() // mockPV will never return an error, ignored here @@ -122,12 +129,12 @@ type ErroringMockPV struct { var ErroringMockPVErr = errors.New("erroringMockPV always returns an error") // Implements PrivValidator. -func (pv *ErroringMockPV) SignVote(chainID string, vote *Vote) error { +func (pv *ErroringMockPV) SignVote(chainID string, vote *tmproto.Vote) error { return ErroringMockPVErr } // Implements PrivValidator. -func (pv *ErroringMockPV) SignProposal(chainID string, proposal *Proposal) error { +func (pv *ErroringMockPV) SignProposal(chainID string, proposal *tmproto.Proposal) error { return ErroringMockPVErr } diff --git a/types/proposal.go b/types/proposal.go index 8175c8a1f..20f9e5fbe 100644 --- a/types/proposal.go +++ b/types/proposal.go @@ -5,7 +5,9 @@ import ( "fmt" "time" - "github.com/tendermint/tendermint/libs/bytes" + tmbytes "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/libs/protoio" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" ) @@ -21,10 +23,10 @@ var ( // a so-called Proof-of-Lock (POL) round, as noted in the POLRound. // If POLRound >= 0, then BlockID corresponds to the block that is locked in POLRound. type Proposal struct { - Type SignedMsgType + Type tmproto.SignedMsgType Height int64 `json:"height"` - Round int `json:"round"` - POLRound int `json:"pol_round"` // -1 if null. + Round int32 `json:"round"` // there can not be greater than 2_147_483_647 rounds + POLRound int32 `json:"pol_round"` // -1 if null. BlockID BlockID `json:"block_id"` Timestamp time.Time `json:"timestamp"` Signature []byte `json:"signature"` @@ -32,9 +34,9 @@ type Proposal struct { // NewProposal returns a new Proposal. // If there is no POLRound, polRound should be -1. 
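Round and POLRound shrink from int to int32 above; the inline comment's claim that 2_147_483_647 rounds can never be exhausted holds up to quick arithmetic (one round per second for roughly 68 years), checked here before NewProposal below:

    package main

    import (
    	"fmt"
    	"math"
    	"time"
    )

    func main() {
    	// Even burning one consensus round every second, an int32 round
    	// counter lasts about 68 years.
    	maxRounds := int64(math.MaxInt32) // 2_147_483_647
    	d := time.Duration(maxRounds) * time.Second
    	fmt.Printf("%d rounds at 1/s ≈ %.1f years\n", maxRounds, d.Hours()/(24*365))
    }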
-func NewProposal(height int64, round int, polRound int, blockID BlockID) *Proposal { +func NewProposal(height int64, round int32, polRound int32, blockID BlockID) *Proposal { return &Proposal{ - Type: ProposalType, + Type: tmproto.ProposalType, Height: height, Round: round, BlockID: blockID, @@ -45,7 +47,7 @@ func NewProposal(height int64, round int, polRound int, blockID BlockID) *Propos // ValidateBasic performs basic validation. func (p *Proposal) ValidateBasic() error { - if p.Type != ProposalType { + if p.Type != tmproto.ProposalType { return errors.New("invalid Type") } if p.Height < 0 { @@ -70,6 +72,7 @@ func (p *Proposal) ValidateBasic() error { if len(p.Signature) == 0 { return errors.New("signature is missing") } + if len(p.Signature) > MaxSignatureSize { return fmt.Errorf("signature is too big (max: %d)", MaxSignatureSize) } @@ -77,21 +80,82 @@ func (p *Proposal) ValidateBasic() error { } // String returns a string representation of the Proposal. +// +// 1. height +// 2. round +// 3. block ID +// 4. POL round +// 5. first 6 bytes of signature +// 6. timestamp +// +// See BlockID#String. func (p *Proposal) String() string { return fmt.Sprintf("Proposal{%v/%v (%v, %v) %X @ %s}", p.Height, p.Round, p.BlockID, p.POLRound, - bytes.Fingerprint(p.Signature), + tmbytes.Fingerprint(p.Signature), CanonicalTime(p.Timestamp)) } -// SignBytes returns the Proposal bytes for signing -func (p *Proposal) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeProposal(chainID, p)) +// ProposalSignBytes returns the proto-encoding of the canonicalized Proposal, +// for signing. Panics if the marshaling fails. +// +// The encoded Protobuf message is varint length-prefixed (using MarshalDelimited) +// for backwards-compatibility with the Amino encoding, due to e.g. hardware +// devices that rely on this encoding. +// +// See CanonicalizeProposal +func ProposalSignBytes(chainID string, p *tmproto.Proposal) []byte { + pb := CanonicalizeProposal(chainID, p) + bz, err := protoio.MarshalDelimited(&pb) if err != nil { panic(err) } + return bz } + +// ToProto converts Proposal to protobuf +func (p *Proposal) ToProto() *tmproto.Proposal { + if p == nil { + return &tmproto.Proposal{} + } + pb := new(tmproto.Proposal) + + pb.BlockID = p.BlockID.ToProto() + pb.Type = p.Type + pb.Height = p.Height + pb.Round = p.Round + pb.PolRound = p.POLRound + pb.Timestamp = p.Timestamp + pb.Signature = p.Signature + + return pb +} + +// FromProto sets a protobuf Proposal to the given pointer. +// It returns an error if the proposal is invalid. 
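Before ProposalFromProto below: the varint length prefix that ProposalSignBytes documents above is plain protobuf-style framing, reproducible with encoding/binary alone. A sketch with an arbitrary payload standing in for the real canonical proto bytes:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // marshalDelimited mimics the framing done by protoio.MarshalDelimited:
    // the payload's byte length as a uvarint, then the payload itself.
    func marshalDelimited(msg []byte) []byte {
    	var lenBuf [binary.MaxVarintLen64]byte
    	n := binary.PutUvarint(lenBuf[:], uint64(len(msg)))
    	return append(lenBuf[:n], msg...)
    }

    func main() {
    	payload := []byte("canonical-proposal-bytes")
    	framed := marshalDelimited(payload)

    	// Decode side: read the uvarint length, then exactly that many bytes.
    	size, n := binary.Uvarint(framed)
    	fmt.Println(size == uint64(len(payload)))            // true
    	fmt.Println(string(framed[n:]) == string(payload))   // true
    }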
+func ProposalFromProto(pp *tmproto.Proposal) (*Proposal, error) { + if pp == nil { + return nil, errors.New("nil proposal") + } + + p := new(Proposal) + + blockID, err := BlockIDFromProto(&pp.BlockID) + if err != nil { + return nil, err + } + + p.BlockID = *blockID + p.Type = pp.Type + p.Height = pp.Height + p.Round = pp.Round + p.POLRound = pp.PolRound + p.Timestamp = pp.Timestamp + p.Signature = pp.Signature + + return p, p.ValidateBasic() +} diff --git a/types/proposal_test.go b/types/proposal_test.go index e4ea19183..71d4d62cc 100644 --- a/types/proposal_test.go +++ b/types/proposal_test.go @@ -5,13 +5,20 @@ import ( "testing" "time" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/libs/protoio" + tmrand "github.com/tendermint/tendermint/libs/rand" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) -var testProposal *Proposal +var ( + testProposal *Proposal + pbp *tmproto.Proposal +) func init() { var stamp, err = time.Parse(TimeFormat, "2018-02-11T07:09:22.765Z") @@ -19,26 +26,29 @@ func init() { panic(err) } testProposal = &Proposal{ - Height: 12345, - Round: 23456, - BlockID: BlockID{[]byte{1, 2, 3}, PartSetHeader{111, []byte("blockparts")}}, + Height: 12345, + Round: 23456, + BlockID: BlockID{Hash: []byte("--June_15_2020_amino_was_removed"), + PartSetHeader: PartSetHeader{Total: 111, Hash: []byte("--June_15_2020_amino_was_removed")}}, POLRound: -1, Timestamp: stamp, } + pbp = testProposal.ToProto() } func TestProposalSignable(t *testing.T) { chainID := "test_chain_id" - signBytes := testProposal.SignBytes(chainID) + signBytes := ProposalSignBytes(chainID, pbp) + pb := CanonicalizeProposal(chainID, pbp) - expected, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeProposal(chainID, testProposal)) + expected, err := protoio.MarshalDelimited(&pb) require.NoError(t, err) require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Proposal") } func TestProposalString(t *testing.T) { str := testProposal.String() - expected := `Proposal{12345/23456 (010203:111:626C6F636B70, -1) 000000000000 @ 2018-02-11T07:09:22.765Z}` + expected := `Proposal{12345/23456 (2D2D4A756E655F31355F323032305F616D696E6F5F7761735F72656D6F766564:111:2D2D4A756E65, -1) 000000000000 @ 2018-02-11T07:09:22.765Z}` //nolint:lll // ignore line length for tests if str != expected { t.Errorf("got unexpected string for Proposal. Expected:\n%v\nGot:\n%v", expected, str) } @@ -51,41 +61,49 @@ func TestProposalVerifySignature(t *testing.T) { prop := NewProposal( 4, 2, 2, - BlockID{[]byte{1, 2, 3}, PartSetHeader{777, []byte("proper")}}) - signBytes := prop.SignBytes("test_chain_id") + BlockID{tmrand.Bytes(tmhash.Size), PartSetHeader{777, tmrand.Bytes(tmhash.Size)}}) + p := prop.ToProto() + signBytes := ProposalSignBytes("test_chain_id", p) // sign it - err = privVal.SignProposal("test_chain_id", prop) + err = privVal.SignProposal("test_chain_id", p) require.NoError(t, err) + prop.Signature = p.Signature // verify the same proposal - valid := pubKey.VerifyBytes(signBytes, prop.Signature) + valid := pubKey.VerifySignature(signBytes, prop.Signature) require.True(t, valid) // serialize, deserialize and verify again.... 
- newProp := new(Proposal) - bs, err := cdc.MarshalBinaryLengthPrefixed(prop) + newProp := new(tmproto.Proposal) + pb := prop.ToProto() + + bs, err := proto.Marshal(pb) + require.NoError(t, err) + + err = proto.Unmarshal(bs, newProp) require.NoError(t, err) - err = cdc.UnmarshalBinaryLengthPrefixed(bs, &newProp) + + np, err := ProposalFromProto(newProp) require.NoError(t, err) // verify the transmitted proposal - newSignBytes := newProp.SignBytes("test_chain_id") + newSignBytes := ProposalSignBytes("test_chain_id", pb) require.Equal(t, string(signBytes), string(newSignBytes)) - valid = pubKey.VerifyBytes(newSignBytes, newProp.Signature) + valid = pubKey.VerifySignature(newSignBytes, np.Signature) require.True(t, valid) } func BenchmarkProposalWriteSignBytes(b *testing.B) { for i := 0; i < b.N; i++ { - testProposal.SignBytes("test_chain_id") + ProposalSignBytes("test_chain_id", pbp) } } func BenchmarkProposalSign(b *testing.B) { privVal := NewMockPV() for i := 0; i < b.N; i++ { - err := privVal.SignProposal("test_chain_id", testProposal) + err := privVal.SignProposal("test_chain_id", pbp) if err != nil { b.Error(err) } @@ -94,13 +112,13 @@ func BenchmarkProposalSign(b *testing.B) { func BenchmarkProposalVerifySignature(b *testing.B) { privVal := NewMockPV() - err := privVal.SignProposal("test_chain_id", testProposal) + err := privVal.SignProposal("test_chain_id", pbp) require.NoError(b, err) pubKey, err := privVal.GetPubKey() require.NoError(b, err) for i := 0; i < b.N; i++ { - pubKey.VerifyBytes(testProposal.SignBytes("test_chain_id"), testProposal.Signature) + pubKey.VerifySignature(ProposalSignBytes("test_chain_id", pbp), testProposal.Signature) } } @@ -113,7 +131,7 @@ func TestProposalValidateBasic(t *testing.T) { expectErr bool }{ {"Good Proposal", func(p *Proposal) {}, false}, - {"Invalid Type", func(p *Proposal) { p.Type = PrecommitType }, true}, + {"Invalid Type", func(p *Proposal) { p.Type = tmproto.PrecommitType }, true}, {"Invalid Height", func(p *Proposal) { p.Height = -1 }, true}, {"Invalid Round", func(p *Proposal) { p.Round = -1 }, true}, {"Invalid POLRound", func(p *Proposal) { p.POLRound = -2 }, true}, @@ -127,7 +145,7 @@ func TestProposalValidateBasic(t *testing.T) { p.Signature = make([]byte, MaxSignatureSize+1) }, true}, } - blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt64, tmhash.Sum([]byte("partshash"))) + blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) for _, tc := range testCases { tc := tc @@ -135,10 +153,40 @@ func TestProposalValidateBasic(t *testing.T) { prop := NewProposal( 4, 2, 2, blockID) - err := privVal.SignProposal("test_chain_id", prop) + p := prop.ToProto() + err := privVal.SignProposal("test_chain_id", p) + prop.Signature = p.Signature require.NoError(t, err) tc.malleateProposal(prop) assert.Equal(t, tc.expectErr, prop.ValidateBasic() != nil, "Validate Basic had an unexpected result") }) } } + +func TestProposalProtoBuf(t *testing.T) { + proposal := NewProposal(1, 2, 3, makeBlockID([]byte("hash"), 2, []byte("part_set_hash"))) + proposal.Signature = []byte("sig") + proposal2 := NewProposal(1, 2, 3, BlockID{}) + + testCases := []struct { + msg string + p1 *Proposal + expPass bool + }{ + {"success", proposal, true}, + {"success", proposal2, false}, // blockID cannot be empty + {"empty proposal failure validatebasic", &Proposal{}, false}, + {"nil proposal", nil, false}, + } + for _, tc := range testCases { + protoProposal := tc.p1.ToProto() + + p, err := 
ProposalFromProto(protoProposal) + if tc.expPass { + require.NoError(t, err) + require.Equal(t, tc.p1, p, tc.msg) + } else { + require.Error(t, err) + } + } +} diff --git a/types/proto3/block.pb.go b/types/proto3/block.pb.go deleted file mode 100644 index af3d5faf5..000000000 --- a/types/proto3/block.pb.go +++ /dev/null @@ -1,401 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: types/proto3/block.proto - -package proto3 - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type PartSetHeader struct { - Total int32 `protobuf:"varint,1,opt,name=Total,proto3" json:"Total,omitempty"` - Hash []byte `protobuf:"bytes,2,opt,name=Hash,proto3" json:"Hash,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } -func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } -func (*PartSetHeader) ProtoMessage() {} -func (*PartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_760f4d5ceb2a11f0, []int{0} -} -func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PartSetHeader.Unmarshal(m, b) -} -func (m *PartSetHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PartSetHeader.Marshal(b, m, deterministic) -} -func (m *PartSetHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_PartSetHeader.Merge(m, src) -} -func (m *PartSetHeader) XXX_Size() int { - return xxx_messageInfo_PartSetHeader.Size(m) -} -func (m *PartSetHeader) XXX_DiscardUnknown() { - xxx_messageInfo_PartSetHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_PartSetHeader proto.InternalMessageInfo - -func (m *PartSetHeader) GetTotal() int32 { - if m != nil { - return m.Total - } - return 0 -} - -func (m *PartSetHeader) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -type BlockID struct { - Hash []byte `protobuf:"bytes,1,opt,name=Hash,proto3" json:"Hash,omitempty"` - PartsHeader *PartSetHeader `protobuf:"bytes,2,opt,name=PartsHeader,proto3" json:"PartsHeader,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BlockID) Reset() { *m = BlockID{} } -func (m *BlockID) String() string { return proto.CompactTextString(m) } -func (*BlockID) ProtoMessage() {} -func (*BlockID) Descriptor() ([]byte, []int) { - return fileDescriptor_760f4d5ceb2a11f0, []int{1} -} -func (m *BlockID) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BlockID.Unmarshal(m, b) -} -func (m *BlockID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BlockID.Marshal(b, m, deterministic) -} -func (m *BlockID) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockID.Merge(m, src) -} -func (m *BlockID) XXX_Size() int { - return xxx_messageInfo_BlockID.Size(m) -} -func (m *BlockID) XXX_DiscardUnknown() { - xxx_messageInfo_BlockID.DiscardUnknown(m) -} - -var xxx_messageInfo_BlockID 
proto.InternalMessageInfo - -func (m *BlockID) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func (m *BlockID) GetPartsHeader() *PartSetHeader { - if m != nil { - return m.PartsHeader - } - return nil -} - -type Header struct { - // basic block info - Version *Version `protobuf:"bytes,1,opt,name=Version,proto3" json:"Version,omitempty"` - ChainID string `protobuf:"bytes,2,opt,name=ChainID,proto3" json:"ChainID,omitempty"` - Height int64 `protobuf:"varint,3,opt,name=Height,proto3" json:"Height,omitempty"` - Time *Timestamp `protobuf:"bytes,4,opt,name=Time,proto3" json:"Time,omitempty"` - // prev block info - LastBlockID *BlockID `protobuf:"bytes,5,opt,name=LastBlockID,proto3" json:"LastBlockID,omitempty"` - // hashes of block data - LastCommitHash []byte `protobuf:"bytes,6,opt,name=LastCommitHash,proto3" json:"LastCommitHash,omitempty"` - DataHash []byte `protobuf:"bytes,7,opt,name=DataHash,proto3" json:"DataHash,omitempty"` - // hashes from the app output from the prev block - ValidatorsHash []byte `protobuf:"bytes,8,opt,name=ValidatorsHash,proto3" json:"ValidatorsHash,omitempty"` - NextValidatorsHash []byte `protobuf:"bytes,9,opt,name=NextValidatorsHash,proto3" json:"NextValidatorsHash,omitempty"` - ConsensusHash []byte `protobuf:"bytes,10,opt,name=ConsensusHash,proto3" json:"ConsensusHash,omitempty"` - AppHash []byte `protobuf:"bytes,11,opt,name=AppHash,proto3" json:"AppHash,omitempty"` - LastResultsHash []byte `protobuf:"bytes,12,opt,name=LastResultsHash,proto3" json:"LastResultsHash,omitempty"` - // consensus info - EvidenceHash []byte `protobuf:"bytes,13,opt,name=EvidenceHash,proto3" json:"EvidenceHash,omitempty"` - ProposerAddress []byte `protobuf:"bytes,14,opt,name=ProposerAddress,proto3" json:"ProposerAddress,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Header) Reset() { *m = Header{} } -func (m *Header) String() string { return proto.CompactTextString(m) } -func (*Header) ProtoMessage() {} -func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_760f4d5ceb2a11f0, []int{2} -} -func (m *Header) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Header.Unmarshal(m, b) -} -func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Header.Marshal(b, m, deterministic) -} -func (m *Header) XXX_Merge(src proto.Message) { - xxx_messageInfo_Header.Merge(m, src) -} -func (m *Header) XXX_Size() int { - return xxx_messageInfo_Header.Size(m) -} -func (m *Header) XXX_DiscardUnknown() { - xxx_messageInfo_Header.DiscardUnknown(m) -} - -var xxx_messageInfo_Header proto.InternalMessageInfo - -func (m *Header) GetVersion() *Version { - if m != nil { - return m.Version - } - return nil -} - -func (m *Header) GetChainID() string { - if m != nil { - return m.ChainID - } - return "" -} - -func (m *Header) GetHeight() int64 { - if m != nil { - return m.Height - } - return 0 -} - -func (m *Header) GetTime() *Timestamp { - if m != nil { - return m.Time - } - return nil -} - -func (m *Header) GetLastBlockID() *BlockID { - if m != nil { - return m.LastBlockID - } - return nil -} - -func (m *Header) GetLastCommitHash() []byte { - if m != nil { - return m.LastCommitHash - } - return nil -} - -func (m *Header) GetDataHash() []byte { - if m != nil { - return m.DataHash - } - return nil -} - -func (m *Header) GetValidatorsHash() []byte { - if m != nil { - return m.ValidatorsHash - } - return nil -} - -func (m *Header) 
GetNextValidatorsHash() []byte { - if m != nil { - return m.NextValidatorsHash - } - return nil -} - -func (m *Header) GetConsensusHash() []byte { - if m != nil { - return m.ConsensusHash - } - return nil -} - -func (m *Header) GetAppHash() []byte { - if m != nil { - return m.AppHash - } - return nil -} - -func (m *Header) GetLastResultsHash() []byte { - if m != nil { - return m.LastResultsHash - } - return nil -} - -func (m *Header) GetEvidenceHash() []byte { - if m != nil { - return m.EvidenceHash - } - return nil -} - -func (m *Header) GetProposerAddress() []byte { - if m != nil { - return m.ProposerAddress - } - return nil -} - -type Version struct { - Block uint64 `protobuf:"varint,1,opt,name=Block,proto3" json:"Block,omitempty"` - App uint64 `protobuf:"varint,2,opt,name=App,proto3" json:"App,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_760f4d5ceb2a11f0, []int{3} -} -func (m *Version) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Version.Unmarshal(m, b) -} -func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Version.Marshal(b, m, deterministic) -} -func (m *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(m, src) -} -func (m *Version) XXX_Size() int { - return xxx_messageInfo_Version.Size(m) -} -func (m *Version) XXX_DiscardUnknown() { - xxx_messageInfo_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_Version proto.InternalMessageInfo - -func (m *Version) GetBlock() uint64 { - if m != nil { - return m.Block - } - return 0 -} - -func (m *Version) GetApp() uint64 { - if m != nil { - return m.App - } - return 0 -} - -// Timestamp wraps how amino encodes time. -// This is the protobuf well-known type protobuf/timestamp.proto -// See: -// https://github.com/google/protobuf/blob/d2980062c859649523d5fd51d6b55ab310e47482/src/google/protobuf/timestamp.proto#L123-L135 -// NOTE/XXX: nanos do not get skipped if they are zero in amino. 
-type Timestamp struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_760f4d5ceb2a11f0, []int{4} -} -func (m *Timestamp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Timestamp.Unmarshal(m, b) -} -func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) -} -func (m *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(m, src) -} -func (m *Timestamp) XXX_Size() int { - return xxx_messageInfo_Timestamp.Size(m) -} -func (m *Timestamp) XXX_DiscardUnknown() { - xxx_messageInfo_Timestamp.DiscardUnknown(m) -} - -var xxx_messageInfo_Timestamp proto.InternalMessageInfo - -func (m *Timestamp) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Timestamp) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*PartSetHeader)(nil), "tendermint.types.proto3.PartSetHeader") - proto.RegisterType((*BlockID)(nil), "tendermint.types.proto3.BlockID") - proto.RegisterType((*Header)(nil), "tendermint.types.proto3.Header") - proto.RegisterType((*Version)(nil), "tendermint.types.proto3.Version") - proto.RegisterType((*Timestamp)(nil), "tendermint.types.proto3.Timestamp") -} - -func init() { proto.RegisterFile("types/proto3/block.proto", fileDescriptor_760f4d5ceb2a11f0) } - -var fileDescriptor_760f4d5ceb2a11f0 = []byte{ - // 468 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xdf, 0x8b, 0x13, 0x31, - 0x10, 0xc7, 0x59, 0xbb, 0x6d, 0xaf, 0xb3, 0xed, 0x29, 0x83, 0xe8, 0xe2, 0x53, 0x59, 0xe4, 0xe8, - 0x8b, 0x5b, 0xbc, 0x03, 0x41, 0x7d, 0xea, 0x0f, 0xa1, 0x07, 0x22, 0x47, 0x3c, 0xee, 0xc1, 0xb7, - 0xb4, 0x1b, 0xda, 0x60, 0x37, 0x59, 0x92, 0x54, 0xf4, 0x1f, 0xf4, 0xef, 0x92, 0x4c, 0xb6, 0xbd, - 0x6e, 0xb1, 0xdc, 0x53, 0xf3, 0x9d, 0xf9, 0xcc, 0x37, 0xb3, 0x93, 0x29, 0xa4, 0xee, 0x4f, 0x25, - 0xec, 0xb8, 0x32, 0xda, 0xe9, 0x9b, 0xf1, 0x72, 0xab, 0x57, 0x3f, 0x73, 0x12, 0xf8, 0xda, 0x09, - 0x55, 0x08, 0x53, 0x4a, 0xe5, 0x72, 0x82, 0x42, 0xfc, 0x26, 0xfb, 0x08, 0x83, 0x3b, 0x6e, 0xdc, - 0x77, 0xe1, 0x16, 0x82, 0x17, 0xc2, 0xe0, 0x4b, 0x68, 0xdf, 0x6b, 0xc7, 0xb7, 0x69, 0x34, 0x8c, - 0x46, 0x6d, 0x16, 0x04, 0x22, 0xc4, 0x0b, 0x6e, 0x37, 0xe9, 0xb3, 0x61, 0x34, 0xea, 0x33, 0x3a, - 0x67, 0x6b, 0xe8, 0x4e, 0xfd, 0x15, 0xb7, 0xf3, 0x43, 0x3a, 0x7a, 0x4c, 0xe3, 0x02, 0x12, 0xef, - 0x6c, 0x83, 0x2f, 0x55, 0x26, 0xd7, 0x57, 0xf9, 0x99, 0x46, 0xf2, 0x46, 0x17, 0xec, 0xb8, 0x34, - 0xfb, 0x1b, 0x43, 0xa7, 0xee, 0xee, 0x13, 0x74, 0x1f, 0x84, 0xb1, 0x52, 0x2b, 0xba, 0x2b, 0xb9, - 0x1e, 0x9e, 0x35, 0xac, 0x39, 0xb6, 0x2f, 0xc0, 0x14, 0xba, 0xb3, 0x0d, 0x97, 0xea, 0x76, 0x4e, - 0xcd, 0xf4, 0xd8, 0x5e, 0xe2, 0x2b, 0xef, 0x2f, 0xd7, 0x1b, 0x97, 0xb6, 0x86, 0xd1, 0xa8, 0xc5, - 0x6a, 0x85, 0x1f, 0x20, 0xbe, 0x97, 0xa5, 0x48, 0x63, 0xba, 0x2a, 0x3b, 0x7b, 0x95, 0x87, 0xac, - 0xe3, 0x65, 0xc5, 0x88, 0xc7, 0x29, 0x24, 0x5f, 0xb9, 0x75, 0xf5, 0x74, 0xd2, 0xf6, 0x13, 0x9d, - 0xd6, 0x1c, 0x3b, 0x2e, 0xc2, 
0x2b, 0xb8, 0xf4, 0x72, 0xa6, 0xcb, 0x52, 0x3a, 0x1a, 0x6e, 0x87, - 0x86, 0x7b, 0x12, 0xc5, 0x37, 0x70, 0x31, 0xe7, 0x8e, 0x13, 0xd1, 0x25, 0xe2, 0xa0, 0xbd, 0xc7, - 0x03, 0xdf, 0xca, 0x82, 0x3b, 0x6d, 0x2c, 0x11, 0x17, 0xc1, 0xa3, 0x19, 0xc5, 0x1c, 0xf0, 0x9b, - 0xf8, 0xed, 0x4e, 0xd8, 0x1e, 0xb1, 0xff, 0xc9, 0xe0, 0x5b, 0x18, 0xcc, 0xb4, 0xb2, 0x42, 0xd9, - 0x5d, 0x40, 0x81, 0xd0, 0x66, 0xd0, 0xcf, 0x7b, 0x52, 0x55, 0x94, 0x4f, 0x28, 0xbf, 0x97, 0x38, - 0x82, 0xe7, 0xfe, 0x2b, 0x98, 0xb0, 0xbb, 0xad, 0x0b, 0x0e, 0x7d, 0x22, 0x4e, 0xc3, 0x98, 0x41, - 0xff, 0xcb, 0x2f, 0x59, 0x08, 0xb5, 0x12, 0x84, 0x0d, 0x08, 0x6b, 0xc4, 0xbc, 0xdb, 0x9d, 0xd1, - 0x95, 0xb6, 0xc2, 0x4c, 0x8a, 0xc2, 0x08, 0x6b, 0xd3, 0xcb, 0xe0, 0x76, 0x12, 0xce, 0xde, 0x1f, - 0xb6, 0xc7, 0xaf, 0x39, 0x4d, 0x9a, 0xd6, 0x28, 0x66, 0x41, 0xe0, 0x0b, 0x68, 0x4d, 0xaa, 0x8a, - 0xd6, 0x23, 0x66, 0xfe, 0x98, 0x7d, 0x86, 0xde, 0xe1, 0x75, 0xfd, 0x17, 0x59, 0xb1, 0xd2, 0xaa, - 0xb0, 0x54, 0xd6, 0x62, 0x7b, 0xe9, 0xed, 0x14, 0x57, 0xda, 0x52, 0x69, 0x9b, 0x05, 0x31, 0x1d, - 0xff, 0x78, 0xb7, 0x96, 0x6e, 0xb3, 0x5b, 0xe6, 0x2b, 0x5d, 0x8e, 0x1f, 0x9f, 0xbf, 0x71, 0x3c, - 0xfa, 0xcb, 0x2e, 0x3b, 0xe1, 0xf7, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7d, 0x0b, 0x4e, 0x15, - 0xc9, 0x03, 0x00, 0x00, -} diff --git a/types/proto3/block.proto b/types/proto3/block.proto deleted file mode 100644 index adaa0a00d..000000000 --- a/types/proto3/block.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; - -package tendermint.types.proto3; -option go_package = "github.com/tendermint/tendermint/types/proto3"; - -message PartSetHeader { - int32 Total = 1; - bytes Hash = 2; -} - -message BlockID { - bytes Hash = 1; - PartSetHeader PartsHeader = 2; -} - -message Header { - // basic block info - Version Version = 1; - string ChainID = 2; - int64 Height = 3; - Timestamp Time = 4; - - // prev block info - BlockID LastBlockID = 5; - - // hashes of block data - bytes LastCommitHash = 6; // commit from validators from the last block - bytes DataHash = 7; // transactions - - // hashes from the app output from the prev block - bytes ValidatorsHash = 8; // validators for the current block - bytes NextValidatorsHash = 9; // validators for the next block - bytes ConsensusHash = 10; // consensus params for current block - bytes AppHash = 11; // state after txs from the previous block - bytes LastResultsHash = 12; // root hash of all results from the txs from the previous block - - // consensus info - bytes EvidenceHash = 13; // evidence included in the block - bytes ProposerAddress = 14; // original proposer of the block -} - -message Version { - uint64 Block = 1; - uint64 App = 2; -} - -// Timestamp wraps how amino encodes time. -// This is the protobuf well-known type protobuf/timestamp.proto -// See: -// https://github.com/google/protobuf/blob/d2980062c859649523d5fd51d6b55ab310e47482/src/google/protobuf/timestamp.proto#L123-L135 -// NOTE/XXX: nanos do not get skipped if they are zero in amino. -message Timestamp { - int64 seconds = 1; - int32 nanos = 2; -} diff --git a/types/proto3/result.go b/types/proto3/result.go deleted file mode 100644 index ee5269bd3..000000000 --- a/types/proto3/result.go +++ /dev/null @@ -1,74 +0,0 @@ -package proto3 - -import ( - "bytes" - "encoding/json" - - "github.com/gogo/protobuf/jsonpb" -) - -//--------------------------------------------------------------------------- -// override JSON marshalling so we emit defaults (ie. 
disable omitempty) - -var ( - jsonpbMarshaller = jsonpb.Marshaler{ - EnumsAsInts: true, - EmitDefaults: true, - } - jsonpbUnmarshaller = jsonpb.Unmarshaler{} -) - -func (r *PartSetHeader) MarshalJSON() ([]byte, error) { - s, err := jsonpbMarshaller.MarshalToString(r) - return []byte(s), err -} - -func (r *PartSetHeader) UnmarshalJSON(b []byte) error { - reader := bytes.NewBuffer(b) - return jsonpbUnmarshaller.Unmarshal(reader, r) -} - -func (r *Header) MarshalJSON() ([]byte, error) { - s, err := jsonpbMarshaller.MarshalToString(r) - return []byte(s), err -} - -func (r *Header) UnmarshalJSON(b []byte) error { - reader := bytes.NewBuffer(b) - return jsonpbUnmarshaller.Unmarshal(reader, r) -} - -func (r *Version) MarshalJSON() ([]byte, error) { - s, err := jsonpbMarshaller.MarshalToString(r) - return []byte(s), err -} - -func (r *Version) UnmarshalJSON(b []byte) error { - reader := bytes.NewBuffer(b) - return jsonpbUnmarshaller.Unmarshal(reader, r) -} - -func (r *Timestamp) MarshalJSON() ([]byte, error) { - s, err := jsonpbMarshaller.MarshalToString(r) - return []byte(s), err -} - -func (r *Timestamp) UnmarshalJSON(b []byte) error { - reader := bytes.NewBuffer(b) - return jsonpbUnmarshaller.Unmarshal(reader, r) -} - -// Some compile time assertions to ensure we don't -// have accidental runtime surprises later on. -// jsonEncodingRoundTripper ensures that asserted -// interfaces implement both MarshalJSON and UnmarshalJSON - -type jsonRoundTripper interface { - json.Marshaler - json.Unmarshaler -} - -var _ jsonRoundTripper = (*PartSetHeader)(nil) -var _ jsonRoundTripper = (*Header)(nil) -var _ jsonRoundTripper = (*Version)(nil) -var _ jsonRoundTripper = (*Timestamp)(nil) diff --git a/types/proto3_test.go b/types/proto3_test.go deleted file mode 100644 index f5db1a83f..000000000 --- a/types/proto3_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package types - -import ( - "testing" - "time" - - "github.com/golang/protobuf/proto" // nolint: staticcheck // still used by gogoproto - "github.com/stretchr/testify/assert" - - "github.com/tendermint/tendermint/types/proto3" -) - -func TestProto3Compatibility(t *testing.T) { - tm, err := time.Parse("Mon Jan 2 15:04:05 -0700 MST 2006", "Mon Jan 2 15:04:05 -0700 MST 2006") - assert.NoError(t, err) - // add some nanos, otherwise protobuf will skip over this while amino (still) won't! 
- tm = tm.Add(50000 * time.Nanosecond) - seconds := tm.Unix() - nanos := int32(tm.Nanosecond()) - t.Log("seconds", seconds) - t.Log("nanos", nanos) - - pbHeader := proto3.Header{ - ChainID: "cosmos", - Height: 150, - Time: &proto3.Timestamp{Seconds: seconds, Nanos: nanos}, - LastBlockID: &proto3.BlockID{ - Hash: []byte("some serious hashing"), - PartsHeader: &proto3.PartSetHeader{ - Total: 8, - Hash: []byte("some more serious hashing"), - }, - }, - LastCommitHash: []byte("commit hash"), - DataHash: []byte("data hash"), - ValidatorsHash: []byte("validators hash"), - } - aminoHeader := Header{ - ChainID: "cosmos", - Height: 150, - Time: tm, - LastBlockID: BlockID{ - Hash: []byte("some serious hashing"), - PartsHeader: PartSetHeader{ - Total: 8, - Hash: []byte("some more serious hashing"), - }, - }, - LastCommitHash: []byte("commit hash"), - DataHash: []byte("data hash"), - ValidatorsHash: []byte("validators hash"), - } - ab, err := cdc.MarshalBinaryBare(aminoHeader) - assert.NoError(t, err, "unexpected error") - - pb, err := proto.Marshal(&pbHeader) - assert.NoError(t, err, "unexpected error") - // This works: - assert.Equal(t, ab, pb, "encoding doesn't match") - - emptyLastBlockPb := proto3.Header{ - ChainID: "cosmos", - Height: 150, - Time: &proto3.Timestamp{Seconds: seconds, Nanos: nanos}, - LastCommitHash: []byte("commit hash"), - DataHash: []byte("data hash"), - ValidatorsHash: []byte("validators hash"), - } - emptyLastBlockAm := Header{ - ChainID: "cosmos", - Height: 150, - Time: tm, - LastCommitHash: []byte("commit hash"), - DataHash: []byte("data hash"), - ValidatorsHash: []byte("validators hash"), - } - - ab, err = cdc.MarshalBinaryBare(emptyLastBlockAm) - assert.NoError(t, err, "unexpected error") - - pb, err = proto.Marshal(&emptyLastBlockPb) - assert.NoError(t, err, "unexpected error") - // This works: - assert.Equal(t, ab, pb, "encoding doesn't match") - - pb, err = proto.Marshal(&proto3.Header{}) - assert.NoError(t, err, "unexpected error") - t.Log(pb) - - // While in protobuf Header{} encodes to an empty byte slice it does not in amino: - ab, err = cdc.MarshalBinaryBare(Header{}) - assert.NoError(t, err, "unexpected error") - t.Log(ab) - - pb, err = proto.Marshal(&proto3.Timestamp{}) - assert.NoError(t, err, "unexpected error") - t.Log(pb) - - ab, err = cdc.MarshalBinaryBare(time.Time{}) - assert.NoError(t, err, "unexpected error") - t.Log(ab) -} diff --git a/types/protobuf.go b/types/protobuf.go index 52815593f..1ee094a9f 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -1,37 +1,27 @@ package types import ( - "fmt" - "reflect" - "time" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" + cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/secp256k1" - "github.com/tendermint/tendermint/crypto/sr25519" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) //------------------------------------------------------- // Use strings to distinguish types in ABCI messages const ( - ABCIEvidenceTypeDuplicateVote = "duplicate/vote" - ABCIEvidenceTypeMock = "mock/evidence" -) - -const ( - ABCIPubKeyTypeEd25519 = "ed25519" - ABCIPubKeyTypeSr25519 = "sr25519" - ABCIPubKeyTypeSecp256k1 = "secp256k1" + ABCIPubKeyTypeEd25519 = ed25519.KeyType + ABCIPubKeyTypeSecp256k1 = secp256k1.KeyType ) // TODO: Make non-global by allowing for registration of more pubkey types -var ABCIPubKeyTypesToAminoNames = map[string]string{ - 
ABCIPubKeyTypeEd25519: ed25519.PubKeyAminoName, - ABCIPubKeyTypeSr25519: sr25519.PubKeyAminoName, - ABCIPubKeyTypeSecp256k1: secp256k1.PubKeyAminoName, +var ABCIPubKeyTypesToNames = map[string]string{ + ABCIPubKeyTypeEd25519: ed25519.PubKeyName, + ABCIPubKeyTypeSecp256k1: secp256k1.PubKeyName, } //------------------------------------------------------- @@ -42,17 +32,14 @@ var TM2PB = tm2pb{} type tm2pb struct{} -func (tm2pb) Header(header *Header) abci.Header { - return abci.Header{ - Version: abci.Version{ - Block: header.Version.Block.Uint64(), - App: header.Version.App.Uint64(), - }, +func (tm2pb) Header(header *Header) tmproto.Header { + return tmproto.Header{ + Version: header.Version, ChainID: header.ChainID, Height: header.Height, Time: header.Time, - LastBlockId: TM2PB.BlockID(header.LastBlockID), + LastBlockId: header.LastBlockID.ToProto(), LastCommitHash: header.LastCommitHash, DataHash: header.DataHash, @@ -75,52 +62,32 @@ func (tm2pb) Validator(val *Validator) abci.Validator { } } -func (tm2pb) BlockID(blockID BlockID) abci.BlockID { - return abci.BlockID{ - Hash: blockID.Hash, - PartsHeader: TM2PB.PartSetHeader(blockID.PartsHeader), +func (tm2pb) BlockID(blockID BlockID) tmproto.BlockID { + return tmproto.BlockID{ + Hash: blockID.Hash, + PartSetHeader: TM2PB.PartSetHeader(blockID.PartSetHeader), } } -func (tm2pb) PartSetHeader(header PartSetHeader) abci.PartSetHeader { - return abci.PartSetHeader{ - Total: int32(header.Total), +func (tm2pb) PartSetHeader(header PartSetHeader) tmproto.PartSetHeader { + return tmproto.PartSetHeader{ + Total: header.Total, Hash: header.Hash, } } // XXX: panics on unknown pubkey type func (tm2pb) ValidatorUpdate(val *Validator) abci.ValidatorUpdate { + pk, err := cryptoenc.PubKeyToProto(val.PubKey) + if err != nil { + panic(err) + } return abci.ValidatorUpdate{ - PubKey: TM2PB.PubKey(val.PubKey), + PubKey: pk, Power: val.VotingPower, } } -// XXX: panics on nil or unknown pubkey type -// TODO: add cases when new pubkey types are added to crypto -func (tm2pb) PubKey(pubKey crypto.PubKey) abci.PubKey { - switch pk := pubKey.(type) { - case ed25519.PubKeyEd25519: - return abci.PubKey{ - Type: ABCIPubKeyTypeEd25519, - Data: pk[:], - } - case sr25519.PubKeySr25519: - return abci.PubKey{ - Type: ABCIPubKeyTypeSr25519, - Data: pk[:], - } - case secp256k1.PubKeySecp256k1: - return abci.PubKey{ - Type: ABCIPubKeyTypeSecp256k1, - Data: pk[:], - } - default: - panic(fmt.Sprintf("unknown pubkey type: %v %v", pubKey, reflect.TypeOf(pubKey))) - } -} - // XXX: panics on nil or unknown pubkey type func (tm2pb) ValidatorUpdates(vals *ValidatorSet) []abci.ValidatorUpdate { validators := make([]abci.ValidatorUpdate, vals.Size()) @@ -130,56 +97,23 @@ func (tm2pb) ValidatorUpdates(vals *ValidatorSet) []abci.ValidatorUpdate { return validators } -func (tm2pb) ConsensusParams(params *ConsensusParams) *abci.ConsensusParams { +func (tm2pb) ConsensusParams(params *tmproto.ConsensusParams) *abci.ConsensusParams { return &abci.ConsensusParams{ Block: &abci.BlockParams{ MaxBytes: params.Block.MaxBytes, MaxGas: params.Block.MaxGas, }, - Evidence: &abci.EvidenceParams{ - MaxAgeNumBlocks: params.Evidence.MaxAgeNumBlocks, - MaxAgeDuration: params.Evidence.MaxAgeDuration, - }, - Validator: &abci.ValidatorParams{ - PubKeyTypes: params.Validator.PubKeyTypes, - }, - } -} - -// ABCI Evidence includes information from the past that's not included in the evidence itself -// so Evidence types stays compact. 
-// XXX: panics on nil or unknown pubkey type -func (tm2pb) Evidence(ev Evidence, valSet *ValidatorSet, evTime time.Time) abci.Evidence { - _, val := valSet.GetByAddress(ev.Address()) - if val == nil { - // should already have checked this - panic(val) - } - - // set type - var evType string - switch ev.(type) { - case *DuplicateVoteEvidence: - evType = ABCIEvidenceTypeDuplicateVote - case MockEvidence: - // XXX: not great to have test types in production paths ... - evType = ABCIEvidenceTypeMock - default: - panic(fmt.Sprintf("Unknown evidence type: %v %v", ev, reflect.TypeOf(ev))) - } - - return abci.Evidence{ - Type: evType, - Validator: TM2PB.Validator(val), - Height: ev.Height(), - Time: evTime, - TotalVotingPower: valSet.TotalVotingPower(), + Evidence: &params.Evidence, + Validator: &params.Validator, } } // XXX: panics on nil or unknown pubkey type func (tm2pb) NewValidatorUpdate(pubkey crypto.PubKey, power int64) abci.ValidatorUpdate { - pubkeyABCI := TM2PB.PubKey(pubkey) + pubkeyABCI, err := cryptoenc.PubKeyToProto(pubkey) + if err != nil { + panic(err) + } return abci.ValidatorUpdate{ PubKey: pubkeyABCI, Power: power, @@ -194,41 +128,10 @@ var PB2TM = pb2tm{} type pb2tm struct{} -func (pb2tm) PubKey(pubKey abci.PubKey) (crypto.PubKey, error) { - switch pubKey.Type { - case ABCIPubKeyTypeEd25519: - if len(pubKey.Data) != ed25519.PubKeyEd25519Size { - return nil, fmt.Errorf("invalid size for PubKeyEd25519. Got %d, expected %d", - len(pubKey.Data), ed25519.PubKeyEd25519Size) - } - var pk ed25519.PubKeyEd25519 - copy(pk[:], pubKey.Data) - return pk, nil - case ABCIPubKeyTypeSr25519: - if len(pubKey.Data) != sr25519.PubKeySr25519Size { - return nil, fmt.Errorf("invalid size for PubKeySr25519. Got %d, expected %d", - len(pubKey.Data), sr25519.PubKeySr25519Size) - } - var pk sr25519.PubKeySr25519 - copy(pk[:], pubKey.Data) - return pk, nil - case ABCIPubKeyTypeSecp256k1: - if len(pubKey.Data) != secp256k1.PubKeySecp256k1Size { - return nil, fmt.Errorf("invalid size for PubKeySecp256k1. 
Got %d, expected %d", - len(pubKey.Data), secp256k1.PubKeySecp256k1Size) - } - var pk secp256k1.PubKeySecp256k1 - copy(pk[:], pubKey.Data) - return pk, nil - default: - return nil, fmt.Errorf("unknown pubkey type %v", pubKey.Type) - } -} - func (pb2tm) ValidatorUpdates(vals []abci.ValidatorUpdate) ([]*Validator, error) { tmVals := make([]*Validator, len(vals)) for i, v := range vals { - pub, err := PB2TM.PubKey(v.PubKey) + pub, err := cryptoenc.PubKeyFromProto(v.PubKey) if err != nil { return nil, err } diff --git a/types/protobuf_test.go b/types/protobuf_test.go index 6f6e6198b..c617751e4 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -2,33 +2,29 @@ package types import ( "testing" - "time" - "github.com/golang/protobuf/proto" // nolint: staticcheck // still used by gogoproto "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - amino "github.com/tendermint/go-amino" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/secp256k1" - "github.com/tendermint/tendermint/version" + cryptoenc "github.com/tendermint/tendermint/crypto/encoding" ) func TestABCIPubKey(t *testing.T) { pkEd := ed25519.GenPrivKey().PubKey() - pkSecp := secp256k1.GenPrivKey().PubKey() - testABCIPubKey(t, pkEd, ABCIPubKeyTypeEd25519) - testABCIPubKey(t, pkSecp, ABCIPubKeyTypeSecp256k1) + err := testABCIPubKey(t, pkEd, ABCIPubKeyTypeEd25519) + assert.NoError(t, err) } -func testABCIPubKey(t *testing.T, pk crypto.PubKey, typeStr string) { - abciPubKey := TM2PB.PubKey(pk) - pk2, err := PB2TM.PubKey(abciPubKey) - assert.Nil(t, err) - assert.Equal(t, pk, pk2) +func testABCIPubKey(t *testing.T, pk crypto.PubKey, typeStr string) error { + abciPubKey, err := cryptoenc.PubKeyToProto(pk) + require.NoError(t, err) + pk2, err := cryptoenc.PubKeyFromProto(abciPubKey) + require.NoError(t, err) + require.Equal(t, pk, pk2) + return nil } func TestABCIValidators(t *testing.T) { @@ -54,107 +50,24 @@ func TestABCIValidators(t *testing.T) { tmVals, err = PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{abciVal}) assert.Nil(t, err) assert.Equal(t, tmValExpected, tmVals[0]) - - // val with incorrect pubkey data - abciVal = TM2PB.ValidatorUpdate(tmVal) - abciVal.PubKey.Data = []byte("incorrect!") - tmVals, err = PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{abciVal}) - assert.NotNil(t, err) - assert.Nil(t, tmVals) } func TestABCIConsensusParams(t *testing.T) { cp := DefaultConsensusParams() abciCP := TM2PB.ConsensusParams(cp) - cp2 := cp.Update(abciCP) + cp2 := UpdateConsensusParams(*cp, abciCP) assert.Equal(t, *cp, cp2) } -func newHeader( - height int64, commitHash, dataHash, evidenceHash []byte, -) *Header { - return &Header{ - Height: height, - LastCommitHash: commitHash, - DataHash: dataHash, - EvidenceHash: evidenceHash, - } -} - -func TestABCIHeader(t *testing.T) { - // build a full header - var height int64 = 5 - header := newHeader(height, []byte("lastCommitHash"), []byte("dataHash"), []byte("evidenceHash")) - protocolVersion := version.Consensus{Block: 7, App: 8} - timestamp := time.Now() - lastBlockID := BlockID{ - Hash: []byte("hash"), - PartsHeader: PartSetHeader{ - Total: 10, - Hash: []byte("hash"), - }, - } - header.Populate( - protocolVersion, "chainID", timestamp, lastBlockID, - []byte("valHash"), []byte("nextValHash"), - []byte("consHash"), []byte("appHash"), []byte("lastResultsHash"), - []byte("proposerAddress"), - ) - - cdc := amino.NewCodec() - 
headerBz := cdc.MustMarshalBinaryBare(header) - - pbHeader := TM2PB.Header(header) - pbHeaderBz, err := proto.Marshal(&pbHeader) - assert.NoError(t, err) - - // assert some fields match - assert.EqualValues(t, protocolVersion.Block, pbHeader.Version.Block) - assert.EqualValues(t, protocolVersion.App, pbHeader.Version.App) - assert.EqualValues(t, "chainID", pbHeader.ChainID) - assert.EqualValues(t, height, pbHeader.Height) - assert.EqualValues(t, timestamp, pbHeader.Time) - assert.EqualValues(t, lastBlockID.Hash, pbHeader.LastBlockId.Hash) - assert.EqualValues(t, []byte("lastCommitHash"), pbHeader.LastCommitHash) - assert.Equal(t, []byte("proposerAddress"), pbHeader.ProposerAddress) - - // assert the encodings match - // NOTE: they don't yet because Amino encodes - // int64 as zig-zag and we're using non-zigzag in the protobuf. - // See https://github.com/tendermint/tendermint/issues/2682 - _, _ = headerBz, pbHeaderBz - // assert.EqualValues(t, headerBz, pbHeaderBz) - -} - -func TestABCIEvidence(t *testing.T) { - val := NewMockPV() - blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) - blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) - const chainID = "mychain" - pubKey, err := val.GetPubKey() - require.NoError(t, err) - ev := &DuplicateVoteEvidence{ - PubKey: pubKey, - VoteA: makeVote(t, val, chainID, 0, 10, 2, 1, blockID), - VoteB: makeVote(t, val, chainID, 0, 10, 2, 1, blockID2), - } - abciEv := TM2PB.Evidence( - ev, - NewValidatorSet([]*Validator{NewValidator(pubKey, 10)}), - time.Now(), - ) - - assert.Equal(t, "duplicate/vote", abciEv.Type) -} - type pubKeyEddie struct{} -func (pubKeyEddie) Address() Address { return []byte{} } -func (pubKeyEddie) Bytes() []byte { return []byte{} } -func (pubKeyEddie) VerifyBytes(msg []byte, sig []byte) bool { return false } -func (pubKeyEddie) Equals(crypto.PubKey) bool { return false } +func (pubKeyEddie) Address() Address { return []byte{} } +func (pubKeyEddie) Bytes() []byte { return []byte{} } +func (pubKeyEddie) VerifySignature(msg []byte, sig []byte) bool { return false } +func (pubKeyEddie) Equals(crypto.PubKey) bool { return false } +func (pubKeyEddie) String() string { return "" } +func (pubKeyEddie) Type() string { return "pubKeyEddie" } func TestABCIValidatorFromPubKeyAndPower(t *testing.T) { pubkey := ed25519.GenPrivKey().PubKey() diff --git a/types/results.go b/types/results.go index 11ddbcea9..9181450bc 100644 --- a/types/results.go +++ b/types/results.go @@ -3,63 +3,29 @@ package types import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/libs/bytes" ) -//----------------------------------------------------------------------------- +// ABCIResults wraps the deliver tx results to return a proof. +type ABCIResults []*abci.ResponseDeliverTx -// ABCIResult is the deterministic component of a ResponseDeliverTx. -// TODO: add tags and other fields -// https://github.com/tendermint/tendermint/issues/1007 -type ABCIResult struct { - Code uint32 `json:"code"` - Data bytes.HexBytes `json:"data"` -} - -// Bytes returns the amino encoded ABCIResult -func (a ABCIResult) Bytes() []byte { - return cdcEncode(a) -} - -// ABCIResults wraps the deliver tx results to return a proof -type ABCIResults []ABCIResult - -// NewResults creates ABCIResults from the list of ResponseDeliverTx. +// NewResults strips non-deterministic fields from ResponseDeliverTx responses +// and returns ABCIResults. 
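// Editor's illustration (a hedged sketch, not part of this diff): two
// responses that differ only in non-deterministic fields such as Log or
// Events yield identical ABCIResults, and therefore identical hashes:
//
//	r1 := &abci.ResponseDeliverTx{Code: 0, Data: []byte("ok"), Log: "debug A"}
//	r2 := &abci.ResponseDeliverTx{Code: 0, Data: []byte("ok"), Log: "debug B"}
//	h1 := NewResults([]*abci.ResponseDeliverTx{r1}).Hash()
//	h2 := NewResults([]*abci.ResponseDeliverTx{r2}).Hash()
//	// bytes.Equal(h1, h2) == true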
func NewResults(responses []*abci.ResponseDeliverTx) ABCIResults { res := make(ABCIResults, len(responses)) for i, d := range responses { - res[i] = NewResultFromResponse(d) + res[i] = deterministicResponseDeliverTx(d) } return res } -// NewResultFromResponse creates ABCIResult from ResponseDeliverTx. -func NewResultFromResponse(response *abci.ResponseDeliverTx) ABCIResult { - return ABCIResult{ - Code: response.Code, - Data: response.Data, - } -} - -// Bytes serializes the ABCIResponse using amino -func (a ABCIResults) Bytes() []byte { - bz, err := cdc.MarshalBinaryLengthPrefixed(a) - if err != nil { - panic(err) - } - return bz -} - -// Hash returns a merkle hash of all results +// Hash returns a merkle hash of all results. func (a ABCIResults) Hash() []byte { - // NOTE: we copy the impl of the merkle tree for txs - - // we should be consistent and either do it for both or not. - return merkle.SimpleHashFromByteSlices(a.toByteSlices()) + return merkle.HashFromByteSlices(a.toByteSlices()) } // ProveResult returns a merkle proof of one result from the set -func (a ABCIResults) ProveResult(i int) merkle.SimpleProof { - _, proofs := merkle.SimpleProofsFromByteSlices(a.toByteSlices()) +func (a ABCIResults) ProveResult(i int) merkle.Proof { + _, proofs := merkle.ProofsFromByteSlices(a.toByteSlices()) return *proofs[i] } @@ -67,7 +33,22 @@ func (a ABCIResults) toByteSlices() [][]byte { l := len(a) bzs := make([][]byte, l) for i := 0; i < l; i++ { - bzs[i] = a[i].Bytes() + bz, err := a[i].Marshal() + if err != nil { + panic(err) + } + bzs[i] = bz } return bzs } + +// deterministicResponseDeliverTx strips non-deterministic fields from +// ResponseDeliverTx and returns another ResponseDeliverTx. +func deterministicResponseDeliverTx(response *abci.ResponseDeliverTx) *abci.ResponseDeliverTx { + return &abci.ResponseDeliverTx{ + Code: response.Code, + Data: response.Data, + GasWanted: response.GasWanted, + GasUsed: response.GasUsed, + } +} diff --git a/types/results_test.go b/types/results_test.go index 9ecfe35ca..5b1be3466 100644 --- a/types/results_test.go +++ b/types/results_test.go @@ -10,26 +10,31 @@ import ( ) func TestABCIResults(t *testing.T) { - a := ABCIResult{Code: 0, Data: nil} - b := ABCIResult{Code: 0, Data: []byte{}} - c := ABCIResult{Code: 0, Data: []byte("one")} - d := ABCIResult{Code: 14, Data: nil} - e := ABCIResult{Code: 14, Data: []byte("foo")} - f := ABCIResult{Code: 14, Data: []byte("bar")} + a := &abci.ResponseDeliverTx{Code: 0, Data: nil} + b := &abci.ResponseDeliverTx{Code: 0, Data: []byte{}} + c := &abci.ResponseDeliverTx{Code: 0, Data: []byte("one")} + d := &abci.ResponseDeliverTx{Code: 14, Data: nil} + e := &abci.ResponseDeliverTx{Code: 14, Data: []byte("foo")} + f := &abci.ResponseDeliverTx{Code: 14, Data: []byte("bar")} // Nil and []byte{} should produce the same bytes - require.Equal(t, a.Bytes(), a.Bytes()) - require.Equal(t, b.Bytes(), b.Bytes()) - require.Equal(t, a.Bytes(), b.Bytes()) + bzA, err := a.Marshal() + require.NoError(t, err) + bzB, err := b.Marshal() + require.NoError(t, err) + + require.Equal(t, bzA, bzB) // a and b should be the same, don't go in results. 
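// Editor's note: proto3 omits fields set to their zero value from the wire
// encoding, so Data: nil and Data: []byte{} above marshal to the same
// (empty) byte slice; that is why bzA and bzB compare equal above.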
results := ABCIResults{a, c, d, e, f} // Make sure each result serializes differently - var last []byte - assert.Equal(t, last, a.Bytes()) // first one is empty + last := []byte{} + assert.Equal(t, last, bzA) // first one is empty for i, res := range results[1:] { - bz := res.Bytes() + bz, err := res.Marshal() + require.NoError(t, err) + assert.NotEqual(t, last, bz, "%d", i) last = bz } @@ -39,19 +44,11 @@ func TestABCIResults(t *testing.T) { assert.NotEmpty(t, root) for i, res := range results { + bz, err := res.Marshal() + require.NoError(t, err) + proof := results.ProveResult(i) - valid := proof.Verify(root, res.Bytes()) + valid := proof.Verify(root, bz) assert.NoError(t, valid, "%d", i) } } - -func TestABCIResultsBytes(t *testing.T) { - results := NewResults([]*abci.ResponseDeliverTx{ - {Code: 0, Data: []byte{}}, - {Code: 0, Data: []byte("one")}, - {Code: 14, Data: nil}, - {Code: 14, Data: []byte("foo")}, - {Code: 14, Data: []byte("bar")}, - }) - assert.NotNil(t, results.Bytes()) -} diff --git a/types/signed_msg_type.go b/types/signed_msg_type.go index 6bd5f057e..4ab5685a7 100644 --- a/types/signed_msg_type.go +++ b/types/signed_msg_type.go @@ -1,21 +1,11 @@ package types -// SignedMsgType is a type of signed message in the consensus. -type SignedMsgType byte - -const ( - // Votes - PrevoteType SignedMsgType = 0x01 - PrecommitType SignedMsgType = 0x02 - - // Proposals - ProposalType SignedMsgType = 0x20 -) +import tmproto "github.com/tendermint/tendermint/proto/tendermint/types" // IsVoteTypeValid returns true if t is a valid vote type. -func IsVoteTypeValid(t SignedMsgType) bool { +func IsVoteTypeValid(t tmproto.SignedMsgType) bool { switch t { - case PrevoteType, PrecommitType: + case tmproto.PrevoteType, tmproto.PrecommitType: return true default: return false diff --git a/types/test_util.go b/types/test_util.go index 377c965a8..367fd0631 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -1,26 +1,27 @@ package types import ( + "fmt" "time" - "github.com/pkg/errors" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) -func MakeCommit(blockID BlockID, height int64, round int, +func MakeCommit(blockID BlockID, height int64, round int32, voteSet *VoteSet, validators []PrivValidator, now time.Time) (*Commit, error) { // all sign for i := 0; i < len(validators); i++ { pubKey, err := validators[i].GetPubKey() if err != nil { - return nil, errors.Wrap(err, "can't get pubkey") + return nil, fmt.Errorf("can't get pubkey: %w", err) } vote := &Vote{ ValidatorAddress: pubKey.Address(), - ValidatorIndex: i, + ValidatorIndex: int32(i), Height: height, Round: round, - Type: PrecommitType, + Type: tmproto.PrecommitType, BlockID: blockID, Timestamp: now, } @@ -35,10 +36,12 @@ func MakeCommit(blockID BlockID, height int64, round int, } func signAddVote(privVal PrivValidator, vote *Vote, voteSet *VoteSet) (signed bool, err error) { - err = privVal.SignVote(voteSet.ChainID(), vote) + v := vote.ToProto() + err = privVal.SignVote(voteSet.ChainID(), v) if err != nil { return false, err } + vote.Signature = v.Signature return voteSet.AddVote(vote) } @@ -52,7 +55,7 @@ func MakeVote( ) (*Vote, error) { pubKey, err := privVal.GetPubKey() if err != nil { - return nil, errors.Wrap(err, "can't get pubkey") + return nil, fmt.Errorf("can't get pubkey: %w", err) } addr := pubKey.Address() idx, _ := valSet.GetByAddress(addr) @@ -62,29 +65,16 @@ func MakeVote( Height: height, Round: 0, Timestamp: now, - Type: PrecommitType, + Type: tmproto.PrecommitType, BlockID: blockID, } - if err := 
privVal.SignVote(chainID, vote); err != nil { + v := vote.ToProto() + + if err := privVal.SignVote(chainID, v); err != nil { return nil, err } - return vote, nil -} -// MakeBlock returns a new block with an empty header, except what can be -// computed from itself. -// It populates the same set of fields validated by ValidateBasic. -func MakeBlock(height int64, txs []Tx, lastCommit *Commit, evidence []Evidence) *Block { - block := &Block{ - Header: Header{ - Height: height, - }, - Data: Data{ - Txs: txs, - }, - Evidence: EvidenceData{Evidence: evidence}, - LastCommit: lastCommit, - } - block.fillHeader() - return block + vote.Signature = v.Signature + + return vote, nil } diff --git a/types/tx.go b/types/tx.go index 311730228..92df92f13 100644 --- a/types/tx.go +++ b/types/tx.go @@ -5,12 +5,10 @@ import ( "errors" "fmt" - amino "github.com/tendermint/go-amino" - - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/crypto/tmhash" tmbytes "github.com/tendermint/tendermint/libs/bytes" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) // Tx is an arbitrary byte array. @@ -40,7 +38,7 @@ func (txs Txs) Hash() []byte { for i := 0; i < len(txs); i++ { txBzs[i] = txs[i].Hash() } - return merkle.SimpleHashFromByteSlices(txBzs) + return merkle.HashFromByteSlices(txBzs) } // Index returns the index of this transaction in the list, or -1 if not found @@ -72,7 +70,7 @@ func (txs Txs) Proof(i int) TxProof { for i := 0; i < l; i++ { bzs[i] = txs[i].Hash() } - root, proofs := merkle.SimpleProofsFromByteSlices(bzs) + root, proofs := merkle.ProofsFromByteSlices(bzs) return TxProof{ RootHash: root, @@ -83,9 +81,9 @@ func (txs Txs) Proof(i int) TxProof { // TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. type TxProof struct { - RootHash tmbytes.HexBytes `json:"root_hash"` - Data Tx `json:"data"` - Proof merkle.SimpleProof `json:"proof"` + RootHash tmbytes.HexBytes `json:"root_hash"` + Data Tx `json:"data"` + Proof merkle.Proof `json:"proof"` } // Leaf returns the hash(tx), which is the leaf in the merkle tree which this proof refers to. @@ -112,27 +110,38 @@ func (tp TxProof) Validate(dataHash []byte) error { return nil } -// TxResult contains results of executing the transaction. -// -// One usage is indexing transaction results. -type TxResult struct { - Height int64 `json:"height"` - Index uint32 `json:"index"` - Tx Tx `json:"tx"` - Result abci.ResponseDeliverTx `json:"result"` +func (tp TxProof) ToProto() tmproto.TxProof { + + pbProof := tp.Proof.ToProto() + + pbtp := tmproto.TxProof{ + RootHash: tp.RootHash, + Data: tp.Data, + Proof: pbProof, + } + + return pbtp +} +func TxProofFromProto(pb tmproto.TxProof) (TxProof, error) { + + pbProof, err := merkle.ProofFromProto(pb.Proof) + if err != nil { + return TxProof{}, err + } + + pbtp := TxProof{ + RootHash: pb.RootHash, + Data: pb.Data, + Proof: *pbProof, + } + + return pbtp, nil } -// ComputeAminoOverhead calculates the overhead for amino encoding a transaction. -// The overhead consists of varint encoding the field number and the wire type -// (= length-delimited = 2), and another varint encoding the length of the -// transaction. -// The field number can be the field number of the particular transaction, or -// the field number of the parenting struct that contains the transactions []Tx -// as a field (this field number is repeated for each contained Tx). 
-// If some []Tx are encoded directly (without a parenting struct), the default -// fieldNum is also 1 (see BinFieldNum in amino.MarshalBinaryBare). -func ComputeAminoOverhead(tx Tx, fieldNum int) int64 { - fnum := uint64(fieldNum) - typ3AndFieldNum := (fnum << 3) | uint64(amino.Typ3_ByteLength) - return int64(amino.UvarintSize(typ3AndFieldNum)) + int64(amino.UvarintSize(uint64(len(tx)))) +// ComputeProtoSizeForTxs wraps the transactions in tmproto.Data{} and calculates the size. +// https://developers.google.com/protocol-buffers/docs/encoding +func ComputeProtoSizeForTxs(txs []Tx) int64 { + data := Data{Txs: txs} + pdData := data.ToProto() + return int64(pdData.Size()) } diff --git a/types/tx_test.go b/types/tx_test.go index 84fe1f2fc..0ee308ced 100644 --- a/types/tx_test.go +++ b/types/tx_test.go @@ -5,9 +5,11 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" tmrand "github.com/tendermint/tendermint/libs/rand" ctest "github.com/tendermint/tendermint/libs/test" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) func makeTxs(cnt, size int) Txs { @@ -68,8 +70,8 @@ func TestValidTxProof(t *testing.T) { for i := range txs { tx := []byte(txs[i]) proof := txs.Proof(i) - assert.Equal(t, i, proof.Proof.Index, "%d: %d", h, i) - assert.Equal(t, len(txs), proof.Proof.Total, "%d: %d", h, i) + assert.EqualValues(t, i, proof.Proof.Index, "%d: %d", h, i) + assert.EqualValues(t, len(txs), proof.Proof.Total, "%d: %d", h, i) assert.EqualValues(t, root, proof.RootHash, "%d: %d", h, i) assert.EqualValues(t, tx, proof.Data, "%d: %d", h, i) assert.EqualValues(t, txs[i].Hash(), proof.Leaf(), "%d: %d", h, i) @@ -77,10 +79,18 @@ func TestValidTxProof(t *testing.T) { assert.NotNil(t, proof.Validate([]byte("foobar")), "%d: %d", h, i) // read-write must also work - var p2 TxProof - bin, err := cdc.MarshalBinaryLengthPrefixed(proof) - assert.Nil(t, err) - err = cdc.UnmarshalBinaryLengthPrefixed(bin, &p2) + var ( + p2 TxProof + pb2 tmproto.TxProof + ) + pbProof := proof.ToProto() + bin, err := pbProof.Marshal() + require.NoError(t, err) + + err = pb2.Unmarshal(bin) + require.NoError(t, err) + + p2, err = TxProofFromProto(pb2) if assert.Nil(t, err, "%d: %d: %+v", h, i, err) { assert.Nil(t, p2.Validate(root), "%d: %d", h, i) } @@ -95,63 +105,6 @@ func TestTxProofUnchangable(t *testing.T) { } } -func TestComputeTxsOverhead(t *testing.T) { - cases := []struct { - txs Txs - wantOverhead int - }{ - {Txs{[]byte{6, 6, 6, 6, 6, 6}}, 2}, - // one 21 Mb transaction: - {Txs{make([]byte, 22020096)}, 5}, - // two 21Mb/2 sized transactions: - {Txs{make([]byte, 11010048), make([]byte, 11010048)}, 10}, - {Txs{[]byte{1, 2, 3}, []byte{1, 2, 3}, []byte{4, 5, 6}}, 6}, - {Txs{[]byte{100, 5, 64}, []byte{42, 116, 118}, []byte{6, 6, 6}, []byte{6, 6, 6}}, 8}, - } - - for _, tc := range cases { - totalBytes := int64(0) - totalOverhead := int64(0) - for _, tx := range tc.txs { - aminoOverhead := ComputeAminoOverhead(tx, 1) - totalOverhead += aminoOverhead - totalBytes += aminoOverhead + int64(len(tx)) - } - bz, err := cdc.MarshalBinaryBare(tc.txs) - assert.EqualValues(t, tc.wantOverhead, totalOverhead) - assert.NoError(t, err) - assert.EqualValues(t, len(bz), totalBytes) - } -} - -func TestComputeAminoOverhead(t *testing.T) { - cases := []struct { - tx Tx - fieldNum int - want int - }{ - {[]byte{6, 6, 6}, 1, 2}, - {[]byte{6, 6, 6}, 16, 3}, - {[]byte{6, 6, 6}, 32, 3}, - {[]byte{6, 6, 6}, 64, 3}, - {[]byte{6, 6, 6}, 512, 3}, - {[]byte{6, 6, 6}, 1024, 3}, - {[]byte{6, 6, 6}, 2048, 
4}, - {make([]byte, 64), 1, 2}, - {make([]byte, 65), 1, 2}, - {make([]byte, 127), 1, 2}, - {make([]byte, 128), 1, 3}, - {make([]byte, 256), 1, 3}, - {make([]byte, 512), 1, 3}, - {make([]byte, 1024), 1, 3}, - {make([]byte, 128), 16, 4}, - } - for _, tc := range cases { - got := ComputeAminoOverhead(tc.tx, tc.fieldNum) - assert.EqualValues(t, tc.want, got) - } -} - func testTxProofUnchangable(t *testing.T) { // make some proof txs := makeTxs(randInt(2, 100), randInt(16, 128)) @@ -161,8 +114,9 @@ func testTxProofUnchangable(t *testing.T) { // make sure it is valid to start with assert.Nil(t, proof.Validate(root)) - bin, err := cdc.MarshalBinaryLengthPrefixed(proof) - assert.Nil(t, err) + pbProof := proof.ToProto() + bin, err := pbProof.Marshal() + require.NoError(t, err) // try mutating the data and make sure nothing breaks for j := 0; j < 500; j++ { @@ -175,16 +129,23 @@ func testTxProofUnchangable(t *testing.T) { // This makes sure that the proof doesn't deserialize into something valid. func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) { - var proof TxProof - err := cdc.UnmarshalBinaryLengthPrefixed(bad, &proof) + + var ( + proof TxProof + pbProof tmproto.TxProof + ) + err := pbProof.Unmarshal(bad) if err == nil { - err = proof.Validate(root) + proof, err = TxProofFromProto(pbProof) if err == nil { - // XXX Fix simple merkle proofs so the following is *not* OK. - // This can happen if we have a slightly different total (where the - // path ends up the same). If it is something else, we have a real - // problem. - assert.NotEqual(t, proof.Proof.Total, good.Proof.Total, "bad: %#v\ngood: %#v", proof, good) + err = proof.Validate(root) + if err == nil { + // XXX Fix simple merkle proofs so the following is *not* OK. + // This can happen if we have a slightly different total (where the + // path ends up the same). If it is something else, we have a real + // problem. + assert.NotEqual(t, proof.Proof.Total, good.Proof.Total, "bad: %#v\ngood: %#v", proof, good) + } } } } diff --git a/types/validator.go b/types/validator.go index 359a19114..961b833e4 100644 --- a/types/validator.go +++ b/types/validator.go @@ -2,11 +2,14 @@ package types import ( "bytes" + "errors" "fmt" "strings" "github.com/tendermint/tendermint/crypto" + ce "github.com/tendermint/tendermint/crypto/encoding" tmrand "github.com/tendermint/tendermint/libs/rand" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) // Volatile state for each Validator @@ -20,6 +23,7 @@ type Validator struct { ProposerPriority int64 `json:"proposer_priority"` } +// NewValidator returns a new validator with the given pubkey and voting power. func NewValidator(pubKey crypto.PubKey, votingPower int64) *Validator { return &Validator{ Address: pubKey.Address(), @@ -29,6 +33,26 @@ func NewValidator(pubKey crypto.PubKey, votingPower int64) *Validator { } } +// ValidateBasic performs basic validation. +func (v *Validator) ValidateBasic() error { + if v == nil { + return errors.New("nil validator") + } + if v.PubKey == nil { + return errors.New("validator does not have a public key") + } + + if v.VotingPower < 0 { + return errors.New("validator has negative voting power") + } + + if len(v.Address) != crypto.AddressSize { + return fmt.Errorf("validator address is the wrong size: %v", v.Address) + } + + return nil +} + // Creates a new copy of the validator so we can mutate ProposerPriority. // Panics if the validator is nil. 
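// A minimal usage sketch (editor's example; the ed25519 key here is an
// assumption, not part of this hunk):
//
//	v := NewValidator(ed25519.GenPrivKey().PubKey(), 10)
//	if err := v.ValidateBasic(); err != nil {
//		panic(err)
//	}
//	v2 := v.Copy() // v2.ProposerPriority can be mutated safely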
func (v *Validator) Copy() *Validator { @@ -59,6 +83,12 @@ func (v *Validator) CompareProposerPriority(other *Validator) *Validator { } } +// String returns a string representation of the Validator: +// +// 1. address +// 2. public key +// 3. voting power +// 4. proposer priority func (v *Validator) String() string { if v == nil { return "nil-Validator" @@ -85,13 +115,62 @@ func ValidatorListString(vals []*Validator) string { // as its redundant with the pubkey. This also excludes ProposerPriority // which changes every round. func (v *Validator) Bytes() []byte { - return cdcEncode(struct { - PubKey crypto.PubKey - VotingPower int64 - }{ - v.PubKey, - v.VotingPower, - }) + pk, err := ce.PubKeyToProto(v.PubKey) + if err != nil { + panic(err) + } + + pbv := tmproto.SimpleValidator{ + PubKey: &pk, + VotingPower: v.VotingPower, + } + + bz, err := pbv.Marshal() + if err != nil { + panic(err) + } + return bz +} + +// ToProto converts Validator to protobuf +func (v *Validator) ToProto() (*tmproto.Validator, error) { + if v == nil { + return nil, errors.New("nil validator") + } + + pk, err := ce.PubKeyToProto(v.PubKey) + if err != nil { + return nil, err + } + + vp := tmproto.Validator{ + Address: v.Address, + PubKey: pk, + VotingPower: v.VotingPower, + ProposerPriority: v.ProposerPriority, + } + + return &vp, nil +} + +// ValidatorFromProto creates a Validator from its protobuf representation. +// It returns an error if the public key is invalid. +func ValidatorFromProto(vp *tmproto.Validator) (*Validator, error) { + if vp == nil { + return nil, errors.New("nil validator") + } + + pk, err := ce.PubKeyFromProto(vp.PubKey) + if err != nil { + return nil, err + } + v := new(Validator) + v.Address = vp.GetAddress() + v.PubKey = pk + v.VotingPower = vp.GetVotingPower() + v.ProposerPriority = vp.GetProposerPriority() + + return v, nil } //---------------------------------------- diff --git a/types/validator_set.go b/types/validator_set.go index f3e4627db..7cff0405d 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -2,16 +2,16 @@ package types import ( "bytes" + "errors" "fmt" "math" "math/big" "sort" "strings" - "github.com/pkg/errors" - "github.com/tendermint/tendermint/crypto/merkle" tmmath "github.com/tendermint/tendermint/libs/math" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) const ( @@ -24,19 +24,28 @@ const ( // and leaves room for defensive purposes. MaxTotalVotingPower = int64(math.MaxInt64) / 8 - // PriorityWindowSizeFactor - is a constant that when multiplied with the total voting power gives - // the maximum allowed distance between validator priorities. + // PriorityWindowSizeFactor - is a constant that when multiplied with the + // total voting power gives the maximum allowed distance between validator + // priorities. PriorityWindowSizeFactor = 2 ) +// ErrTotalVotingPowerOverflow is returned if the total voting power of the +// resulting validator set exceeds MaxTotalVotingPower. +var ErrTotalVotingPowerOverflow = fmt.Errorf("total voting power of resulting valset exceeds max %d", + MaxTotalVotingPower) + // ValidatorSet represent a set of *Validator at a given height. +// // The validators can be fetched by address or index. -// The index is in order of .Address, so the indices are fixed -// for all rounds of a given blockchain height - ie. the validators -// are sorted by their address. -// On the other hand, the .ProposerPriority of each validator and -// the designated .GetProposer() of a set changes every round, -// upon calling .IncrementProposerPriority(). 
+// The index is in order of .VotingPower, so the indices are fixed for all +// rounds of a given blockchain height - ie. the validators are sorted by their +// voting power (descending). Secondary index - .Address (ascending). +// +// On the other hand, the .ProposerPriority of each validator and the +// designated .GetProposer() of a set changes every round, upon calling +// .IncrementProposerPriority(). +// // NOTE: Not goroutine-safe. // NOTE: All get/set to validators should copy the value for safety. type ValidatorSet struct { @@ -48,18 +57,21 @@ type ValidatorSet struct { totalVotingPower int64 } -// NewValidatorSet initializes a ValidatorSet by copying over the -// values from `valz`, a list of Validators. If valz is nil or empty, -// the new ValidatorSet will have an empty list of Validators. -// The addresses of validators in `valz` must be unique otherwise the -// function panics. -// Note the validator set size has an implied limit equal to that of the MaxVotesCount - -// commits by a validator set larger than this will fail validation. +// NewValidatorSet initializes a ValidatorSet by copying over the values from +// `valz`, a list of Validators. If valz is nil or empty, the new ValidatorSet +// will have an empty list of Validators. +// +// The addresses of validators in `valz` must be unique otherwise the function +// panics. +// +// Note the validator set size has an implied limit equal to that of the +// MaxVotesCount - commits by a validator set larger than this will fail +// validation. func NewValidatorSet(valz []*Validator) *ValidatorSet { vals := &ValidatorSet{} err := vals.updateWithChangeSet(valz, false) if err != nil { - panic(fmt.Sprintf("cannot create validator set: %s", err)) + panic(fmt.Sprintf("Cannot create validator set: %v", err)) } if len(valz) > 0 { vals.IncrementProposerPriority(1) @@ -67,6 +79,24 @@ func NewValidatorSet(valz []*Validator) *ValidatorSet { return vals } +func (vals *ValidatorSet) ValidateBasic() error { + if vals.IsNilOrEmpty() { + return errors.New("validator set is nil or empty") + } + + for idx, val := range vals.Validators { + if err := val.ValidateBasic(); err != nil { + return fmt.Errorf("invalid validator #%d: %w", idx, err) + } + } + + if err := vals.Proposer.ValidateBasic(); err != nil { + return fmt.Errorf("proposer failed validate basic, error: %w", err) + } + + return nil +} + // IsNilOrEmpty returns true if validator set is nil or empty. func (vals *ValidatorSet) IsNilOrEmpty() bool { return vals == nil || len(vals.Validators) == 0 @@ -74,16 +104,16 @@ func (vals *ValidatorSet) IsNilOrEmpty() bool { // CopyIncrementProposerPriority increments ProposerPriority and updates the // proposer on a copy, and returns it. -func (vals *ValidatorSet) CopyIncrementProposerPriority(times int) *ValidatorSet { +func (vals *ValidatorSet) CopyIncrementProposerPriority(times int32) *ValidatorSet { copy := vals.Copy() copy.IncrementProposerPriority(times) return copy } -// IncrementProposerPriority increments ProposerPriority of each validator and updates the -// proposer. Panics if validator set is empty. +// IncrementProposerPriority increments ProposerPriority of each validator and +// updates the proposer. Panics if validator set is empty. // `times` must be positive. 
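// Editor's note: one round of the underlying algorithm adds each
// validator's VotingPower to its ProposerPriority, picks the validator
// with the highest priority as proposer, and subtracts the total voting
// power from that proposer's priority. A hedged usage sketch:
//
//	next := vals.CopyIncrementProposerPriority(1)
//	proposer := next.GetProposer()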
-func (vals *ValidatorSet) IncrementProposerPriority(times int) { +func (vals *ValidatorSet) IncrementProposerPriority(times int32) { if vals.IsNilOrEmpty() { panic("empty validator set") } @@ -100,15 +130,16 @@ func (vals *ValidatorSet) IncrementProposerPriority(times int) { var proposer *Validator // Call IncrementProposerPriority(1) times times. - for i := 0; i < times; i++ { + for i := int32(0); i < times; i++ { proposer = vals.incrementProposerPriority() } vals.Proposer = proposer } -// RescalePriorities rescales the priorities such that the distance between the maximum and minimum -// is smaller than `diffMax`. +// RescalePriorities rescales the priorities such that the distance between the +// maximum and minimum is smaller than `diffMax`. Panics if validator set is +// empty. func (vals *ValidatorSet) RescalePriorities(diffMax int64) { if vals.IsNilOrEmpty() { panic("empty validator set") @@ -226,29 +257,31 @@ func (vals *ValidatorSet) Copy() *ValidatorSet { // HasAddress returns true if address given is in the validator set, false - // otherwise. func (vals *ValidatorSet) HasAddress(address []byte) bool { - idx := sort.Search(len(vals.Validators), func(i int) bool { - return bytes.Compare(address, vals.Validators[i].Address) <= 0 - }) - return idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address, address) + for _, val := range vals.Validators { + if bytes.Equal(val.Address, address) { + return true + } + } + return false } // GetByAddress returns an index of the validator with address and validator -// itself if found. Otherwise, -1 and nil are returned. -func (vals *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) { - idx := sort.Search(len(vals.Validators), func(i int) bool { - return bytes.Compare(address, vals.Validators[i].Address) <= 0 - }) - if idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address, address) { - return idx, vals.Validators[idx].Copy() +// itself (copy) if found. Otherwise, -1 and nil are returned. +func (vals *ValidatorSet) GetByAddress(address []byte) (index int32, val *Validator) { + for idx, val := range vals.Validators { + if bytes.Equal(val.Address, address) { + return int32(idx), val.Copy() + } } return -1, nil } -// GetByIndex returns the validator's address and validator itself by index. +// GetByIndex returns the validator's address and validator itself (copy) by +// index. // It returns nil values if index is less than 0 or greater or equal to // len(ValidatorSet.Validators). -func (vals *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) { - if index < 0 || index >= len(vals.Validators) { +func (vals *ValidatorSet) GetByIndex(index int32) (address []byte, val *Validator) { + if index < 0 || int(index) >= len(vals.Validators) { return nil, nil } val = vals.Validators[index] @@ -263,7 +296,6 @@ func (vals *ValidatorSet) Size() int { // Forces recalculation of the set's total voting power. // Panics if total voting power is bigger than MaxTotalVotingPower. func (vals *ValidatorSet) updateTotalVotingPower() { - sum := int64(0) for _, val := range vals.Validators { // mind overflow @@ -313,14 +345,11 @@ func (vals *ValidatorSet) findProposer() *Validator { // Hash returns the Merkle root hash build using validators (as leaves) in the // set. 
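// Editor's note: with the empty-set guard removed below, Hash() of an
// empty validator set becomes merkle.HashFromByteSlices(nil), i.e. the
// SHA-256 of the empty string (e3b0c442...), which is exactly what the
// updated TestValidatorSetBasic asserts.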
func (vals *ValidatorSet) Hash() []byte { - if len(vals.Validators) == 0 { - return nil - } bzs := make([][]byte, len(vals.Validators)) for i, val := range vals.Validators { bzs[i] = val.Bytes() } - return merkle.SimpleHashFromByteSlices(bzs) + return merkle.HashFromByteSlices(bzs) } // Iterate will run the given function over the set. @@ -397,6 +426,7 @@ func verifyUpdates( vals *ValidatorSet, removedPower int64, ) (tvpAfterUpdatesBeforeRemovals int64, err error) { + delta := func(update *Validator, vals *ValidatorSet) int64 { _, val := vals.GetByAddress(update.Address) if val != nil { @@ -414,10 +444,7 @@ func verifyUpdates( for _, upd := range updatesCopy { tvpAfterRemovals += delta(upd, vals) if tvpAfterRemovals > MaxTotalVotingPower { - err = fmt.Errorf( - "failed to add/update validator %v, total voting power would exceed the max allowed %v", - upd.Address, MaxTotalVotingPower) - return 0, err + return 0, ErrTotalVotingPowerOverflow } } return tvpAfterRemovals + removedPower, nil @@ -445,7 +472,6 @@ func numNewValidators(updates []*Validator, vals *ValidatorSet) int { // // No changes are made to the validator set 'vals'. func computeNewPriorities(updates []*Validator, vals *ValidatorSet, updatedTotalVotingPower int64) { - for _, valUpdate := range updates { address := valUpdate.Address _, val := vals.GetByAddress(address) @@ -471,8 +497,9 @@ func computeNewPriorities(updates []*Validator, vals *ValidatorSet, updatedTotal // Expects updates to be a list of updates sorted by address with no duplicates or errors, // must have been validated with verifyUpdates() and priorities computed with computeNewPriorities(). func (vals *ValidatorSet) applyUpdates(updates []*Validator) { - existing := vals.Validators + sort.Sort(ValidatorsByAddress(existing)) + merged := make([]*Validator, len(existing)+len(updates)) i := 0 @@ -509,7 +536,6 @@ func (vals *ValidatorSet) applyUpdates(updates []*Validator) { // Checks that the validators to be removed are part of the validator set. // No changes are made to the validator set 'vals'. func verifyRemovals(deletes []*Validator, vals *ValidatorSet) (votingPower int64, err error) { - removedVotingPower := int64(0) for _, valUpdate := range deletes { address := valUpdate.Address @@ -527,8 +553,8 @@ func verifyRemovals(deletes []*Validator, vals *ValidatorSet) (votingPower int64 // Removes the validators specified in 'deletes' from validator set 'vals'. // Should not fail as verification has been done before. +// Expects vals to be sorted by address (done by applyUpdates). func (vals *ValidatorSet) applyRemovals(deletes []*Validator) { - existing := vals.Validators merged := make([]*Validator, len(existing)-len(deletes)) @@ -559,7 +585,6 @@ func (vals *ValidatorSet) applyRemovals(deletes []*Validator) { // are not allowed and will trigger an error if present in 'changes'. // The 'allowDeletes' flag is set to false by NewValidatorSet() and to true by UpdateWithChangeSet(). func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes bool) error { - if len(changes) == 0 { return nil } @@ -606,6 +631,8 @@ func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes vals.RescalePriorities(PriorityWindowSizeFactor * vals.TotalVotingPower()) vals.shiftByAvgProposerPriority() + sort.Sort(ValidatorsByVotingPower(vals.Validators)) + return nil } @@ -626,14 +653,29 @@ func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator) error { } // VerifyCommit verifies +2/3 of the set had signed the given commit. 
+// +// It checks all the signatures! While it's safe to exit as soon as we have +// 2/3+ signatures, doing so would impact incentivization logic in the ABCI +// application that depends on the LastCommitInfo sent in BeginBlock, which +// includes which validators signed. For instance, Gaia incentivizes proposers +// with a bonus for including more than +2/3 of the signatures. func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error { + if commit == nil { + return errors.New("nil commit") + } if vals.Size() != len(commit.Signatures) { return NewErrInvalidCommitSignatures(vals.Size(), len(commit.Signatures)) } - if err := verifyCommitBasic(commit, height, blockID); err != nil { - return err + + // Validate Height and BlockID. + if height != commit.Height { + return NewErrInvalidCommitHeight(height, commit.Height) + } + if !blockID.Equals(commit.BlockID) { + return fmt.Errorf("invalid commit -- wrong block ID: want %v, got %v", + blockID, commit.BlockID) } talliedVotingPower := int64(0) @@ -648,130 +690,114 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, val := vals.Validators[idx] // Validate signature. - voteSignBytes := commit.VoteSignBytes(chainID, idx) - if !val.PubKey.VerifyBytes(voteSignBytes, commitSig.Signature) { + voteSignBytes := commit.VoteSignBytes(chainID, int32(idx)) + if !val.PubKey.VerifySignature(voteSignBytes, commitSig.Signature) { return fmt.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) } // Good! - if blockID.Equals(commitSig.BlockID(commit.BlockID)) { + if commitSig.ForBlock() { talliedVotingPower += val.VotingPower } // else { - // It's OK that the BlockID doesn't match. We include stray - // signatures (~votes for nil) to measure validator availability. + // It's OK. We include stray signatures (~votes for nil) to measure + // validator availability. // } + } - // return as soon as +2/3 of the signatures are verified - if talliedVotingPower > votingPowerNeeded { - return nil - } + if got, needed := talliedVotingPower, votingPowerNeeded; got <= needed { + return ErrNotEnoughVotingPowerSigned{Got: got, Needed: needed} } - // talliedVotingPower <= needed, thus return error - return ErrNotEnoughVotingPowerSigned{Got: talliedVotingPower, Needed: votingPowerNeeded} + return nil } -// VerifyFutureCommit will check to see if the set would be valid with a different -// validator set. -// -// vals is the old validator set that we know. Over 2/3 of the power in old -// signed this block. -// -// In Tendermint, 1/3 of the voting power can halt or fork the chain, but 1/3 -// can't make arbitrary state transitions. You still need > 2/3 Byzantine to -// make arbitrary state transitions. -// -// To preserve this property in the light client, we also require > 2/3 of the -// old vals to sign the future commit at H, that way we preserve the property -// that if they weren't being truthful about the validator set at H (block hash -// -> vals hash) or about the app state (block hash -> app hash) we can slash -// > 2/3. Otherwise, the lite client isn't providing the same security -// guarantees. -// -// Even if we added a slashing condition that if you sign a block header with -// the wrong validator set, then we would only need > 1/3 of signatures from -// the old vals on the new commit, it wouldn't be sufficient because the new -// vals can be arbitrary and commit some arbitrary app hash. -// -// newSet is the validator set that signed this block. 
Only votes from new are -// sufficient for 2/3 majority in the new set as well, for it to be a valid -// commit. +// LIGHT CLIENT VERIFICATION METHODS + +// VerifyCommitLight verifies +2/3 of the set had signed the given commit. // -// NOTE: This doesn't check whether the commit is a future commit, because the -// current height isn't part of the ValidatorSet. Caller must check that the -// commit height is greater than the height for this validator set. -func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID string, - blockID BlockID, height int64, commit *Commit) error { - oldVals := vals - - // Commit must be a valid commit for newSet. - err := newSet.VerifyCommit(chainID, blockID, height, commit) - if err != nil { - return err +// This method is primarily used by the light client and does not check all the +// signatures. +func (vals *ValidatorSet) VerifyCommitLight(chainID string, blockID BlockID, + height int64, commit *Commit) error { + if commit == nil { + return errors.New("nil commit") + } + + if vals.Size() != len(commit.Signatures) { + return NewErrInvalidCommitSignatures(vals.Size(), len(commit.Signatures)) } - // Check old voting power. - oldVotingPower := int64(0) - seen := map[int]bool{} + // Validate Height and BlockID. + if height != commit.Height { + return NewErrInvalidCommitHeight(height, commit.Height) + } + if !blockID.Equals(commit.BlockID) { + return fmt.Errorf("invalid commit -- wrong block ID: want %v, got %v", + blockID, commit.BlockID) + } + talliedVotingPower := int64(0) + votingPowerNeeded := vals.TotalVotingPower() * 2 / 3 for idx, commitSig := range commit.Signatures { - if commitSig.Absent() { - continue // OK, some signatures can be absent. + // No need to verify absent or nil votes. + if !commitSig.ForBlock() { + continue } - // See if this validator is in oldVals. - oldIdx, val := oldVals.GetByAddress(commitSig.ValidatorAddress) - if val == nil || seen[oldIdx] { - continue // missing or double vote... - } - seen[oldIdx] = true + // The vals and commit have a 1-to-1 correspondence. + // This means we don't need the validator address or to do any lookup. + val := vals.Validators[idx] // Validate signature. - voteSignBytes := commit.VoteSignBytes(chainID, idx) - if !val.PubKey.VerifyBytes(voteSignBytes, commitSig.Signature) { - return errors.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) + voteSignBytes := commit.VoteSignBytes(chainID, int32(idx)) + if !val.PubKey.VerifySignature(voteSignBytes, commitSig.Signature) { + return fmt.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) } - // Good! - if blockID.Equals(commitSig.BlockID(commit.BlockID)) { - oldVotingPower += val.VotingPower + + talliedVotingPower += val.VotingPower + + // return as soon as +2/3 of the signatures are verified + if talliedVotingPower > votingPowerNeeded { + return nil } - // else { - // It's OK that the BlockID doesn't match. We include stray - // signatures (~votes for nil) to measure validator availability. - // } } - if got, needed := oldVotingPower, oldVals.TotalVotingPower()*2/3; got <= needed { - return ErrNotEnoughVotingPowerSigned{Got: got, Needed: needed} - } - return nil + return ErrNotEnoughVotingPowerSigned{Got: talliedVotingPower, Needed: votingPowerNeeded} } -// VerifyCommitTrusting verifies that trustLevel ([1/3, 1]) of the validator -// set signed this commit. +// VerifyCommitLightTrusting verifies that trustLevel of the validator set signed +// this commit. 
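// For example (editor's sketch): with TotalVotingPower() == 9000 and
// trustLevel == 1/3, votingPowerNeeded is 9000*1/3 == 3000, and the
// commit is accepted only once strictly more than 3000 voting power
// from this (trusted) set has signed.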
+// // NOTE the given validators do not necessarily correspond to the validator set // for this commit, but there may be some intersection. -func (vals *ValidatorSet) VerifyCommitTrusting(chainID string, blockID BlockID, - height int64, commit *Commit, trustLevel tmmath.Fraction) error { - - if trustLevel.Numerator*3 < trustLevel.Denominator || // < 1/3 - trustLevel.Numerator > trustLevel.Denominator { // > 1 - panic(fmt.Sprintf("trustLevel must be within [1/3, 1], given %v", trustLevel)) +// +// This method is primarily used by the light client and does not check all the +// signatures. +func (vals *ValidatorSet) VerifyCommitLightTrusting(chainID string, commit *Commit, trustLevel tmmath.Fraction) error { + // sanity checks + if trustLevel.Denominator == 0 { + return errors.New("trustLevel has zero Denominator") } - - if err := verifyCommitBasic(commit, height, blockID); err != nil { - return err + if commit == nil { + return errors.New("nil commit") } var ( talliedVotingPower int64 - seenVals = make(map[int]int, len(commit.Signatures)) // validator index -> commit index - votingPowerNeeded = (vals.TotalVotingPower() * trustLevel.Numerator) / trustLevel.Denominator + seenVals = make(map[int32]int, len(commit.Signatures)) // validator index -> commit index ) + // Safely calculate voting power needed. + totalVotingPowerMulByNumerator, overflow := safeMul(vals.TotalVotingPower(), int64(trustLevel.Numerator)) + if overflow { + return errors.New("int64 overflow while calculating voting power needed. please provide smaller trustLevel numerator") + } + votingPowerNeeded := totalVotingPowerMulByNumerator / int64(trustLevel.Denominator) + for idx, commitSig := range commit.Signatures { - if commitSig.Absent() { - continue // OK, some signatures can be absent. + // No need to verify absent or nil votes. + if !commitSig.ForBlock() { + continue } // We don't know the validators that committed this block, so we have to @@ -782,24 +808,17 @@ func (vals *ValidatorSet) VerifyCommitTrusting(chainID string, blockID BlockID, // check for double vote of validator on the same commit if firstIndex, ok := seenVals[valIdx]; ok { secondIndex := idx - return errors.Errorf("double vote from %v (%d and %d)", val, firstIndex, secondIndex) + return fmt.Errorf("double vote from %v (%d and %d)", val, firstIndex, secondIndex) } seenVals[valIdx] = idx // Validate signature. - voteSignBytes := commit.VoteSignBytes(chainID, idx) - if !val.PubKey.VerifyBytes(voteSignBytes, commitSig.Signature) { - return errors.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) + voteSignBytes := commit.VoteSignBytes(chainID, int32(idx)) + if !val.PubKey.VerifySignature(voteSignBytes, commitSig.Signature) { + return fmt.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) } - // Good! - if blockID.Equals(commitSig.BlockID(commit.BlockID)) { - talliedVotingPower += val.VotingPower - } - // else { - // It's OK that the BlockID doesn't match. We include stray - // signatures (~votes for nil) to measure validator availability. 
- // } + talliedVotingPower += val.VotingPower if talliedVotingPower > votingPowerNeeded { return nil @@ -810,18 +829,22 @@ return ErrNotEnoughVotingPowerSigned{Got: talliedVotingPower, Needed: votingPowerNeeded} } -func verifyCommitBasic(commit *Commit, height int64, blockID BlockID) error { - if err := commit.ValidateBasic(); err != nil { - return err - } - if height != commit.Height { - return NewErrInvalidCommitHeight(height, commit.Height) - } - if !blockID.Equals(commit.BlockID) { - return fmt.Errorf("invalid commit -- wrong block ID: want %v, got %v", - blockID, commit.BlockID) +// findPreviousProposer reverses the compare proposer priority function to find the validator +// with the lowest proposer priority which would have been the previous proposer. +// +// It is used when recreating a validator set from an existing array of validators. +func (vals *ValidatorSet) findPreviousProposer() *Validator { + var previousProposer *Validator + for _, val := range vals.Validators { + if previousProposer == nil { + previousProposer = val + continue + } + if previousProposer == previousProposer.CompareProposerPriority(val) { + previousProposer = val + } } - return nil + return previousProposer } //----------------- @@ -829,8 +852,7 @@ func verifyCommitBasic(commit *Commit, height int64, blockID BlockID) error { // IsErrNotEnoughVotingPowerSigned returns true if err is // ErrNotEnoughVotingPowerSigned. func IsErrNotEnoughVotingPowerSigned(err error) bool { - _, ok := errors.Cause(err).(ErrNotEnoughVotingPowerSigned) - return ok + return errors.As(err, &ErrNotEnoughVotingPowerSigned{}) } // ErrNotEnoughVotingPowerSigned is returned when not enough validators signed @@ -846,11 +868,16 @@ func (e ErrNotEnoughVotingPowerSigned) Error() string { //---------------- +// String returns a string representation of ValidatorSet. +// +// See StringIndented. func (vals *ValidatorSet) String() string { return vals.StringIndented("") } -// StringIndented returns an intended string representation of ValidatorSet. +// StringIndented returns an indented String. +// +// See Validator#String. func (vals *ValidatorSet) StringIndented(indent string) string { if vals == nil { return "nil-ValidatorSet" @@ -873,46 +900,140 @@ } //------------------------------------- -// Implements sort for sorting validators by address. -// ValidatorsByAddress implements the sort of validators by address. -type ValidatorsByAddress []*Validator +// ValidatorsByVotingPower implements sort.Interface for []*Validator based on +// the VotingPower and Address fields. +type ValidatorsByVotingPower []*Validator + +func (valz ValidatorsByVotingPower) Len() int { return len(valz) } + +func (valz ValidatorsByVotingPower) Less(i, j int) bool { + if valz[i].VotingPower == valz[j].VotingPower { + return bytes.Compare(valz[i].Address, valz[j].Address) == -1 + } + return valz[i].VotingPower > valz[j].VotingPower +} -func (valz ValidatorsByAddress) Len() int { - return len(valz) +func (valz ValidatorsByVotingPower) Swap(i, j int) { + valz[i], valz[j] = valz[j], valz[i] } +// ValidatorsByAddress implements sort.Interface for []*Validator based on +// the Address field. 
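// Usage sketch (editor's example; mirrors the sort.Sort call in
// applyUpdates above):
//
//	sort.Sort(ValidatorsByAddress(vals.Validators)) // ascending by address bytes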
+type ValidatorsByAddress []*Validator + +func (valz ValidatorsByAddress) Len() int { return len(valz) } + func (valz ValidatorsByAddress) Less(i, j int) bool { return bytes.Compare(valz[i].Address, valz[j].Address) == -1 } func (valz ValidatorsByAddress) Swap(i, j int) { - it := valz[i] - valz[i] = valz[j] - valz[j] = it + valz[i], valz[j] = valz[j], valz[i] +} + +// ToProto converts ValidatorSet to protobuf +func (vals *ValidatorSet) ToProto() (*tmproto.ValidatorSet, error) { + if vals.IsNilOrEmpty() { + return &tmproto.ValidatorSet{}, nil // validator set should never be nil + } + + vp := new(tmproto.ValidatorSet) + valsProto := make([]*tmproto.Validator, len(vals.Validators)) + for i := 0; i < len(vals.Validators); i++ { + valp, err := vals.Validators[i].ToProto() + if err != nil { + return nil, err + } + valsProto[i] = valp + } + vp.Validators = valsProto + + valProposer, err := vals.Proposer.ToProto() + if err != nil { + return nil, fmt.Errorf("toProto: validatorSet proposer error: %w", err) + } + vp.Proposer = valProposer + + vp.TotalVotingPower = vals.totalVotingPower + + return vp, nil +} + +// ValidatorSetFromProto creates a ValidatorSet from its protobuf representation. +// It returns an error if any of the validators from the set or the proposer +// is invalid. +func ValidatorSetFromProto(vp *tmproto.ValidatorSet) (*ValidatorSet, error) { + if vp == nil { + return nil, errors.New("nil validator set") // validator set should never be nil, bigger issues are at play if empty + } + vals := new(ValidatorSet) + + valsProto := make([]*Validator, len(vp.Validators)) + for i := 0; i < len(vp.Validators); i++ { + v, err := ValidatorFromProto(vp.Validators[i]) + if err != nil { + return nil, err + } + valsProto[i] = v + } + vals.Validators = valsProto + + p, err := ValidatorFromProto(vp.GetProposer()) + if err != nil { + return nil, fmt.Errorf("fromProto: validatorSet proposer error: %w", err) + } + + vals.Proposer = p + + vals.totalVotingPower = vp.GetTotalVotingPower() + + return vals, vals.ValidateBasic() +} + +// ValidatorSetFromExistingValidators takes an existing array of validators and rebuilds +// the exact same validator set that corresponds to it, without changing the proposer priority or power. +// If any of the validators fails ValidateBasic, an error is returned. +func ValidatorSetFromExistingValidators(valz []*Validator) (*ValidatorSet, error) { + for _, val := range valz { + err := val.ValidateBasic() + if err != nil { + return nil, fmt.Errorf("can't create validator set: %w", err) + } + } + vals := &ValidatorSet{ + Validators: valz, + } + vals.Proposer = vals.findPreviousProposer() + vals.updateTotalVotingPower() + sort.Sort(ValidatorsByVotingPower(vals.Validators)) + return vals, nil } //---------------------------------------- -// for testing -// RandValidatorSet returns a randomized validator set, useful for testing. -// NOTE: PrivValidator are in order. -// UNSTABLE +// RandValidatorSet returns a randomized validator set (size: +numValidators+), +// where each validator has a voting power of +votingPower+. +// +// EXPOSED FOR TESTING. 
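// For instance (editor's sketch), a 4-validator set in which every
// validator has 10 voting power, plus the matching private validators
// sorted by address:
//
//	vals, privVals := RandValidatorSet(4, 10)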
func RandValidatorSet(numValidators int, votingPower int64) (*ValidatorSet, []PrivValidator) { - valz := make([]*Validator, numValidators) - privValidators := make([]PrivValidator, numValidators) + var ( + valz = make([]*Validator, numValidators) + privValidators = make([]PrivValidator, numValidators) + ) + for i := 0; i < numValidators; i++ { val, privValidator := RandValidator(false, votingPower) valz[i] = val privValidators[i] = privValidator } - vals := NewValidatorSet(valz) + sort.Sort(PrivValidatorsByAddress(privValidators)) - return vals, privValidators + + return NewValidatorSet(valz), privValidators } -/////////////////////////////////////////////////////////////////////////////// -// safe addition/subtraction +// safe addition/subtraction/multiplication func safeAdd(a, b int64) (int64, bool) { if b > 0 && a > math.MaxInt64-b { @@ -953,3 +1074,25 @@ func safeSubClip(a, b int64) int64 { } return c } + +func safeMul(a, b int64) (int64, bool) { + if a == 0 || b == 0 { + return 0, false + } + + absOfB := b + if b < 0 { + absOfB = -b + } + + absOfA := a + if a < 0 { + absOfA = -a + } + + if absOfA > math.MaxInt64/absOfB { + return 0, true + } + + return a * b, false +} diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 2a6dff9f6..84fdcdf48 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -17,7 +17,7 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" tmmath "github.com/tendermint/tendermint/libs/math" tmrand "github.com/tendermint/tendermint/libs/rand" - tmtime "github.com/tendermint/tendermint/types/time" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) func TestValidatorSetBasic(t *testing.T) { @@ -32,7 +32,7 @@ func TestValidatorSetBasic(t *testing.T) { assert.EqualValues(t, vset, vset.Copy()) assert.False(t, vset.HasAddress([]byte("some val"))) idx, val := vset.GetByAddress([]byte("some val")) - assert.Equal(t, -1, idx) + assert.EqualValues(t, -1, idx) assert.Nil(t, val) addr, val := vset.GetByIndex(-100) assert.Nil(t, addr) @@ -46,15 +46,16 @@ func TestValidatorSetBasic(t *testing.T) { assert.Zero(t, vset.Size()) assert.Equal(t, int64(0), vset.TotalVotingPower()) assert.Nil(t, vset.GetProposer()) - assert.Nil(t, vset.Hash()) - + assert.Equal(t, []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, + 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, + 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}, vset.Hash()) // add val = randValidator(vset.TotalVotingPower()) assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) assert.True(t, vset.HasAddress(val.Address)) idx, _ = vset.GetByAddress(val.Address) - assert.Equal(t, 0, idx) + assert.EqualValues(t, 0, idx) addr, _ = vset.GetByIndex(0) assert.Equal(t, []byte(val.Address), addr) assert.Equal(t, 1, vset.Size()) @@ -77,6 +78,64 @@ func TestValidatorSetBasic(t *testing.T) { } +func TestValidatorSetValidateBasic(t *testing.T) { + val, _ := RandValidator(false, 1) + badVal := &Validator{} + + testCases := []struct { + vals ValidatorSet + err bool + msg string + }{ + { + vals: ValidatorSet{}, + err: true, + msg: "validator set is nil or empty", + }, + { + vals: ValidatorSet{ + Validators: []*Validator{}, + }, + err: true, + msg: "validator set is nil or empty", + }, + { + vals: ValidatorSet{ + Validators: []*Validator{val}, + }, + err: true, + msg: "proposer failed validate basic, error: nil validator", + }, + { + vals: ValidatorSet{ + Validators: []*Validator{badVal}, + }, + err: true, + msg: 
"invalid validator #0: validator does not have a public key", + }, + { + vals: ValidatorSet{ + Validators: []*Validator{val}, + Proposer: val, + }, + err: false, + msg: "", + }, + } + + for _, tc := range testCases { + err := tc.vals.ValidateBasic() + if tc.err { + if assert.Error(t, err) { + assert.Equal(t, tc.msg, err.Error()) + } + } else { + assert.NoError(t, err) + } + } + +} + func TestCopy(t *testing.T) { vset := randValidatorSet(10) vsetHash := vset.Hash() @@ -242,14 +301,17 @@ func TestProposerSelection2(t *testing.T) { func TestProposerSelection3(t *testing.T) { vset := NewValidatorSet([]*Validator{ - newValidator([]byte("a"), 1), - newValidator([]byte("b"), 1), - newValidator([]byte("c"), 1), - newValidator([]byte("d"), 1), + newValidator([]byte("avalidator_address12"), 1), + newValidator([]byte("bvalidator_address12"), 1), + newValidator([]byte("cvalidator_address12"), 1), + newValidator([]byte("dvalidator_address12"), 1), }) proposerOrder := make([]*Validator, 4) for i := 0; i < 4; i++ { + // need to give all validators to have keys + pk := ed25519.GenPrivKey().PubKey() + vset.Validators[i].PubKey = pk proposerOrder[i] = vset.GetProposer() vset.IncrementProposerPriority(1) } @@ -257,7 +319,10 @@ func TestProposerSelection3(t *testing.T) { // i for the loop // j for the times // we should go in order for ever, despite some IncrementProposerPriority with times > 1 - var i, j int + var ( + i int + j int32 + ) for ; i < 10000; i++ { got := vset.GetProposer().Address expected := proposerOrder[j%4].Address @@ -267,7 +332,7 @@ func TestProposerSelection3(t *testing.T) { // serialize, deserialize, check proposer b := vset.toBytes() - vset.fromBytes(b) + vset = vset.fromBytes(b) computed := vset.GetProposer() // findGetProposer() if i != 0 { @@ -285,11 +350,11 @@ func TestProposerSelection3(t *testing.T) { } // times is usually 1 - times := 1 + times := int32(1) mod := (tmrand.Int() % 5) + 1 if tmrand.Int()%mod > 0 { // sometimes its up to 5 - times = (tmrand.Int() % 4) + 1 + times = (tmrand.Int31() % 4) + 1 } vset.IncrementProposerPriority(times) @@ -302,9 +367,9 @@ func newValidator(address []byte, power int64) *Validator { } func randPubKey() crypto.PubKey { - var pubKey [32]byte - copy(pubKey[:], tmrand.Bytes(32)) - return ed25519.PubKeyEd25519(pubKey) + pubKey := make(ed25519.PubKey, ed25519.PubKeySize) + copy(pubKey, tmrand.Bytes(32)) + return ed25519.PubKey(tmrand.Bytes(32)) } func randValidator(totalVotingPower int64) *Validator { @@ -326,19 +391,33 @@ func randValidatorSet(numValidators int) *ValidatorSet { } func (vals *ValidatorSet) toBytes() []byte { - bz, err := cdc.MarshalBinaryLengthPrefixed(vals) + pbvs, err := vals.ToProto() + if err != nil { + panic(err) + } + + bz, err := pbvs.Marshal() if err != nil { panic(err) } + return bz } -func (vals *ValidatorSet) fromBytes(b []byte) { - err := cdc.UnmarshalBinaryLengthPrefixed(b, &vals) +func (vals *ValidatorSet) fromBytes(b []byte) *ValidatorSet { + pbvs := new(tmproto.ValidatorSet) + err := pbvs.Unmarshal(b) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED panic(err) } + + vs, err := ValidatorSetFromProto(pbvs) + if err != nil { + panic(err) + } + + return vs } //------------------------------------------------------------------- @@ -397,7 +476,7 @@ func TestAveragingInIncrementProposerPriority(t *testing.T) { // the expected ProposerPriority. 
tcs := []struct { vs ValidatorSet - times int + times int32 avg int64 }{ 0: {ValidatorSet{ @@ -447,7 +526,7 @@ func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) { tcs := []struct { vals *ValidatorSet wantProposerPrioritys []int64 - times int + times int32 wantProposer *Validator }{ @@ -584,62 +663,153 @@ func TestSafeSubClip(t *testing.T) { //------------------------------------------------------------------- -func TestValidatorSetVerifyCommit(t *testing.T) { - privKey := ed25519.GenPrivKey() - pubKey := privKey.PubKey() - v1 := NewValidator(pubKey, 1000) - vset := NewValidatorSet([]*Validator{v1}) +// Check VerifyCommit, VerifyCommitLight and VerifyCommitLightTrusting basic +// verification. +func TestValidatorSet_VerifyCommit_All(t *testing.T) { + var ( + privKey = ed25519.GenPrivKey() + pubKey = privKey.PubKey() + v1 = NewValidator(pubKey, 1000) + vset = NewValidatorSet([]*Validator{v1}) + + chainID = "Lalande21185" + ) + + vote := examplePrecommit() + vote.ValidatorAddress = pubKey.Address() + v := vote.ToProto() + sig, err := privKey.Sign(VoteSignBytes(chainID, v)) + require.NoError(t, err) + vote.Signature = sig + + commit := NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{vote.CommitSig()}) + + vote2 := *vote + sig2, err := privKey.Sign(VoteSignBytes("EpsilonEridani", v)) + require.NoError(t, err) + vote2.Signature = sig2 + + testCases := []struct { + description string + chainID string + blockID BlockID + height int64 + commit *Commit + expErr bool + }{ + {"good", chainID, vote.BlockID, vote.Height, commit, false}, + + {"wrong signature (#0)", "EpsilonEridani", vote.BlockID, vote.Height, commit, true}, + {"wrong block ID", chainID, makeBlockIDRandom(), vote.Height, commit, true}, + {"wrong height", chainID, vote.BlockID, vote.Height - 1, commit, true}, + + {"wrong set size: 1 vs 0", chainID, vote.BlockID, vote.Height, + NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{}), true}, + + {"wrong set size: 1 vs 2", chainID, vote.BlockID, vote.Height, + NewCommit(vote.Height, vote.Round, vote.BlockID, + []CommitSig{vote.CommitSig(), {BlockIDFlag: BlockIDFlagAbsent}}), true}, + + {"insufficient voting power: got 0, needed more than 666", chainID, vote.BlockID, vote.Height, + NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{{BlockIDFlag: BlockIDFlagAbsent}}), true}, + + {"wrong signature (#0)", chainID, vote.BlockID, vote.Height, + NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{vote2.CommitSig()}), true}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.description, func(t *testing.T) { + err := vset.VerifyCommit(tc.chainID, tc.blockID, tc.height, tc.commit) + if tc.expErr { + if assert.Error(t, err, "VerifyCommit") { + assert.Contains(t, err.Error(), tc.description, "VerifyCommit") + } + } else { + assert.NoError(t, err, "VerifyCommit") + } + + err = vset.VerifyCommitLight(tc.chainID, tc.blockID, tc.height, tc.commit) + if tc.expErr { + if assert.Error(t, err, "VerifyCommitLight") { + assert.Contains(t, err.Error(), tc.description, "VerifyCommitLight") + } + } else { + assert.NoError(t, err, "VerifyCommitLight") + } + }) + } +} - // good +func TestValidatorSet_VerifyCommit_CheckAllSignatures(t *testing.T) { var ( - chainID = "mychainID" + chainID = "test_chain_id" + h = int64(3) blockID = makeBlockIDRandom() - height = int64(5) ) - vote := &Vote{ - ValidatorAddress: v1.Address, - ValidatorIndex: 0, - Height: height, - Round: 0, - Timestamp: tmtime.Now(), - Type: PrecommitType, - BlockID: blockID, - } - 
sig, err := privKey.Sign(vote.SignBytes(chainID)) + + voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, 10) + commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now()) + require.NoError(t, err) + + // malleate 4th signature + vote := voteSet.GetByIndex(3) + v := vote.ToProto() + err = vals[3].SignVote("CentaurusA", v) + require.NoError(t, err) + vote.Signature = v.Signature + commit.Signatures[3] = vote.CommitSig() + + err = valSet.VerifyCommit(chainID, blockID, h, commit) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "wrong signature (#3)") + } +} + +func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSigned(t *testing.T) { + var ( + chainID = "test_chain_id" + h = int64(3) + blockID = makeBlockIDRandom() + ) + + voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, 10) + commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now()) + require.NoError(t, err) + + // malleate 4th signature (3 signatures are enough for 2/3+) + vote := voteSet.GetByIndex(3) + v := vote.ToProto() + err = vals[3].SignVote("CentaurusA", v) + require.NoError(t, err) + vote.Signature = v.Signature + commit.Signatures[3] = vote.CommitSig() + + err = valSet.VerifyCommitLight(chainID, blockID, h, commit) assert.NoError(t, err) - vote.Signature = sig - commit := NewCommit(vote.Height, vote.Round, blockID, []CommitSig{vote.CommitSig()}) +} - // bad +func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotingPowerSigned(t *testing.T) { var ( - badChainID = "notmychainID" - badBlockID = BlockID{Hash: []byte("goodbye")} - badHeight = height + 1 - badCommit = NewCommit(badHeight, 0, blockID, []CommitSig{{BlockIDFlag: BlockIDFlagAbsent}}) + chainID = "test_chain_id" + h = int64(3) + blockID = makeBlockIDRandom() ) - // test some error cases - // TODO: test more cases! 
- cases := []struct { - chainID string - blockID BlockID - height int64 - commit *Commit - }{ - {badChainID, blockID, height, commit}, - {chainID, badBlockID, height, commit}, - {chainID, blockID, badHeight, commit}, - {chainID, blockID, height, badCommit}, - } + voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, 10) + commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now()) + require.NoError(t, err) - for i, c := range cases { - err := vset.VerifyCommit(c.chainID, c.blockID, c.height, c.commit) - assert.NotNil(t, err, i) - } + // malleate 3rd signature (2 signatures are enough for 1/3+ trust level) + vote := voteSet.GetByIndex(2) + v := vote.ToProto() + err = vals[2].SignVote("CentaurusA", v) + require.NoError(t, err) + vote.Signature = v.Signature + commit.Signatures[2] = vote.CommitSig() - // test a good one - err = vset.VerifyCommit(chainID, blockID, height, commit) - assert.Nil(t, err) + err = valSet.VerifyCommitLightTrusting(chainID, commit, tmmath.Fraction{Numerator: 1, Denominator: 3}) + assert.NoError(t, err) } func TestEmptySet(t *testing.T) { @@ -929,28 +1099,28 @@ func TestValSetUpdatesBasicTestsExecute(t *testing.T) { }, { // voting power changes testValSet(2, 10), - []testVal{{"v1", 11}, {"v2", 22}}, - []testVal{{"v1", 11}, {"v2", 22}}, + []testVal{{"v2", 22}, {"v1", 11}}, + []testVal{{"v2", 22}, {"v1", 11}}, }, { // add new validators - []testVal{{"v1", 10}, {"v2", 20}}, - []testVal{{"v3", 30}, {"v4", 40}}, - []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}, {"v4", 40}}, + []testVal{{"v2", 20}, {"v1", 10}}, + []testVal{{"v4", 40}, {"v3", 30}}, + []testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}}, }, { // add new validator to middle - []testVal{{"v1", 10}, {"v3", 20}}, + []testVal{{"v3", 20}, {"v1", 10}}, []testVal{{"v2", 30}}, - []testVal{{"v1", 10}, {"v2", 30}, {"v3", 20}}, + []testVal{{"v2", 30}, {"v3", 20}, {"v1", 10}}, }, { // add new validator to beginning - []testVal{{"v2", 10}, {"v3", 20}}, + []testVal{{"v3", 20}, {"v2", 10}}, []testVal{{"v1", 30}}, - []testVal{{"v1", 30}, {"v2", 10}, {"v3", 20}}, + []testVal{{"v1", 30}, {"v3", 20}, {"v2", 10}}, }, { // delete validators - []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}}, + []testVal{{"v3", 30}, {"v2", 20}, {"v1", 10}}, []testVal{{"v2", 0}}, - []testVal{{"v1", 10}, {"v3", 30}}, + []testVal{{"v3", 30}, {"v1", 10}}, }, } @@ -987,19 +1157,19 @@ func TestValSetUpdatesOrderIndependenceTestsExecute(t *testing.T) { updateVals []testVal }{ 0: { // order of changes should not matter, the final validator sets should be the same - []testVal{{"v1", 10}, {"v2", 10}, {"v3", 30}, {"v4", 40}}, - []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}, {"v4", 44}}}, + []testVal{{"v4", 40}, {"v3", 30}, {"v2", 10}, {"v1", 10}}, + []testVal{{"v4", 44}, {"v3", 33}, {"v2", 22}, {"v1", 11}}}, 1: { // order of additions should not matter - []testVal{{"v1", 10}, {"v2", 20}}, + []testVal{{"v2", 20}, {"v1", 10}}, []testVal{{"v3", 30}, {"v4", 40}, {"v5", 50}, {"v6", 60}}}, 2: { // order of removals should not matter - []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}, {"v4", 40}}, + []testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}}, []testVal{{"v1", 0}, {"v3", 0}, {"v4", 0}}}, 3: { // order of mixed operations should not matter - []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}, {"v4", 40}}, + []testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}}, []testVal{{"v1", 0}, {"v3", 0}, {"v2", 22}, {"v5", 50}, {"v4", 44}}}, } @@ -1102,7 +1272,7 @@ type testVSetCfg struct { updatedVals []testVal addedVals []testVal expectedVals 
[]testVal - wantErr bool + expErr error } func randTestVSetCfg(t *testing.T, nBase, nAddMax int) testVSetCfg { @@ -1150,50 +1320,54 @@ func randTestVSetCfg(t *testing.T, nBase, nAddMax int) testVSetCfg { cfg.expectedVals[i-nDel] = cfg.addedVals[i-nBase] } - sort.Sort(testValsByAddress(cfg.startVals)) - sort.Sort(testValsByAddress(cfg.deletedVals)) - sort.Sort(testValsByAddress(cfg.updatedVals)) - sort.Sort(testValsByAddress(cfg.addedVals)) - sort.Sort(testValsByAddress(cfg.expectedVals)) + sort.Sort(testValsByVotingPower(cfg.startVals)) + sort.Sort(testValsByVotingPower(cfg.deletedVals)) + sort.Sort(testValsByVotingPower(cfg.updatedVals)) + sort.Sort(testValsByVotingPower(cfg.addedVals)) + sort.Sort(testValsByVotingPower(cfg.expectedVals)) return cfg } -func applyChangesToValSet(t *testing.T, wantErr bool, valSet *ValidatorSet, valsLists ...[]testVal) { +func applyChangesToValSet(t *testing.T, expErr error, valSet *ValidatorSet, valsLists ...[]testVal) { changes := make([]testVal, 0) for _, valsList := range valsLists { changes = append(changes, valsList...) } valList := createNewValidatorList(changes) err := valSet.UpdateWithChangeSet(valList) - assert.Equal(t, wantErr, err != nil, "got error %v", err) + if expErr != nil { + assert.Equal(t, expErr, err) + } else { + assert.NoError(t, err) + } } func TestValSetUpdatePriorityOrderTests(t *testing.T) { - const nMaxElections = 5000 + const nMaxElections int32 = 5000 testCases := []testVSetCfg{ 0: { // remove high power validator, keep old equal lower power validators - startVals: []testVal{{"v1", 1}, {"v2", 1}, {"v3", 1000}}, + startVals: []testVal{{"v3", 1000}, {"v1", 1}, {"v2", 1}}, deletedVals: []testVal{{"v3", 0}}, updatedVals: []testVal{}, addedVals: []testVal{}, expectedVals: []testVal{{"v1", 1}, {"v2", 1}}, }, 1: { // remove high power validator, keep old different power validators - startVals: []testVal{{"v1", 1}, {"v2", 10}, {"v3", 1000}}, + startVals: []testVal{{"v3", 1000}, {"v2", 10}, {"v1", 1}}, deletedVals: []testVal{{"v3", 0}}, updatedVals: []testVal{}, addedVals: []testVal{}, - expectedVals: []testVal{{"v1", 1}, {"v2", 10}}, + expectedVals: []testVal{{"v2", 10}, {"v1", 1}}, }, 2: { // remove high power validator, add new low power validators, keep old lower power - startVals: []testVal{{"v1", 1}, {"v2", 2}, {"v3", 1000}}, + startVals: []testVal{{"v3", 1000}, {"v2", 2}, {"v1", 1}}, deletedVals: []testVal{{"v3", 0}}, updatedVals: []testVal{{"v2", 1}}, - addedVals: []testVal{{"v4", 40}, {"v5", 50}}, - expectedVals: []testVal{{"v1", 1}, {"v2", 1}, {"v4", 40}, {"v5", 50}}, + addedVals: []testVal{{"v5", 50}, {"v4", 40}}, + expectedVals: []testVal{{"v5", 50}, {"v4", 40}, {"v1", 1}, {"v2", 1}}, }, // generate a configuration with 100 validators, @@ -1208,10 +1382,6 @@ func TestValSetUpdatePriorityOrderTests(t *testing.T) { 6: randTestVSetCfg(t, 100, 1000), 7: randTestVSetCfg(t, 1000, 1000), - - 8: randTestVSetCfg(t, 10000, 1000), - - 9: randTestVSetCfg(t, 1000, 10000), } for _, cfg := range testCases { @@ -1225,26 +1395,26 @@ func TestValSetUpdatePriorityOrderTests(t *testing.T) { } } -func verifyValSetUpdatePriorityOrder(t *testing.T, valSet *ValidatorSet, cfg testVSetCfg, nMaxElections int) { +func verifyValSetUpdatePriorityOrder(t *testing.T, valSet *ValidatorSet, cfg testVSetCfg, nMaxElections int32) { // Run election up to nMaxElections times, sort validators by priorities - valSet.IncrementProposerPriority(tmrand.Int()%nMaxElections + 1) - origValsPriSorted := validatorListCopy(valSet.Validators) - 
sort.Sort(validatorsByPriority(origValsPriSorted)) + valSet.IncrementProposerPriority(tmrand.Int31()%nMaxElections + 1) // apply the changes, get the updated validators, sort by priorities - applyChangesToValSet(t, false, valSet, cfg.addedVals, cfg.updatedVals, cfg.deletedVals) - updatedValsPriSorted := validatorListCopy(valSet.Validators) - sort.Sort(validatorsByPriority(updatedValsPriSorted)) + applyChangesToValSet(t, nil, valSet, cfg.addedVals, cfg.updatedVals, cfg.deletedVals) // basic checks assert.Equal(t, cfg.expectedVals, toTestValList(valSet.Validators)) verifyValidatorSet(t, valSet) // verify that the added validators have the smallest priority: - // - they should be at the beginning of valListNewPriority since it is sorted by priority + // - they should be at the beginning of updatedValsPriSorted since it is + // sorted by priority if len(cfg.addedVals) > 0 { + updatedValsPriSorted := validatorListCopy(valSet.Validators) + sort.Sort(validatorsByPriority(updatedValsPriSorted)) + addedValsPriSlice := updatedValsPriSorted[:len(cfg.addedVals)] - sort.Sort(ValidatorsByAddress(addedValsPriSlice)) + sort.Sort(ValidatorsByVotingPower(addedValsPriSlice)) assert.Equal(t, cfg.addedVals, toTestValList(addedValsPriSlice)) // - and should all have the same priority @@ -1255,31 +1425,50 @@ func verifyValSetUpdatePriorityOrder(t *testing.T, valSet *ValidatorSet, cfg tes } } +func TestNewValidatorSetFromExistingValidators(t *testing.T) { + size := 5 + vals := make([]*Validator, size) + for i := 0; i < size; i++ { + pv := NewMockPV() + vals[i] = pv.ExtractIntoValidator(int64(i + 1)) + } + valSet := NewValidatorSet(vals) + valSet.IncrementProposerPriority(5) + + newValSet := NewValidatorSet(valSet.Validators) + assert.NotEqual(t, valSet, newValSet) + + existingValSet, err := ValidatorSetFromExistingValidators(valSet.Validators) + assert.NoError(t, err) + assert.Equal(t, valSet, existingValSet) + assert.Equal(t, valSet.CopyIncrementProposerPriority(3), existingValSet.CopyIncrementProposerPriority(3)) +} + func TestValSetUpdateOverflowRelated(t *testing.T) { testCases := []testVSetCfg{ { name: "1 no false overflow error messages for updates", - startVals: []testVal{{"v1", 1}, {"v2", MaxTotalVotingPower - 1}}, + startVals: []testVal{{"v2", MaxTotalVotingPower - 1}, {"v1", 1}}, updatedVals: []testVal{{"v1", MaxTotalVotingPower - 1}, {"v2", 1}}, expectedVals: []testVal{{"v1", MaxTotalVotingPower - 1}, {"v2", 1}}, - wantErr: false, + expErr: nil, }, { // this test shows that it is important to apply the updates in the order of the change in power // i.e. apply first updates with decreases in power, v2 change in this case. 
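			// Numerically (an illustration inferred from the values above):
			// applying v1's increase first would put the running total at
			// (MaxTotalVotingPower/2 - 1) + (MaxTotalVotingPower - 1), above
			// the cap, while applying v2's decrease to MaxTotalVotingPower/2
			// first keeps every intermediate total at or below
			// MaxTotalVotingPower.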
name: "2 no false overflow error messages for updates", - startVals: []testVal{{"v1", 1}, {"v2", MaxTotalVotingPower - 1}}, + startVals: []testVal{{"v2", MaxTotalVotingPower - 1}, {"v1", 1}}, updatedVals: []testVal{{"v1", MaxTotalVotingPower/2 - 1}, {"v2", MaxTotalVotingPower / 2}}, - expectedVals: []testVal{{"v1", MaxTotalVotingPower/2 - 1}, {"v2", MaxTotalVotingPower / 2}}, - wantErr: false, + expectedVals: []testVal{{"v2", MaxTotalVotingPower / 2}, {"v1", MaxTotalVotingPower/2 - 1}}, + expErr: nil, }, { name: "3 no false overflow error messages for deletes", startVals: []testVal{{"v1", MaxTotalVotingPower - 2}, {"v2", 1}, {"v3", 1}}, deletedVals: []testVal{{"v1", 0}}, addedVals: []testVal{{"v4", MaxTotalVotingPower - 2}}, - expectedVals: []testVal{{"v2", 1}, {"v3", 1}, {"v4", MaxTotalVotingPower - 2}}, - wantErr: false, + expectedVals: []testVal{{"v4", MaxTotalVotingPower - 2}, {"v2", 1}, {"v3", 1}}, + expErr: nil, }, { name: "4 no false overflow error messages for adds, updates and deletes", @@ -1291,8 +1480,8 @@ func TestValSetUpdateOverflowRelated(t *testing.T) { {"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v4", 2}}, addedVals: []testVal{{"v5", 3}}, expectedVals: []testVal{ - {"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v4", 2}, {"v5", 3}}, - wantErr: false, + {"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v5", 3}, {"v4", 2}}, + expErr: nil, }, { name: "5 check panic on overflow is prevented: update 8 validators with power int64(math.MaxInt64)/8", @@ -1306,7 +1495,7 @@ func TestValSetUpdateOverflowRelated(t *testing.T) { expectedVals: []testVal{ {"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}, {"v5", 1}, {"v6", 1}, {"v7", 1}, {"v8", 1}, {"v9", 1}}, - wantErr: true, + expErr: ErrTotalVotingPowerOverflow, }, } @@ -1317,7 +1506,7 @@ func TestValSetUpdateOverflowRelated(t *testing.T) { verifyValidatorSet(t, valSet) // execute update and verify returned error is as expected - applyChangesToValSet(t, tt.wantErr, valSet, tt.addedVals, tt.updatedVals, tt.deletedVals) + applyChangesToValSet(t, tt.expErr, valSet, tt.addedVals, tt.updatedVals, tt.deletedVals) // verify updated validator set is as expected assert.Equal(t, tt.expectedVals, toTestValList(valSet.Validators)) @@ -1326,10 +1515,10 @@ func TestValSetUpdateOverflowRelated(t *testing.T) { } } -func TestVerifyCommitTrusting(t *testing.T) { +func TestValidatorSet_VerifyCommitLightTrusting(t *testing.T) { var ( blockID = makeBlockIDRandom() - voteSet, originalValset, vals = randVoteSet(1, 1, PrecommitType, 6, 1) + voteSet, originalValset, vals = randVoteSet(1, 1, tmproto.PrecommitType, 6, 1) commit, err = MakeCommit(blockID, 1, 1, voteSet, vals, time.Now()) newValSet, _ = RandValidatorSet(2, 1) ) @@ -1357,7 +1546,7 @@ func TestVerifyCommitTrusting(t *testing.T) { } for _, tc := range testCases { - err = tc.valSet.VerifyCommitTrusting("test_chain_id", blockID, commit.Height, commit, + err = tc.valSet.VerifyCommitLightTrusting("test_chain_id", commit, tmmath.Fraction{Numerator: 1, Denominator: 3}) if tc.err { assert.Error(t, err) @@ -1365,7 +1554,89 @@ func TestVerifyCommitTrusting(t *testing.T) { assert.NoError(t, err) } } +} + +func TestValidatorSet_VerifyCommitLightTrustingErrorsOnOverflow(t *testing.T) { + var ( + blockID = makeBlockIDRandom() + voteSet, valSet, vals = randVoteSet(1, 1, tmproto.PrecommitType, 1, MaxTotalVotingPower) + commit, err = MakeCommit(blockID, 1, 1, voteSet, vals, time.Now()) + ) + require.NoError(t, err) + + err = 
valSet.VerifyCommitLightTrusting("test_chain_id", commit, + tmmath.Fraction{Numerator: 25, Denominator: 55}) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "int64 overflow") + } +} + +func TestSafeMul(t *testing.T) { + testCases := []struct { + a int64 + b int64 + c int64 + overflow bool + }{ + 0: {0, 0, 0, false}, + 1: {1, 0, 0, false}, + 2: {2, 3, 6, false}, + 3: {2, -3, -6, false}, + 4: {-2, -3, 6, false}, + 5: {-2, 3, -6, false}, + 6: {math.MaxInt64, 1, math.MaxInt64, false}, + 7: {math.MaxInt64 / 2, 2, math.MaxInt64 - 1, false}, + 8: {math.MaxInt64 / 2, 3, 0, true}, + 9: {math.MaxInt64, 2, 0, true}, + } + + for i, tc := range testCases { + c, overflow := safeMul(tc.a, tc.b) + assert.Equal(t, tc.c, c, "#%d", i) + assert.Equal(t, tc.overflow, overflow, "#%d", i) + } +} + +func TestValidatorSetProtoBuf(t *testing.T) { + valset, _ := RandValidatorSet(10, 100) + valset2, _ := RandValidatorSet(10, 100) + valset2.Validators[0] = &Validator{} + + valset3, _ := RandValidatorSet(10, 100) + valset3.Proposer = nil + + valset4, _ := RandValidatorSet(10, 100) + valset4.Proposer = &Validator{} + + testCases := []struct { + msg string + v1 *ValidatorSet + expPass1 bool + expPass2 bool + }{ + {"success", valset, true, true}, + {"fail valSet2, pubkey empty", valset2, false, false}, + {"fail nil Proposer", valset3, false, false}, + {"fail empty Proposer", valset4, false, false}, + {"fail empty valSet", &ValidatorSet{}, true, false}, + {"false nil", nil, true, false}, + } + for _, tc := range testCases { + protoValSet, err := tc.v1.ToProto() + if tc.expPass1 { + require.NoError(t, err, tc.msg) + } else { + require.Error(t, err, tc.msg) + } + valSet, err := ValidatorSetFromProto(protoValSet) + if tc.expPass2 { + require.NoError(t, err, tc.msg) + require.EqualValues(t, tc.v1, valSet, tc.msg) + } else { + require.Error(t, err, tc.msg) + } + } } //--------------------- @@ -1387,27 +1658,26 @@ func (valz validatorsByPriority) Less(i, j int) bool { } func (valz validatorsByPriority) Swap(i, j int) { - it := valz[i] - valz[i] = valz[j] - valz[j] = it + valz[i], valz[j] = valz[j], valz[i] } //------------------------------------- -// Sort testVal-s by address. 
-type testValsByAddress []testVal -func (tvals testValsByAddress) Len() int { +type testValsByVotingPower []testVal + +func (tvals testValsByVotingPower) Len() int { return len(tvals) } -func (tvals testValsByAddress) Less(i, j int) bool { - return bytes.Compare([]byte(tvals[i].name), []byte(tvals[j].name)) == -1 +func (tvals testValsByVotingPower) Less(i, j int) bool { + if tvals[i].power == tvals[j].power { + return bytes.Compare([]byte(tvals[i].name), []byte(tvals[j].name)) == -1 + } + return tvals[i].power > tvals[j].power } -func (tvals testValsByAddress) Swap(i, j int) { - it := tvals[i] - tvals[i] = tvals[j] - tvals[j] = it +func (tvals testValsByVotingPower) Swap(i, j int) { + tvals[i], tvals[j] = tvals[j], tvals[i] } //------------------------------------- diff --git a/types/validator_test.go b/types/validator_test.go new file mode 100644 index 000000000..5eb2ed7bf --- /dev/null +++ b/types/validator_test.go @@ -0,0 +1,99 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidatorProtoBuf(t *testing.T) { + val, _ := RandValidator(true, 100) + testCases := []struct { + msg string + v1 *Validator + expPass1 bool + expPass2 bool + }{ + {"success validator", val, true, true}, + {"failure empty", &Validator{}, false, false}, + {"failure nil", nil, false, false}, + } + for _, tc := range testCases { + protoVal, err := tc.v1.ToProto() + + if tc.expPass1 { + require.NoError(t, err, tc.msg) + } else { + require.Error(t, err, tc.msg) + } + + val, err := ValidatorFromProto(protoVal) + if tc.expPass2 { + require.NoError(t, err, tc.msg) + require.Equal(t, tc.v1, val, tc.msg) + } else { + require.Error(t, err, tc.msg) + } + } +} + +func TestValidatorValidateBasic(t *testing.T) { + priv := NewMockPV() + pubKey, _ := priv.GetPubKey() + testCases := []struct { + val *Validator + err bool + msg string + }{ + { + val: NewValidator(pubKey, 1), + err: false, + msg: "", + }, + { + val: nil, + err: true, + msg: "nil validator", + }, + { + val: &Validator{ + PubKey: nil, + }, + err: true, + msg: "validator does not have a public key", + }, + { + val: NewValidator(pubKey, -1), + err: true, + msg: "validator has negative voting power", + }, + { + val: &Validator{ + PubKey: pubKey, + Address: nil, + }, + err: true, + msg: "validator address is the wrong size: ", + }, + { + val: &Validator{ + PubKey: pubKey, + Address: []byte{'a'}, + }, + err: true, + msg: "validator address is the wrong size: 61", + }, + } + + for _, tc := range testCases { + err := tc.val.ValidateBasic() + if tc.err { + if assert.Error(t, err) { + assert.Equal(t, tc.msg, err.Error()) + } + } else { + assert.NoError(t, err) + } + } +} diff --git a/types/vote.go b/types/vote.go index da9134cd6..7b841c28a 100644 --- a/types/vote.go +++ b/types/vote.go @@ -8,12 +8,12 @@ import ( "github.com/tendermint/tendermint/crypto" tmbytes "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/libs/protoio" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) const ( - // MaxVoteBytes is a maximum vote size (including amino overhead). 
-	MaxVoteBytes int64  = 223
-	nilVoteStr   string = "nil-Vote"
+	nilVoteStr string = "nil-Vote"
 )
 
 var (
@@ -27,16 +27,18 @@
 )
 
 type ErrVoteConflictingVotes struct {
-	*DuplicateVoteEvidence
+	VoteA *Vote
+	VoteB *Vote
 }
 
 func (err *ErrVoteConflictingVotes) Error() string {
-	return fmt.Sprintf("Conflicting votes from validator %v", err.PubKey.Address())
+	return fmt.Sprintf("conflicting votes from validator %X", err.VoteA.ValidatorAddress)
 }
 
-func NewConflictingVoteError(val *Validator, vote1, vote2 *Vote) *ErrVoteConflictingVotes {
+func NewConflictingVoteError(vote1, vote2 *Vote) *ErrVoteConflictingVotes {
 	return &ErrVoteConflictingVotes{
-		NewDuplicateVoteEvidence(val.PubKey, vote1, vote2),
+		VoteA: vote1,
+		VoteB: vote2,
 	}
 }
 
@@ -46,14 +48,14 @@ type Address = crypto.Address
 
 // Vote represents a prevote, precommit, or commit vote from validators for
 // consensus.
 type Vote struct {
-	Type             SignedMsgType `json:"type"`
-	Height           int64         `json:"height"`
-	Round            int           `json:"round"`
-	BlockID          BlockID       `json:"block_id"` // zero if vote is nil.
-	Timestamp        time.Time     `json:"timestamp"`
-	ValidatorAddress Address       `json:"validator_address"`
-	ValidatorIndex   int           `json:"validator_index"`
-	Signature        []byte        `json:"signature"`
+	Type             tmproto.SignedMsgType `json:"type"`
+	Height           int64                 `json:"height"`
+	Round            int32                 `json:"round"`    // assume there will not be more than 2_147_483_647 rounds
+	BlockID          BlockID               `json:"block_id"` // zero if vote is nil.
+	Timestamp        time.Time             `json:"timestamp"`
+	ValidatorAddress Address               `json:"validator_address"`
+	ValidatorIndex   int32                 `json:"validator_index"`
+	Signature        []byte                `json:"signature"`
 }
 
 // CommitSig converts the Vote to a CommitSig.
@@ -80,11 +82,21 @@
 	}
 }
 
-func (vote *Vote) SignBytes(chainID string) []byte {
-	bz, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeVote(chainID, vote))
+// VoteSignBytes returns the proto-encoding of the canonicalized Vote, for
+// signing. Panics if the marshaling fails.
+//
+// The encoded Protobuf message is varint length-prefixed (using MarshalDelimited)
+// for backwards-compatibility with the Amino encoding, due to e.g. hardware
+// devices that rely on this encoding.
+//
+// See CanonicalizeVote.
+func VoteSignBytes(chainID string, vote *tmproto.Vote) []byte {
+	pb := CanonicalizeVote(chainID, vote)
+	bz, err := protoio.MarshalDelimited(&pb)
 	if err != nil {
 		panic(err)
 	}
+
 	return bz
 }
 
@@ -93,6 +105,17 @@ func (vote *Vote) Copy() *Vote {
 	return &voteCopy
 }
 
+// String returns a string representation of Vote.
+//
+// 1. validator index
+// 2. first 6 bytes of validator address
+// 3. height
+// 4. round
+// 5. type byte
+// 6. type string
+// 7. first 6 bytes of block hash
+// 8. first 6 bytes of signature
+// 9. timestamp
 func (vote *Vote) String() string {
 	if vote == nil {
 		return nilVoteStr
@@ -100,9 +123,9 @@
 
 	var typeString string
 	switch vote.Type {
-	case PrevoteType:
+	case tmproto.PrevoteType:
 		typeString = "Prevote"
-	case PrecommitType:
+	case tmproto.PrecommitType:
 		typeString = "Precommit"
 	default:
 		panic("Unknown vote type")
@@ -125,8 +148,8 @@ func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error {
 	if !bytes.Equal(pubKey.Address(), vote.ValidatorAddress) {
 		return ErrVoteInvalidValidatorAddress
 	}
-
-	if !pubKey.VerifyBytes(vote.SignBytes(chainID), vote.Signature) {
+	v := vote.ToProto()
+	if !pubKey.VerifySignature(VoteSignBytes(chainID, v), vote.Signature) {
 		return ErrVoteInvalidSignature
 	}
 	return nil
@@ -137,9 +160,11 @@ func (vote *Vote) ValidateBasic() error {
 	if !IsVoteTypeValid(vote.Type) {
 		return errors.New("invalid Type")
 	}
+
 	if vote.Height < 0 {
 		return errors.New("negative Height")
 	}
+
 	if vote.Round < 0 {
 		return errors.New("negative Round")
 	}
@@ -149,11 +174,13 @@ func (vote *Vote) ValidateBasic() error {
 	if err := vote.BlockID.ValidateBasic(); err != nil {
 		return fmt.Errorf("wrong BlockID: %v", err)
 	}
+
 	// BlockID.ValidateBasic would not err if we for instance have an empty hash but a
 	// non-empty PartsSetHeader:
 	if !vote.BlockID.IsZero() && !vote.BlockID.IsComplete() {
 		return fmt.Errorf("blockID must be either empty or complete, got: %v", vote.BlockID)
 	}
+
 	if len(vote.ValidatorAddress) != crypto.AddressSize {
 		return fmt.Errorf("expected ValidatorAddress size to be %d bytes, got %d bytes",
 			crypto.AddressSize,
@@ -166,8 +193,54 @@ func (vote *Vote) ValidateBasic() error {
 	if len(vote.Signature) == 0 {
 		return errors.New("signature is missing")
 	}
+
 	if len(vote.Signature) > MaxSignatureSize {
 		return fmt.Errorf("signature is too big (max: %d)", MaxSignatureSize)
 	}
+
 	return nil
 }
+
+// ToProto converts the handwritten Vote type to its proto-generated
+// counterpart. It returns nil if the vote itself is nil.
+func (vote *Vote) ToProto() *tmproto.Vote {
+	if vote == nil {
+		return nil
+	}
+
+	return &tmproto.Vote{
+		Type:             vote.Type,
+		Height:           vote.Height,
+		Round:            vote.Round,
+		BlockID:          vote.BlockID.ToProto(),
+		Timestamp:        vote.Timestamp,
+		ValidatorAddress: vote.ValidatorAddress,
+		ValidatorIndex:   vote.ValidatorIndex,
+		Signature:        vote.Signature,
+	}
+}
+
+// VoteFromProto converts a proto-generated type back to the handwritten type.
+// It returns the vote and nil if everything converts safely; otherwise it
+// returns nil and an error.
+func VoteFromProto(pv *tmproto.Vote) (*Vote, error) {
+	if pv == nil {
+		return nil, errors.New("nil vote")
+	}
+
+	blockID, err := BlockIDFromProto(&pv.BlockID)
+	if err != nil {
+		return nil, err
+	}
+
+	vote := new(Vote)
+	vote.Type = pv.Type
+	vote.Height = pv.Height
+	vote.Round = pv.Round
+	vote.BlockID = *blockID
+	vote.Timestamp = pv.Timestamp
+	vote.ValidatorAddress = pv.ValidatorAddress
+	vote.ValidatorIndex = pv.ValidatorIndex
+	vote.Signature = pv.Signature
+
+	return vote, vote.ValidateBasic()
+}
diff --git a/types/vote_set.go b/types/vote_set.go
index 82698fe51..42bde9856 100644
--- a/types/vote_set.go
+++ b/types/vote_set.go
@@ -4,11 +4,11 @@ import (
 	"bytes"
 	"fmt"
 	"strings"
-	"sync"
-
-	"github.com/pkg/errors"
 
 	"github.com/tendermint/tendermint/libs/bits"
+	tmjson "github.com/tendermint/tendermint/libs/json"
+	tmsync "github.com/tendermint/tendermint/libs/sync"
+	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 )
 
 const (
@@ -61,11 +61,11 @@ type P2PID string
 
 type VoteSet struct {
 	chainID       string
 	height
int64 - round int - signedMsgType SignedMsgType + round int32 + signedMsgType tmproto.SignedMsgType valSet *ValidatorSet - mtx sync.Mutex + mtx tmsync.Mutex votesBitArray *bits.BitArray votes []*Vote // Primary votes to share sum int64 // Sum of voting power for seen votes, discounting conflicts @@ -75,7 +75,8 @@ type VoteSet struct { } // Constructs a new VoteSet struct used to accumulate votes for given height/round. -func NewVoteSet(chainID string, height int64, round int, signedMsgType SignedMsgType, valSet *ValidatorSet) *VoteSet { +func NewVoteSet(chainID string, height int64, round int32, + signedMsgType tmproto.SignedMsgType, valSet *ValidatorSet) *VoteSet { if height == 0 { panic("Cannot make VoteSet for height == 0, doesn't make sense.") } @@ -107,7 +108,7 @@ func (voteSet *VoteSet) GetHeight() int64 { } // Implements VoteSetReader. -func (voteSet *VoteSet) GetRound() int { +func (voteSet *VoteSet) GetRound() int32 { if voteSet == nil { return -1 } @@ -160,33 +161,34 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { // Ensure that validator index was set if valIndex < 0 { - return false, errors.Wrap(ErrVoteInvalidValidatorIndex, "Index < 0") + return false, fmt.Errorf("index < 0: %w", ErrVoteInvalidValidatorIndex) } else if len(valAddr) == 0 { - return false, errors.Wrap(ErrVoteInvalidValidatorAddress, "Empty address") + return false, fmt.Errorf("empty address: %w", ErrVoteInvalidValidatorAddress) } // Make sure the step matches. if (vote.Height != voteSet.height) || (vote.Round != voteSet.round) || (vote.Type != voteSet.signedMsgType) { - return false, errors.Wrapf(ErrVoteUnexpectedStep, "Expected %d/%d/%d, but got %d/%d/%d", + return false, fmt.Errorf("expected %d/%d/%d, but got %d/%d/%d: %w", voteSet.height, voteSet.round, voteSet.signedMsgType, - vote.Height, vote.Round, vote.Type) + vote.Height, vote.Round, vote.Type, ErrVoteUnexpectedStep) } // Ensure that signer is a validator. lookupAddr, val := voteSet.valSet.GetByIndex(valIndex) if val == nil { - return false, errors.Wrapf(ErrVoteInvalidValidatorIndex, - "Cannot find validator %d in valSet of size %d", valIndex, voteSet.valSet.Size()) + return false, fmt.Errorf( + "cannot find validator %d in valSet of size %d: %w", + valIndex, voteSet.valSet.Size(), ErrVoteInvalidValidatorIndex) } // Ensure that the signer has the right address. if !bytes.Equal(valAddr, lookupAddr) { - return false, errors.Wrapf(ErrVoteInvalidValidatorAddress, + return false, fmt.Errorf( "vote.ValidatorAddress (%X) does not match address (%X) for vote.ValidatorIndex (%d)\n"+ - "Ensure the genesis file is correct across all validators.", - valAddr, lookupAddr, valIndex) + "Ensure the genesis file is correct across all validators: %w", + valAddr, lookupAddr, valIndex, ErrVoteInvalidValidatorAddress) } // If we already know of this vote, return false. @@ -194,18 +196,18 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { if bytes.Equal(existing.Signature, vote.Signature) { return false, nil // duplicate } - return false, errors.Wrapf(ErrVoteNonDeterministicSignature, "Existing vote: %v; New vote: %v", existing, vote) + return false, fmt.Errorf("existing vote: %v; new vote: %v: %w", existing, vote, ErrVoteNonDeterministicSignature) } // Check signature. 
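 	// (Per Vote.Verify in vote.go above: this recomputes
 	// VoteSignBytes(chainID, vote.ToProto()) and checks it against
 	// vote.Signature using val.PubKey.VerifySignature.)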
if err := vote.Verify(voteSet.chainID, val.PubKey); err != nil { - return false, errors.Wrapf(err, "Failed to verify vote with ChainID %s and PubKey %s", voteSet.chainID, val.PubKey) + return false, fmt.Errorf("failed to verify vote with ChainID %s and PubKey %s: %w", voteSet.chainID, val.PubKey, err) } // Add vote and get conflicting vote if any. added, conflicting := voteSet.addVerifiedVote(vote, blockKey, val.VotingPower) if conflicting != nil { - return added, NewConflictingVoteError(val, conflicting, vote) + return added, NewConflictingVoteError(conflicting, vote) } if !added { panic("Expected to add non-conflicting vote") @@ -214,7 +216,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { } // Returns (vote, true) if vote exists for valIndex and blockKey. -func (voteSet *VoteSet) getVote(valIndex int, blockKey string) (vote *Vote, ok bool) { +func (voteSet *VoteSet) getVote(valIndex int32, blockKey string) (vote *Vote, ok bool) { if existing := voteSet.votes[valIndex]; existing != nil && existing.BlockID.Key() == blockKey { return existing, true } @@ -243,13 +245,13 @@ func (voteSet *VoteSet) addVerifiedVote( // Replace vote if blockKey matches voteSet.maj23. if voteSet.maj23 != nil && voteSet.maj23.Key() == blockKey { voteSet.votes[valIndex] = vote - voteSet.votesBitArray.SetIndex(valIndex, true) + voteSet.votesBitArray.SetIndex(int(valIndex), true) } // Otherwise don't add it to voteSet.votes } else { // Add to voteSet.votes and incr .sum voteSet.votes[valIndex] = vote - voteSet.votesBitArray.SetIndex(valIndex, true) + voteSet.votesBitArray.SetIndex(int(valIndex), true) voteSet.sum += votingPower } @@ -364,7 +366,7 @@ func (voteSet *VoteSet) BitArrayByBlockID(blockID BlockID) *bits.BitArray { // NOTE: if validator has conflicting votes, returns "canonical" vote // Implements VoteSetReader. -func (voteSet *VoteSet) GetByIndex(valIndex int) *Vote { +func (voteSet *VoteSet) GetByIndex(valIndex int32) *Vote { if voteSet == nil { return nil } @@ -400,7 +402,7 @@ func (voteSet *VoteSet) IsCommit() bool { if voteSet == nil { return false } - if voteSet.signedMsgType != PrecommitType { + if voteSet.signedMsgType != tmproto.PrecommitType { return false } voteSet.mtx.Lock() @@ -440,6 +442,9 @@ func (voteSet *VoteSet) TwoThirdsMajority() (blockID BlockID, ok bool) { //-------------------------------------------------------------------------------- // Strings and JSON +// String returns a string representation of VoteSet. +// +// See StringIndented. func (voteSet *VoteSet) String() string { if voteSet == nil { return "nil-VoteSet" @@ -447,9 +452,18 @@ func (voteSet *VoteSet) String() string { return voteSet.StringIndented("") } +// StringIndented returns an indented String. +// +// Height Round Type +// Votes +// Votes bit array +// 2/3+ majority +// +// See Vote#String. func (voteSet *VoteSet) StringIndented(indent string) string { voteSet.mtx.Lock() defer voteSet.mtx.Unlock() + voteStrings := make([]string, len(voteSet.votes)) for i, vote := range voteSet.votes { if vote == nil { @@ -476,7 +490,7 @@ func (voteSet *VoteSet) StringIndented(indent string) string { func (voteSet *VoteSet) MarshalJSON() ([]byte, error) { voteSet.mtx.Lock() defer voteSet.mtx.Unlock() - return cdc.MarshalJSON(VoteSetJSON{ + return tmjson.Marshal(VoteSetJSON{ voteSet.voteStrings(), voteSet.bitArrayString(), voteSet.peerMaj23s, @@ -526,6 +540,15 @@ func (voteSet *VoteSet) voteStrings() []string { return voteStrings } +// StringShort returns a short representation of VoteSet. +// +// 1. 
height +// 2. round +// 3. signed msg type +// 4. first 2/3+ majority +// 5. fraction of voted power +// 6. votes bit array +// 7. 2/3+ majority for each peer func (voteSet *VoteSet) StringShort() string { if voteSet == nil { return "nil-VoteSet" @@ -547,11 +570,13 @@ func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) { //-------------------------------------------------------------------------------- // Commit -// MakeCommit constructs a Commit from the VoteSet. -// Panics if the vote type is not PrecommitType or if -// there's no +2/3 votes for a single block. +// MakeCommit constructs a Commit from the VoteSet. It only includes precommits +// for the block, which has 2/3+ majority, and nil. +// +// Panics if the vote type is not PrecommitType or if there's no +2/3 votes for +// a single block. func (voteSet *VoteSet) MakeCommit() *Commit { - if voteSet.signedMsgType != PrecommitType { + if voteSet.signedMsgType != tmproto.PrecommitType { panic("Cannot MakeCommit() unless VoteSet.Type is PrecommitType") } voteSet.mtx.Lock() @@ -565,7 +590,12 @@ func (voteSet *VoteSet) MakeCommit() *Commit { // For every validator, get the precommit commitSigs := make([]CommitSig, len(voteSet.votes)) for i, v := range voteSet.votes { - commitSigs[i] = v.CommitSig() + commitSig := v.CommitSig() + // if block ID exists but doesn't match, exclude sig + if commitSig.ForBlock() && !v.BlockID.Equals(*voteSet.maj23) { + commitSig = NewCommitSigAbsent() + } + commitSigs[i] = commitSig } return NewCommit(voteSet.GetHeight(), voteSet.GetRound(), *voteSet.maj23, commitSigs) @@ -598,13 +628,13 @@ func newBlockVotes(peerMaj23 bool, numValidators int) *blockVotes { func (vs *blockVotes) addVerifiedVote(vote *Vote, votingPower int64) { valIndex := vote.ValidatorIndex if existing := vs.votes[valIndex]; existing == nil { - vs.bitArray.SetIndex(valIndex, true) + vs.bitArray.SetIndex(int(valIndex), true) vs.votes[valIndex] = vote vs.sum += votingPower } } -func (vs *blockVotes) getByIndex(index int) *Vote { +func (vs *blockVotes) getByIndex(index int32) *Vote { if vs == nil { return nil } @@ -616,10 +646,10 @@ func (vs *blockVotes) getByIndex(index int) *Vote { // Common interface between *consensus.VoteSet and types.Commit type VoteSetReader interface { GetHeight() int64 - GetRound() int + GetRound() int32 Type() byte Size() int BitArray() *bits.BitArray - GetByIndex(int) *Vote + GetByIndex(int32) *Vote IsCommit() bool } diff --git a/types/vote_set_test.go b/types/vote_set_test.go index fc4eb76f3..4899b04b2 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -9,140 +9,141 @@ import ( "github.com/tendermint/tendermint/crypto" tmrand "github.com/tendermint/tendermint/libs/rand" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" ) -// NOTE: privValidators are in order -func randVoteSet( - height int64, - round int, - signedMsgType SignedMsgType, - numValidators int, - votingPower int64, -) (*VoteSet, *ValidatorSet, []PrivValidator) { - valSet, privValidators := RandValidatorSet(numValidators, votingPower) - return NewVoteSet("test_chain_id", height, round, signedMsgType, valSet), valSet, privValidators -} - -// Convenience: Return new vote with different validator address/index -func withValidator(vote *Vote, addr []byte, idx int) *Vote { - vote = vote.Copy() - vote.ValidatorAddress = addr - vote.ValidatorIndex = idx - return vote -} - -// Convenience: Return new vote with different height -func withHeight(vote *Vote, height 
int64) *Vote { - vote = vote.Copy() - vote.Height = height - return vote -} - -// Convenience: Return new vote with different round -func withRound(vote *Vote, round int) *Vote { - vote = vote.Copy() - vote.Round = round - return vote -} - -// Convenience: Return new vote with different type -func withType(vote *Vote, signedMsgType byte) *Vote { - vote = vote.Copy() - vote.Type = SignedMsgType(signedMsgType) - return vote -} - -// Convenience: Return new vote with different blockHash -func withBlockHash(vote *Vote, blockHash []byte) *Vote { - vote = vote.Copy() - vote.BlockID.Hash = blockHash - return vote -} - -// Convenience: Return new vote with different blockParts -func withBlockPartsHeader(vote *Vote, blockPartsHeader PartSetHeader) *Vote { - vote = vote.Copy() - vote.BlockID.PartsHeader = blockPartsHeader - return vote -} - -func TestAddVote(t *testing.T) { - height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) +func TestVoteSet_AddVote_Good(t *testing.T) { + height, round := int64(1), int32(0) + voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 10, 1) val0 := privValidators[0] - // t.Logf(">> %v", voteSet) - val0p, err := val0.GetPubKey() require.NoError(t, err) val0Addr := val0p.Address() - if voteSet.GetByAddress(val0Addr) != nil { - t.Errorf("expected GetByAddress(val0.Address) to be nil") - } - if voteSet.BitArray().GetIndex(0) { - t.Errorf("expected BitArray.GetIndex(0) to be false") - } + assert.Nil(t, voteSet.GetByAddress(val0Addr)) + assert.False(t, voteSet.BitArray().GetIndex(0)) blockID, ok := voteSet.TwoThirdsMajority() - if ok || !blockID.IsZero() { - t.Errorf("there should be no 2/3 majority") - } + assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority") vote := &Vote{ ValidatorAddress: val0Addr, ValidatorIndex: 0, // since privValidators are in order Height: height, Round: round, - Type: PrevoteType, + Type: tmproto.PrevoteType, Timestamp: tmtime.Now(), BlockID: BlockID{nil, PartSetHeader{}}, } _, err = signAddVote(val0, vote, voteSet) - if err != nil { - t.Error(err) + require.NoError(t, err) + + assert.NotNil(t, voteSet.GetByAddress(val0Addr)) + assert.True(t, voteSet.BitArray().GetIndex(0)) + blockID, ok = voteSet.TwoThirdsMajority() + assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority") +} + +func TestVoteSet_AddVote_Bad(t *testing.T) { + height, round := int64(1), int32(0) + voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 10, 1) + + voteProto := &Vote{ + ValidatorAddress: nil, + ValidatorIndex: -1, + Height: height, + Round: round, + Timestamp: tmtime.Now(), + Type: tmproto.PrevoteType, + BlockID: BlockID{nil, PartSetHeader{}}, } - if voteSet.GetByAddress(val0Addr) == nil { - t.Errorf("expected GetByAddress(val0.Address) to be present") + // val0 votes for nil. + { + pubKey, err := privValidators[0].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() + vote := withValidator(voteProto, addr, 0) + added, err := signAddVote(privValidators[0], vote, voteSet) + if !added || err != nil { + t.Errorf("expected VoteSet.Add to succeed") + } } - if !voteSet.BitArray().GetIndex(0) { - t.Errorf("expected BitArray.GetIndex(0) to be true") + + // val0 votes again for some block. 
+ { + pubKey, err := privValidators[0].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() + vote := withValidator(voteProto, addr, 0) + added, err := signAddVote(privValidators[0], withBlockHash(vote, tmrand.Bytes(32)), voteSet) + if added || err == nil { + t.Errorf("expected VoteSet.Add to fail, conflicting vote.") + } } - blockID, ok = voteSet.TwoThirdsMajority() - if ok || !blockID.IsZero() { - t.Errorf("there should be no 2/3 majority") + + // val1 votes on another height + { + pubKey, err := privValidators[1].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() + vote := withValidator(voteProto, addr, 1) + added, err := signAddVote(privValidators[1], withHeight(vote, height+1), voteSet) + if added || err == nil { + t.Errorf("expected VoteSet.Add to fail, wrong height") + } + } + + // val2 votes on another round + { + pubKey, err := privValidators[2].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() + vote := withValidator(voteProto, addr, 2) + added, err := signAddVote(privValidators[2], withRound(vote, round+1), voteSet) + if added || err == nil { + t.Errorf("expected VoteSet.Add to fail, wrong round") + } + } + + // val3 votes of another type. + { + pubKey, err := privValidators[3].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() + vote := withValidator(voteProto, addr, 3) + added, err := signAddVote(privValidators[3], withType(vote, byte(tmproto.PrecommitType)), voteSet) + if added || err == nil { + t.Errorf("expected VoteSet.Add to fail, wrong type") + } } } -func Test2_3Majority(t *testing.T) { - height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) +func TestVoteSet_2_3Majority(t *testing.T) { + height, round := int64(1), int32(0) + voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 10, 1) voteProto := &Vote{ ValidatorAddress: nil, // NOTE: must fill in ValidatorIndex: -1, // NOTE: must fill in Height: height, Round: round, - Type: PrevoteType, + Type: tmproto.PrevoteType, Timestamp: tmtime.Now(), BlockID: BlockID{nil, PartSetHeader{}}, } // 6 out of 10 voted for nil. - for i := 0; i < 6; i++ { + for i := int32(0); i < 6; i++ { pubKey, err := privValidators[i].GetPubKey() require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, i) _, err = signAddVote(privValidators[i], vote, voteSet) - if err != nil { - t.Error(err) - } + require.NoError(t, err) } blockID, ok := voteSet.TwoThirdsMajority() - if ok || !blockID.IsZero() { - t.Errorf("there should be no 2/3 majority") - } + assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority") // 7th validator voted for some blockhash { @@ -151,13 +152,9 @@ func Test2_3Majority(t *testing.T) { addr := pubKey.Address() vote := withValidator(voteProto, addr, 6) _, err = signAddVote(privValidators[6], withBlockHash(vote, tmrand.Bytes(32)), voteSet) - if err != nil { - t.Error(err) - } + require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - if ok || !blockID.IsZero() { - t.Errorf("there should be no 2/3 majority") - } + assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority") } // 8th validator voted for nil. 
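The assertions in this test rest on simple threshold arithmetic; a minimal sketch of the 2/3 check, assuming the vote set compares summed voting power against the total (helper name hypothetical):

	// twoThirdsReached reports whether the tallied power strictly exceeds
	// 2/3 of the total. With ten validators of power 1 the total is 10, so
	// 7 matching votes is the smallest tally that passes; that is why the
	// 8th nil prevote below produces a 2/3 majority for nil.
	func twoThirdsReached(tallied, total int64) bool {
		return tallied*3 > total*2
	}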
@@ -167,23 +164,19 @@ func Test2_3Majority(t *testing.T) { addr := pubKey.Address() vote := withValidator(voteProto, addr, 7) _, err = signAddVote(privValidators[7], vote, voteSet) - if err != nil { - t.Error(err) - } + require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - if !ok || !blockID.IsZero() { - t.Errorf("there should be 2/3 majority for nil") - } + assert.True(t, ok || blockID.IsZero(), "there should be 2/3 majority for nil") } } -func Test2_3MajorityRedux(t *testing.T) { - height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 100, 1) +func TestVoteSet_2_3MajorityRedux(t *testing.T) { + height, round := int64(1), int32(0) + voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 100, 1) blockHash := crypto.CRandBytes(32) - blockPartsTotal := 123 - blockPartsHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)} + blockPartsTotal := uint32(123) + blockPartSetHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)} voteProto := &Vote{ ValidatorAddress: nil, // NOTE: must fill in @@ -191,25 +184,22 @@ func Test2_3MajorityRedux(t *testing.T) { Height: height, Round: round, Timestamp: tmtime.Now(), - Type: PrevoteType, - BlockID: BlockID{blockHash, blockPartsHeader}, + Type: tmproto.PrevoteType, + BlockID: BlockID{blockHash, blockPartSetHeader}, } // 66 out of 100 voted for nil. - for i := 0; i < 66; i++ { + for i := int32(0); i < 66; i++ { pubKey, err := privValidators[i].GetPubKey() require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, i) _, err = signAddVote(privValidators[i], vote, voteSet) - if err != nil { - t.Error(err) - } + require.NoError(t, err) } blockID, ok := voteSet.TwoThirdsMajority() - if ok || !blockID.IsZero() { - t.Errorf("there should be no 2/3 majority") - } + assert.False(t, ok || !blockID.IsZero(), + "there should be no 2/3 majority") // 67th validator voted for nil { @@ -218,13 +208,10 @@ func Test2_3MajorityRedux(t *testing.T) { adrr := pubKey.Address() vote := withValidator(voteProto, adrr, 66) _, err = signAddVote(privValidators[66], withBlockHash(vote, nil), voteSet) - if err != nil { - t.Error(err) - } + require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - if ok || !blockID.IsZero() { - t.Errorf("there should be no 2/3 majority: last vote added was nil") - } + assert.False(t, ok || !blockID.IsZero(), + "there should be no 2/3 majority: last vote added was nil") } // 68th validator voted for a different BlockParts PartSetHeader @@ -234,14 +221,11 @@ func Test2_3MajorityRedux(t *testing.T) { addr := pubKey.Address() vote := withValidator(voteProto, addr, 67) blockPartsHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)} - _, err = signAddVote(privValidators[67], withBlockPartsHeader(vote, blockPartsHeader), voteSet) - if err != nil { - t.Error(err) - } + _, err = signAddVote(privValidators[67], withBlockPartSetHeader(vote, blockPartsHeader), voteSet) + require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - if ok || !blockID.IsZero() { - t.Errorf("there should be no 2/3 majority: last vote added had different PartSetHeader Hash") - } + assert.False(t, ok || !blockID.IsZero(), + "there should be no 2/3 majority: last vote added had different PartSetHeader Hash") } // 69th validator voted for different BlockParts Total @@ -250,15 +234,12 @@ func Test2_3MajorityRedux(t *testing.T) { require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 68) - blockPartsHeader 
:= PartSetHeader{blockPartsTotal + 1, blockPartsHeader.Hash} - _, err = signAddVote(privValidators[68], withBlockPartsHeader(vote, blockPartsHeader), voteSet) - if err != nil { - t.Error(err) - } + blockPartsHeader := PartSetHeader{blockPartsTotal + 1, blockPartSetHeader.Hash} + _, err = signAddVote(privValidators[68], withBlockPartSetHeader(vote, blockPartsHeader), voteSet) + require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - if ok || !blockID.IsZero() { - t.Errorf("there should be no 2/3 majority: last vote added had different PartSetHeader Total") - } + assert.False(t, ok || !blockID.IsZero(), + "there should be no 2/3 majority: last vote added had different PartSetHeader Total") } // 70th validator voted for different BlockHash @@ -268,110 +249,29 @@ func Test2_3MajorityRedux(t *testing.T) { addr := pubKey.Address() vote := withValidator(voteProto, addr, 69) _, err = signAddVote(privValidators[69], withBlockHash(vote, tmrand.Bytes(32)), voteSet) - if err != nil { - t.Error(err) - } + require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - if ok || !blockID.IsZero() { - t.Errorf("there should be no 2/3 majority: last vote added had different BlockHash") - } + assert.False(t, ok || !blockID.IsZero(), + "there should be no 2/3 majority: last vote added had different BlockHash") } - // 71st validator voted for the right BlockHash & BlockPartsHeader + // 71st validator voted for the right BlockHash & BlockPartSetHeader { pubKey, err := privValidators[70].GetPubKey() require.NoError(t, err) addr := pubKey.Address() vote := withValidator(voteProto, addr, 70) _, err = signAddVote(privValidators[70], vote, voteSet) - if err != nil { - t.Error(err) - } - blockID, ok = voteSet.TwoThirdsMajority() - if !ok || !blockID.Equals(BlockID{blockHash, blockPartsHeader}) { - t.Errorf("there should be 2/3 majority") - } - } -} - -func TestBadVotes(t *testing.T) { - height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) - - voteProto := &Vote{ - ValidatorAddress: nil, - ValidatorIndex: -1, - Height: height, - Round: round, - Timestamp: tmtime.Now(), - Type: PrevoteType, - BlockID: BlockID{nil, PartSetHeader{}}, - } - - // val0 votes for nil. - { - pubKey, err := privValidators[0].GetPubKey() - require.NoError(t, err) - addr := pubKey.Address() - vote := withValidator(voteProto, addr, 0) - added, err := signAddVote(privValidators[0], vote, voteSet) - if !added || err != nil { - t.Errorf("expected VoteSet.Add to succeed") - } - } - - // val0 votes again for some block. 
- { - pubKey, err := privValidators[0].GetPubKey() require.NoError(t, err) - addr := pubKey.Address() - vote := withValidator(voteProto, addr, 0) - added, err := signAddVote(privValidators[0], withBlockHash(vote, tmrand.Bytes(32)), voteSet) - if added || err == nil { - t.Errorf("expected VoteSet.Add to fail, conflicting vote.") - } - } - - // val1 votes on another height - { - pubKey, err := privValidators[1].GetPubKey() - require.NoError(t, err) - addr := pubKey.Address() - vote := withValidator(voteProto, addr, 1) - added, err := signAddVote(privValidators[1], withHeight(vote, height+1), voteSet) - if added || err == nil { - t.Errorf("expected VoteSet.Add to fail, wrong height") - } - } - - // val2 votes on another round - { - pubKey, err := privValidators[2].GetPubKey() - require.NoError(t, err) - addr := pubKey.Address() - vote := withValidator(voteProto, addr, 2) - added, err := signAddVote(privValidators[2], withRound(vote, round+1), voteSet) - if added || err == nil { - t.Errorf("expected VoteSet.Add to fail, wrong round") - } - } - - // val3 votes of another type. - { - pubKey, err := privValidators[3].GetPubKey() - require.NoError(t, err) - addr := pubKey.Address() - vote := withValidator(voteProto, addr, 3) - added, err := signAddVote(privValidators[3], withType(vote, byte(PrecommitType)), voteSet) - if added || err == nil { - t.Errorf("expected VoteSet.Add to fail, wrong type") - } + blockID, ok = voteSet.TwoThirdsMajority() + assert.True(t, ok && blockID.Equals(BlockID{blockHash, blockPartSetHeader}), + "there should be 2/3 majority") } } -func TestConflicts(t *testing.T) { - height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 4, 1) +func TestVoteSet_Conflicts(t *testing.T) { + height, round := int64(1), int32(0) + voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 4, 1) blockHash1 := tmrand.Bytes(32) blockHash2 := tmrand.Bytes(32) @@ -381,7 +281,7 @@ func TestConflicts(t *testing.T) { Height: height, Round: round, Timestamp: tmtime.Now(), - Type: PrevoteType, + Type: tmproto.PrevoteType, BlockID: BlockID{nil, PartSetHeader{}}, } @@ -402,42 +302,32 @@ func TestConflicts(t *testing.T) { { vote := withValidator(voteProto, val0Addr, 0) added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet) - if added { - t.Errorf("expected VoteSet.Add to fail, conflicting vote.") - } - if err == nil { - t.Errorf("expected VoteSet.Add to return error, conflicting vote.") - } + assert.False(t, added, "conflicting vote") + assert.Error(t, err, "conflicting vote") } // start tracking blockHash1 - voteSet.SetPeerMaj23("peerA", BlockID{blockHash1, PartSetHeader{}}) + err = voteSet.SetPeerMaj23("peerA", BlockID{blockHash1, PartSetHeader{}}) + require.NoError(t, err) // val0 votes again for blockHash1. { vote := withValidator(voteProto, val0Addr, 0) added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet) - if !added { - t.Errorf("expected VoteSet.Add to succeed, called SetPeerMaj23().") - } - if err == nil { - t.Errorf("expected VoteSet.Add to return error, conflicting vote.") - } + assert.True(t, added, "called SetPeerMaj23()") + assert.Error(t, err, "conflicting vote") } // attempt tracking blockHash2, should fail because already set for peerA. - voteSet.SetPeerMaj23("peerA", BlockID{blockHash2, PartSetHeader{}}) + err = voteSet.SetPeerMaj23("peerA", BlockID{blockHash2, PartSetHeader{}}) + require.Error(t, err) // val0 votes again for blockHash1. 
@@ -402,42 +302,32 @@ func TestConflicts(t *testing.T) {
 	{
 		vote := withValidator(voteProto, val0Addr, 0)
 		added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet)
-		if added {
-			t.Errorf("expected VoteSet.Add to fail, conflicting vote.")
-		}
-		if err == nil {
-			t.Errorf("expected VoteSet.Add to return error, conflicting vote.")
-		}
+		assert.False(t, added, "conflicting vote")
+		assert.Error(t, err, "conflicting vote")
 	}

 	// start tracking blockHash1
-	voteSet.SetPeerMaj23("peerA", BlockID{blockHash1, PartSetHeader{}})
+	err = voteSet.SetPeerMaj23("peerA", BlockID{blockHash1, PartSetHeader{}})
+	require.NoError(t, err)

 	// val0 votes again for blockHash1.
 	{
 		vote := withValidator(voteProto, val0Addr, 0)
 		added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet)
-		if !added {
-			t.Errorf("expected VoteSet.Add to succeed, called SetPeerMaj23().")
-		}
-		if err == nil {
-			t.Errorf("expected VoteSet.Add to return error, conflicting vote.")
-		}
+		assert.True(t, added, "called SetPeerMaj23()")
+		assert.Error(t, err, "conflicting vote")
 	}

 	// attempt tracking blockHash2, should fail because already set for peerA.
-	voteSet.SetPeerMaj23("peerA", BlockID{blockHash2, PartSetHeader{}})
+	err = voteSet.SetPeerMaj23("peerA", BlockID{blockHash2, PartSetHeader{}})
+	require.Error(t, err)

 	// val0 votes again, this time for blockHash2.
 	{
 		vote := withValidator(voteProto, val0Addr, 0)
 		added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash2), voteSet)
-		if added {
-			t.Errorf("expected VoteSet.Add to fail, duplicate SetPeerMaj23() from peerA")
-		}
-		if err == nil {
-			t.Errorf("expected VoteSet.Add to return error, conflicting vote.")
-		}
+		assert.False(t, added, "duplicate SetPeerMaj23() from peerA")
+		assert.Error(t, err, "conflicting vote")
 	}

 	// val1 votes for blockHash1.
@@ -481,7 +371,8 @@ func TestConflicts(t *testing.T) {
 	}

 	// now attempt tracking blockHash1
-	voteSet.SetPeerMaj23("peerB", BlockID{blockHash1, PartSetHeader{}})
+	err = voteSet.SetPeerMaj23("peerB", BlockID{blockHash1, PartSetHeader{}})
+	require.NoError(t, err)

 	// val2 votes for blockHash1.
 	{
@@ -490,12 +381,8 @@ func TestConflicts(t *testing.T) {
 		addr := pv.Address()
 		vote := withValidator(voteProto, addr, 2)
 		added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash1), voteSet)
-		if !added {
-			t.Errorf("expected VoteSet.Add to succeed")
-		}
-		if err == nil {
-			t.Errorf("expected VoteSet.Add to return error, conflicting vote")
-		}
+		assert.True(t, added)
+		assert.Error(t, err, "conflicting vote")
 	}

 	// check
@@ -509,13 +396,12 @@ func TestConflicts(t *testing.T) {
 	if !voteSet.HasTwoThirdsAny() {
 		t.Errorf("we should have 2/3 if any votes")
 	}
-
 }

-func TestMakeCommit(t *testing.T) {
-	height, round := int64(1), 0
-	voteSet, _, privValidators := randVoteSet(height, round, PrecommitType, 10, 1)
-	blockHash, blockPartsHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)}
+func TestVoteSet_MakeCommit(t *testing.T) {
+	height, round := int64(1), int32(0)
+	voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrecommitType, 10, 1)
+	blockHash, blockPartSetHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)}

 	voteProto := &Vote{
 		ValidatorAddress: nil,
@@ -523,12 +409,12 @@ func TestMakeCommit(t *testing.T) {
 		Height:    height,
 		Round:     round,
 		Timestamp: tmtime.Now(),
-		Type:      PrecommitType,
-		BlockID:   BlockID{blockHash, blockPartsHeader},
+		Type:      tmproto.PrecommitType,
+		BlockID:   BlockID{blockHash, blockPartSetHeader},
 	}

 	// 6 out of 10 voted for some block.
-	for i := 0; i < 6; i++ {
+	for i := int32(0); i < 6; i++ {
 		pv, err := privValidators[i].GetPubKey()
 		assert.NoError(t, err)
 		addr := pv.Address()
@@ -549,12 +435,10 @@ func TestMakeCommit(t *testing.T) {
 		addr := pv.Address()
 		vote := withValidator(voteProto, addr, 6)
 		vote = withBlockHash(vote, tmrand.Bytes(32))
-		vote = withBlockPartsHeader(vote, PartSetHeader{123, tmrand.Bytes(32)})
+		vote = withBlockPartSetHeader(vote, PartSetHeader{123, tmrand.Bytes(32)})

 		_, err = signAddVote(privValidators[6], vote, voteSet)
-		if err != nil {
-			t.Error(err)
-		}
+		require.NoError(t, err)
 	}

 	// The 8th voted like everyone else.
@@ -564,9 +448,7 @@ func TestMakeCommit(t *testing.T) {
 		addr := pv.Address()
 		vote := withValidator(voteProto, addr, 7)
 		_, err = signAddVote(privValidators[7], vote, voteSet)
-		if err != nil {
-			t.Error(err)
-		}
+		require.NoError(t, err)
 	}

 	// The 9th voted for nil.
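The `TestVoteSet_MakeCommit` hunks above stage ten equal-power validators: six precommits for one block are not yet a +2/3 majority, and the eighth validator's matching vote (the seventh for that block) is what crosses the threshold. The integer arithmetic behind that, as a standalone illustration (the helper name is ours, not from the codebase):

```go
package main

import "fmt"

// hasTwoThirdsMajority reports whether power strictly exceeds 2/3 of total,
// written as power*3 > total*2 to stay in integer arithmetic.
func hasTwoThirdsMajority(power, total int64) bool {
	return power*3 > total*2
}

func main() {
	const total = 10 // ten validators with voting power 1 each
	fmt.Println(hasTwoThirdsMajority(6, total)) // false: 18 > 20 fails
	fmt.Println(hasTwoThirdsMajority(7, total)) // true:  21 > 20
}
```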
@@ -578,20 +460,71 @@ func TestMakeCommit(t *testing.T) {
 		vote.BlockID = BlockID{}

 		_, err = signAddVote(privValidators[8], vote, voteSet)
-		if err != nil {
-			t.Error(err)
-		}
+		require.NoError(t, err)
 	}

 	commit := voteSet.MakeCommit()

 	// Commit should have 10 elements
-	if len(commit.Signatures) != 10 {
-		t.Errorf("expected commit to include %d elems, got %d", 10, len(commit.Signatures))
-	}
+	assert.Equal(t, 10, len(commit.Signatures))

 	// Ensure that Commit is good.
 	if err := commit.ValidateBasic(); err != nil {
 		t.Errorf("error in Commit.ValidateBasic(): %v", err)
 	}
 }
+
+// NOTE: privValidators are in order
+func randVoteSet(
+	height int64,
+	round int32,
+	signedMsgType tmproto.SignedMsgType,
+	numValidators int,
+	votingPower int64,
+) (*VoteSet, *ValidatorSet, []PrivValidator) {
+	valSet, privValidators := RandValidatorSet(numValidators, votingPower)
+	return NewVoteSet("test_chain_id", height, round, signedMsgType, valSet), valSet, privValidators
+}
+
+// Convenience: Return new vote with different validator address/index
+func withValidator(vote *Vote, addr []byte, idx int32) *Vote {
+	vote = vote.Copy()
+	vote.ValidatorAddress = addr
+	vote.ValidatorIndex = idx
+	return vote
+}
+
+// Convenience: Return new vote with different height
+func withHeight(vote *Vote, height int64) *Vote {
+	vote = vote.Copy()
+	vote.Height = height
+	return vote
+}
+
+// Convenience: Return new vote with different round
+func withRound(vote *Vote, round int32) *Vote {
+	vote = vote.Copy()
+	vote.Round = round
+	return vote
+}
+
+// Convenience: Return new vote with different type
+func withType(vote *Vote, signedMsgType byte) *Vote {
+	vote = vote.Copy()
+	vote.Type = tmproto.SignedMsgType(signedMsgType)
+	return vote
+}
+
+// Convenience: Return new vote with different blockHash
+func withBlockHash(vote *Vote, blockHash []byte) *Vote {
+	vote = vote.Copy()
+	vote.BlockID.Hash = blockHash
+	return vote
+}
+
+// Convenience: Return new vote with different blockParts
+func withBlockPartSetHeader(vote *Vote, blockPartsHeader PartSetHeader) *Vote {
+	vote = vote.Copy()
+	vote.BlockID.PartSetHeader = blockPartsHeader
+	return vote
+}
diff --git a/types/vote_test.go b/types/vote_test.go
index eb4f6a955..51e20cd7a 100644
--- a/types/vote_test.go
+++ b/types/vote_test.go
@@ -1,24 +1,26 @@
 package types

 import (
-	"math"
 	"testing"
 	"time"

+	"github.com/gogo/protobuf/proto"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

 	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/crypto/ed25519"
 	"github.com/tendermint/tendermint/crypto/tmhash"
+	"github.com/tendermint/tendermint/libs/protoio"
+	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 )

 func examplePrevote() *Vote {
-	return exampleVote(byte(PrevoteType))
+	return exampleVote(byte(tmproto.PrevoteType))
 }

 func examplePrecommit() *Vote {
-	return exampleVote(byte(PrecommitType))
+	return exampleVote(byte(tmproto.PrecommitType))
 }

 func exampleVote(t byte) *Vote {
@@ -28,13 +30,13 @@ func exampleVote(t byte) *Vote {
 	}

 	return &Vote{
-		Type:      SignedMsgType(t),
+		Type:      tmproto.SignedMsgType(t),
 		Height:    12345,
 		Round:     2,
 		Timestamp: stamp,
 		BlockID: BlockID{
 			Hash: tmhash.Sum([]byte("blockID_hash")),
-			PartsHeader: PartSetHeader{
+			PartSetHeader: PartSetHeader{
 				Total: 1000000,
 				Hash:  tmhash.Sum([]byte("blockID_part_set_header_hash")),
 			},
@@ -46,9 +48,10 @@ func exampleVote(t byte) *Vote {

 func TestVoteSignable(t *testing.T) {
 	vote := examplePrecommit()
-	signBytes := vote.SignBytes("test_chain_id")
-
-	expected, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeVote("test_chain_id", vote))
+	v := vote.ToProto()
+	signBytes := VoteSignBytes("test_chain_id", v)
+	pb := CanonicalizeVote("test_chain_id", v)
+	expected, err := protoio.MarshalDelimited(&pb)
 	require.NoError(t, err)

 	require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Vote.")
@@ -68,7 +71,7 @@ func TestVoteSignBytesTestVectors(t *testing.T) {
 		},
 		// with proper (fixed size) height and round (PreCommit):
 		1: {
-			"", &Vote{Height: 1, Round: 1, Type: PrecommitType},
+			"", &Vote{Height: 1, Round: 1, Type: tmproto.PrecommitType},
 			[]byte{
 				0x21, // length
 				0x8,  // (field_number << 3) | wire_type
@@ -83,7 +86,7 @@ func TestVoteSignBytesTestVectors(t *testing.T) {
 		},
 		// with proper (fixed size) height and round (PreVote):
 		2: {
-			"", &Vote{Height: 1, Round: 1, Type: PrevoteType},
+			"", &Vote{Height: 1, Round: 1, Type: tmproto.PrevoteType},
 			[]byte{
 				0x21, // length
 				0x8,  // (field_number << 3) | wire_type
@@ -126,17 +129,19 @@ func TestVoteSignBytesTestVectors(t *testing.T) {
 		},
 	}
 	for i, tc := range tests {
-		got := tc.vote.SignBytes(tc.chainID)
-		require.Equal(t, tc.want, got, "test case #%v: got unexpected sign bytes for Vote.", i)
+		v := tc.vote.ToProto()
+		got := VoteSignBytes(tc.chainID, v)
+		assert.Equal(t, len(tc.want), len(got), "test case #%v: got unexpected sign bytes length for Vote.", i)
+		assert.Equal(t, tc.want, got, "test case #%v: got unexpected sign bytes for Vote.", i)
 	}
 }

 func TestVoteProposalNotEq(t *testing.T) {
-	cv := CanonicalizeVote("", &Vote{Height: 1, Round: 1})
-	p := CanonicalizeProposal("", &Proposal{Height: 1, Round: 1})
-	vb, err := cdc.MarshalBinaryLengthPrefixed(cv)
+	cv := CanonicalizeVote("", &tmproto.Vote{Height: 1, Round: 1})
+	p := CanonicalizeProposal("", &tmproto.Proposal{Height: 1, Round: 1})
+	vb, err := proto.Marshal(&cv)
 	require.NoError(t, err)
-	pb, err := cdc.MarshalBinaryLengthPrefixed(p)
+	pb, err := proto.Marshal(&p)
 	require.NoError(t, err)
 	require.NotEqual(t, vb, pb)
 }
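In the sign-bytes hunks above, amino's `MarshalBinaryLengthPrefixed` gives way to `VoteSignBytes`, which length-delimits the canonical proto `Vote` via `protoio.MarshalDelimited`. Length-delimited means the encoded message is prefixed with its byte length as a uvarint; a minimal sketch of just that framing using only the standard library (the helper is illustrative, not the tendermint implementation):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// marshalDelimited prepends the uvarint-encoded length of msg, the framing
// that protoio.MarshalDelimited applies to the canonical proto encoding.
func marshalDelimited(msg []byte) []byte {
	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(msg)))
	return append(lenBuf[:n], msg...)
}

func main() {
	bz := marshalDelimited([]byte("canonical-vote-bytes"))
	fmt.Printf("%x\n", bz) // leading 0x14 (= 20) is the payload length
}
```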
@@ -147,39 +152,40 @@ func TestVoteVerifySignature(t *testing.T) {
 	require.NoError(t, err)

 	vote := examplePrecommit()
-	signBytes := vote.SignBytes("test_chain_id")
+	v := vote.ToProto()
+	signBytes := VoteSignBytes("test_chain_id", v)

 	// sign it
-	err = privVal.SignVote("test_chain_id", vote)
+	err = privVal.SignVote("test_chain_id", v)
 	require.NoError(t, err)

 	// verify the same vote
-	valid := pubkey.VerifyBytes(vote.SignBytes("test_chain_id"), vote.Signature)
+	valid := pubkey.VerifySignature(VoteSignBytes("test_chain_id", v), v.Signature)
 	require.True(t, valid)

 	// serialize, deserialize and verify again....
-	precommit := new(Vote)
-	bs, err := cdc.MarshalBinaryLengthPrefixed(vote)
+	precommit := new(tmproto.Vote)
+	bs, err := proto.Marshal(v)
 	require.NoError(t, err)
-	err = cdc.UnmarshalBinaryLengthPrefixed(bs, &precommit)
+	err = proto.Unmarshal(bs, precommit)
 	require.NoError(t, err)

 	// verify the transmitted vote
-	newSignBytes := precommit.SignBytes("test_chain_id")
+	newSignBytes := VoteSignBytes("test_chain_id", precommit)
 	require.Equal(t, string(signBytes), string(newSignBytes))
-	valid = pubkey.VerifyBytes(newSignBytes, precommit.Signature)
+	valid = pubkey.VerifySignature(newSignBytes, precommit.Signature)
 	require.True(t, valid)
 }

 func TestIsVoteTypeValid(t *testing.T) {
 	tc := []struct {
 		name string
-		in   SignedMsgType
+		in   tmproto.SignedMsgType
 		out  bool
 	}{
-		{"Prevote", PrevoteType, true},
-		{"Precommit", PrecommitType, true},
-		{"InvalidType", SignedMsgType(0x3), false},
+		{"Prevote", tmproto.PrevoteType, true},
+		{"Precommit", tmproto.PrecommitType, true},
+		{"InvalidType", tmproto.SignedMsgType(0x3), false},
 	}

 	for _, tt := range tc {
@@ -211,46 +217,15 @@ func TestVoteVerify(t *testing.T) {
 	}
 }

-func TestMaxVoteBytes(t *testing.T) {
-	// time is varint encoded so need to pick the max.
-	// year int, month Month, day, hour, min, sec, nsec int, loc *Location
-	timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC)
-
-	vote := &Vote{
-		ValidatorAddress: crypto.AddressHash([]byte("validator_address")),
-		ValidatorIndex:   math.MaxInt64,
-		Height:           math.MaxInt64,
-		Round:            math.MaxInt64,
-		Timestamp:        timestamp,
-		Type:             PrevoteType,
-		BlockID: BlockID{
-			Hash: tmhash.Sum([]byte("blockID_hash")),
-			PartsHeader: PartSetHeader{
-				Total: math.MaxInt64,
-				Hash:  tmhash.Sum([]byte("blockID_part_set_header_hash")),
-			},
-		},
-	}
-
-	privVal := NewMockPV()
-	err := privVal.SignVote("test_chain_id", vote)
-	require.NoError(t, err)
-
-	bz, err := cdc.MarshalBinaryLengthPrefixed(vote)
-	require.NoError(t, err)
-
-	assert.EqualValues(t, MaxVoteBytes, len(bz))
-}
-
 func TestVoteString(t *testing.T) {
 	str := examplePrecommit().String()
-	expected := `Vote{56789:6AF1F4111082 12345/02/2(Precommit) 8B01023386C3 000000000000 @ 2017-12-25T03:00:01.234Z}`
+	expected := `Vote{56789:6AF1F4111082 12345/02/SIGNED_MSG_TYPE_PRECOMMIT(Precommit) 8B01023386C3 000000000000 @ 2017-12-25T03:00:01.234Z}` //nolint:lll //ignore line length for tests
 	if str != expected {
 		t.Errorf("got unexpected string for Vote. Expected:\n%v\nGot:\n%v", expected, str)
 	}

 	str2 := examplePrevote().String()
-	expected = `Vote{56789:6AF1F4111082 12345/02/1(Prevote) 8B01023386C3 000000000000 @ 2017-12-25T03:00:01.234Z}`
+	expected = `Vote{56789:6AF1F4111082 12345/02/SIGNED_MSG_TYPE_PREVOTE(Prevote) 8B01023386C3 000000000000 @ 2017-12-25T03:00:01.234Z}` //nolint:lll //ignore line length for tests
 	if str2 != expected {
 		t.Errorf("got unexpected string for Vote. Expected:\n%v\nGot:\n%v", expected, str2)
 	}
@@ -279,10 +254,42 @@ func TestVoteValidateBasic(t *testing.T) {
 		tc := tc
 		t.Run(tc.testName, func(t *testing.T) {
 			vote := examplePrecommit()
-			err := privVal.SignVote("test_chain_id", vote)
+			v := vote.ToProto()
+			err := privVal.SignVote("test_chain_id", v)
+			vote.Signature = v.Signature
 			require.NoError(t, err)
 			tc.malleateVote(vote)
 			assert.Equal(t, tc.expectErr, vote.ValidateBasic() != nil, "Validate Basic had an unexpected result")
 		})
 	}
 }
+
+func TestVoteProtobuf(t *testing.T) {
+	privVal := NewMockPV()
+	vote := examplePrecommit()
+	v := vote.ToProto()
+	err := privVal.SignVote("test_chain_id", v)
+	vote.Signature = v.Signature
+	require.NoError(t, err)
+
+	testCases := []struct {
+		msg     string
+		v1      *Vote
+		expPass bool
+	}{
+		{"success", vote, true},
+		{"fail vote validate basic", &Vote{}, false},
+		{"failure nil", nil, false},
+	}
+	for _, tc := range testCases {
+		protoProposal := tc.v1.ToProto()
+
+		v, err := VoteFromProto(protoProposal)
+		if tc.expPass {
+			require.NoError(t, err)
+			require.Equal(t, tc.v1, v, tc.msg)
+		} else {
+			require.Error(t, err)
+		}
+	}
+}
diff --git a/version/version.go b/version/version.go
index b0521c2c1..5082d73c1 100644
--- a/version/version.go
+++ b/version/version.go
@@ -1,66 +1,24 @@
 package version

 var (
-	// GitCommit is the current HEAD set using ldflags.
-	GitCommit string
-
-	// Version is the built softwares version.
-	Version = TMCoreSemVer
-)
-
-func init() {
-	if GitCommit != "" {
-		Version += "-" + GitCommit
-	}
-}
-
-const (
 	// TMCoreSemVer is the current version of Tendermint Core.
 	// It's the Semantic Version of the software.
-	// Must be a string because scripts like dist.sh read this file.
-	// XXX: Don't change the name of this variable or you will break
-	// automation :)
-	TMCoreSemVer = "0.33.4"
+	TMCoreSemVer string
+)

+const (
 	// ABCISemVer is the semantic version of the ABCI library
-	ABCISemVer = "0.16.2"
+	ABCISemVer  = "0.17.0"
+	ABCIVersion = ABCISemVer
 )

-// Protocol is used for implementation agnostic versioning.
-type Protocol uint64
-
-// Uint64 returns the Protocol version as a uint64,
-// eg. for compatibility with ABCI types.
-func (p Protocol) Uint64() uint64 {
-	return uint64(p)
-}
-
 var (
 	// P2PProtocol versions all p2p behaviour and msgs.
 	// This includes proposer selection.
-	P2PProtocol Protocol = 7
+	P2PProtocol uint64 = 8

 	// BlockProtocol versions all block data structures and processing.
 	// This includes validity of blocks and state updates.
-	BlockProtocol Protocol = 10
+	BlockProtocol uint64 = 11
 )
-
-//------------------------------------------------------------------------
-// Version types
-
-// App includes the protocol and software version for the application.
-// This information is included in ResponseInfo. The App.Protocol can be
-// updated in ResponseEndBlock.
-type App struct {
-	Protocol Protocol `json:"protocol"`
-	Software string   `json:"software"`
-}
-
-// Consensus captures the consensus rules for processing a block in the blockchain,
-// including all blockchain data structures and the rules of the application's
-// state transition machine.
-type Consensus struct {
-	Block Protocol `json:"block"`
-	App   Protocol `json:"app"`
-}