diff --git a/.azure-pipelines/stage/verify.yml b/.azure-pipelines/stage/verify.yml deleted file mode 100644 index 20bff07dc7cc..000000000000 --- a/.azure-pipelines/stage/verify.yml +++ /dev/null @@ -1,84 +0,0 @@ -parameters: - -# Auth -- name: authGCP - type: string - default: "" - - -jobs: -- job: packages_x64 - displayName: Debs (x64) - condition: | - and(not(canceled()), - succeeded(), - ne(stageDependencies.env.repo.outputs['changed.mobileOnly'], 'true'), - ne(stageDependencies.env.repo.outputs['changed.docsOnly'], 'true'), - ne(stageDependencies.env.repo.outputs['changed.examplesOnly'], 'true')) - timeoutInMinutes: 120 - pool: envoy-x64-small - steps: - - task: DownloadBuildArtifacts@0 - inputs: - buildType: current - artifactName: "distribution" - itemPattern: "distribution/x64/packages.x64.tar.gz" - downloadType: single - targetPath: $(Build.StagingDirectory) - - template: ../ci.yml - parameters: - ciTarget: verify_distro - cacheName: verify_distro - publishTestResults: false - tmpfsDockerDisabled: true - env: - ENVOY_DOCKER_IN_DOCKER: 1 - -- job: packages_arm64 - displayName: Debs (arm64) - condition: | - and(not(canceled()), - succeeded(), - ne(stageDependencies.env.repo.outputs['changed.mobileOnly'], 'true'), - ne(stageDependencies.env.repo.outputs['changed.docsOnly'], 'true'), - ne(stageDependencies.env.repo.outputs['changed.examplesOnly'], 'true')) - timeoutInMinutes: 120 - pool: "envoy-arm-small" - steps: - - task: DownloadBuildArtifacts@0 - inputs: - buildType: current - artifactName: "distribution" - itemPattern: "distribution/arm64/packages.arm64.tar.gz" - downloadType: single - targetPath: $(Build.StagingDirectory) - - template: ../ci.yml - parameters: - managedAgent: false - ciTarget: verify_distro - cacheName: verify_distro - rbe: false - artifactSuffix: ".arm64" - publishTestResults: false - tmpfsDockerDisabled: true - env: - ENVOY_DOCKER_IN_DOCKER: 1 - -- job: verified - displayName: Verification complete - dependsOn: ["packages_x64", "packages_arm64"] - pool: - vmImage: $(agentUbuntu) - # This condition ensures that this (required) check passes if all of - # the preceding checks either pass or are skipped - # adapted from: - # https://learn.microsoft.com/en-us/azure/devops/pipelines/process/expressions?view=azure-devops#job-to-job-dependencies-within-one-stage - condition: | - and( - eq(variables['Build.Reason'], 'PullRequest'), - in(dependencies.packages_x64.result, 'Succeeded', 'SucceededWithIssues', 'Skipped'), - in(dependencies.packages_arm64.result, 'Succeeded', 'SucceededWithIssues', 'Skipped')) - steps: - - checkout: none - - bash: | - echo "checks complete" diff --git a/.azure-pipelines/stages.yml b/.azure-pipelines/stages.yml index a0fc0c9cbc1b..72438bf0ab46 100644 --- a/.azure-pipelines/stages.yml +++ b/.azure-pipelines/stages.yml @@ -103,13 +103,3 @@ stages: runPackaging: variables['RUN_PACKAGING'] publishDockerhub: variables['PUBLISH_DOCKERHUB'] publishGithubRelease: variables['PUBLISH_GITHUB_RELEASE'] - -- stage: verify - displayName: Verify - dependsOn: ["env", "publish"] - variables: - RUN_DOCKER: $[stageDependencies.env.repo.outputs['run.docker']] - jobs: - - template: stage/verify.yml - parameters: - authGCP: $(GcpServiceAccountKey) diff --git a/.github/workflows/_cache.yml b/.github/workflows/_cache.yml index a21194312df9..a30cf01df779 100644 --- a/.github/workflows/_cache.yml +++ b/.github/workflows/_cache.yml @@ -11,12 +11,21 @@ on: app-key: required: true inputs: + arch: + type: string + default: x64 + cache-suffix: + type: string + default: 
image-tag: type: string required: true request: type: string required: true + runs-on: + type: string + default: ubuntu-24.04 lock-repository: type: string default: envoyproxy/ci-mutex @@ -37,7 +46,7 @@ on: jobs: docker: - runs-on: ubuntu-22.04 + runs-on: ${{ inputs.runs-on || 'ubuntu-24.04' }} steps: - uses: envoyproxy/toolshed/gh-actions/appauth@actions-v0.2.35 id: appauth @@ -47,9 +56,10 @@ jobs: key: ${{ secrets.app-key }} - uses: envoyproxy/toolshed/gh-actions/docker/cache/prime@actions-v0.2.35 id: docker - name: Prime Docker cache (${{ inputs.image-tag }}) + name: Prime Docker cache (${{ inputs.image-tag }}${{ inputs.cache-suffix }}) with: image-tag: ${{ inputs.image-tag }} + key-suffix: ${{ inputs.cache-suffix }} lock-token: ${{ steps.appauth.outputs.token }} lock-repository: ${{ inputs.lock-repository }} - uses: envoyproxy/toolshed/gh-actions/jq@actions-v0.2.35 @@ -59,11 +69,11 @@ jobs: input-format: yaml input: | cached: ${{ steps.docker.outputs.cached }} - key: ${{ inputs.image-tag }} + key: ${{ inputs.image-tag }}${{ inputs.cache-suffix }} - uses: envoyproxy/toolshed/gh-actions/json/table@actions-v0.2.35 name: Summary with: json: ${{ steps.data.outputs.value }} output-path: GITHUB_STEP_SUMMARY title: >- - Cache (Docker x64) + Cache (Docker ${{ inputs.arch }}) diff --git a/.github/workflows/_load.yml b/.github/workflows/_load.yml index acbf553f4113..85991ba4baaa 100644 --- a/.github/workflows/_load.yml +++ b/.github/workflows/_load.yml @@ -157,9 +157,25 @@ jobs: secrets: app-id: ${{ secrets.lock-app-id }} app-key: ${{ secrets.lock-app-key }} - uses: ./.github/workflows/_cache.yml + name: ${{ matrix.name || matrix.target }} needs: request + uses: ./.github/workflows/_cache.yml if: ${{ inputs.cache-docker && ! fromJSON(needs.request.outputs.skip) }} with: - request: ${{ toJSON(needs.request.outputs) }} + arch: ${{ matrix.arch }} + cache-suffix: ${{ matrix.cache-suffix }} image-tag: ${{ fromJSON(needs.request.outputs.build-image).default }} + request: ${{ toJSON(needs.request.outputs) }} + runs-on: ${{ matrix.runs-on }} + strategy: + fail-fast: false + matrix: + include: + - target: docker-x64 + name: Docker (x64) + arch: x64 + - target: docker-arm64 + name: Docker (arm64) + arch: arm64 + cache-suffix: -arm64 + runs-on: envoy-arm64-small diff --git a/.github/workflows/_stage_publish.yml b/.github/workflows/_publish_publish.yml similarity index 100% rename from .github/workflows/_stage_publish.yml rename to .github/workflows/_publish_publish.yml diff --git a/.github/workflows/_publish_verify.yml b/.github/workflows/_publish_verify.yml new file mode 100644 index 000000000000..075e4aad0440 --- /dev/null +++ b/.github/workflows/_publish_verify.yml @@ -0,0 +1,166 @@ +name: Verify + +permissions: + contents: read + +on: + workflow_call: + inputs: + request: + type: string + required: true + trusted: + type: boolean + required: true + +concurrency: + group: >- + ${{ github.actor != 'trigger-release-envoy[bot]' + && github.event.inputs.head_ref + || github.run_id + }}-${{ github.event.workflow.id }}-verify + cancel-in-progress: true + + +jobs: + verify-examples: + permissions: + contents: read + packages: read + name: ${{ matrix.name || matrix.target }} + uses: ./.github/workflows/_run.yml + with: + bazel-extra: ${{ matrix.bazel-extra || '--config=rbe-envoy-engflow' }} + cache-build-image: ${{ matrix.cache-build-image }} + cache-build-image-key-suffix: ${{ matrix.arch == 'arm64' && format('-{0}', matrix.arch) || '' }} + container-command: ${{ matrix.container-command }} + 
concurrency-suffix: -${{ matrix.arch || 'x64' }} + rbe: ${{ matrix.rbe }} + request: ${{ inputs.request }} + runs-on: ${{ matrix.runs-on || 'ubuntu-24.04' }} + steps-pre: ${{ matrix.steps-pre }} + source: ${{ matrix.source }} + target: ${{ matrix.target }} + trusted: ${{ inputs.trusted }} + strategy: + fail-fast: false + matrix: + include: + - name: examples + target: verify_examples + rbe: false + source: | + export NO_BUILD_SETUP=1 + steps-pre: | + - run: | + # Install expected host packages + export DEBIAN_FRONTEND=noninteractive + sudo apt-get -qq update -y + sudo apt-get -qq install -y --no-install-recommends expect gettext yq whois + shell: bash + - id: url + uses: envoyproxy/toolshed/gh-actions/jq@actions-v0.2.35 + with: + options: -Rr + input: >- + ${{ inputs.trusted + && fromJSON(inputs.request).request.sha + || fromJSON(inputs.request).request.ref }} + filter: | + .[:7] as $sha + | if ${{ inputs.trusted }} then + "envoy-postsubmit" + else + "envoy-pr" + end + | . as $bucket + | "https://storage.googleapis.com/\($bucket)/\($sha)" + - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.2.35 + with: + url: %{{ steps.url.outputs.value }}/docker/envoy.tar + variant: dev + - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.2.35 + with: + url: %{{ steps.url.outputs.value }}/docker/envoy-contrib.tar + variant: contrib-dev + - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.2.35 + with: + url: %{{ steps.url.outputs.value }}/docker/envoy-google-vrp.tar + variant: google-vrp-dev + - run: docker images | grep envoy + shell: bash + + verify-distro: + permissions: + contents: read + packages: read + name: ${{ matrix.name || matrix.target }} + uses: ./.github/workflows/_run.yml + with: + bazel-extra: ${{ matrix.bazel-extra || '--config=rbe-envoy-engflow' }} + cache-build-image: ${{ fromJSON(inputs.request).request.build-image.default }} + cache-build-image-key-suffix: ${{ matrix.arch == 'arm64' && format('-{0}', matrix.arch) || '' }} + container-command: ./ci/run_envoy_docker.sh + concurrency-suffix: -${{ matrix.arch || 'x64' }} + rbe: ${{ matrix.rbe && matrix.rbe || false }} + request: ${{ inputs.request }} + runs-on: ${{ matrix.runs-on || 'ubuntu-24.04' }} + source: | + export NO_BUILD_SETUP=1 + export ENVOY_DOCKER_IN_DOCKER=1 + target: ${{ matrix.target }} + trusted: ${{ inputs.trusted }} + steps-pre: | + - uses: envoyproxy/toolshed/gh-actions/jq@actions-v0.2.30 + id: url + with: + options: -Rr + input: >- + ${{ inputs.trusted + && fromJSON(inputs.request).request.sha + || fromJSON(inputs.request).request.ref }} + filter: | + .[:7] as $sha + | if ${{ inputs.trusted }} then + "envoy-postsubmit" + else + "envoy-pr" + end + | . 
as $bucket + | "https://storage.googleapis.com/\($bucket)/\($sha)/release/release.signed.tar.zst" + - uses: envoyproxy/toolshed/gh-actions/fetch@actions-v0.2.30 + id: fetch + with: + url: %{{ steps.url.outputs.value }} + - run: | + echo ARCH=${{ matrix.arch || 'x64' }} >> $GITHUB_ENV + echo DEB_ARCH=${{ matrix.arch != 'arm64' && 'amd64' || 'arm64' }} >> $GITHUB_ENV + shell: bash + - run: | + TEMP_DIR=$(mktemp -d) + zstd --stdout -d %{{ steps.fetch.outputs.path }} | tar --warning=no-timestamp -xf - -C "${TEMP_DIR}" + mkdir ${TEMP_DIR}/debs + tar xf ${TEMP_DIR}/bin/debs.tar.gz -C ${TEMP_DIR}/debs + mkdir -p ${TEMP_DIR}/distribution/deb + cp -a ${TEMP_DIR}/debs/*_${DEB_ARCH}* ${TEMP_DIR}/distribution/deb + cp -a ${TEMP_DIR}/signing.key ${TEMP_DIR}/distribution + mkdir -p %{{ runner.temp }}/distribution/${ARCH} + tar czf %{{ runner.temp }}/distribution/${ARCH}/packages.${ARCH}.tar.gz -C ${TEMP_DIR}/distribution . + shell: bash + + strategy: + fail-fast: false + matrix: + include: + + - name: verify_distro_x64 + target: verify_distro + rbe: true + + - name: verify_distro_arm64 + target: verify_distro + arch: arm64 + bazel-extra: >- + --config=cache-envoy-engflow + --config=bes-envoy-engflow + runs-on: envoy-arm64-small diff --git a/.github/workflows/_run.yml b/.github/workflows/_run.yml index ce82d5ac8ad1..e65e87a4e2cb 100644 --- a/.github/workflows/_run.yml +++ b/.github/workflows/_run.yml @@ -21,11 +21,16 @@ on: default: 75 cache-build-image: type: string + cache-build-image-key-suffix: + type: string catch-errors: type: boolean default: false checkout-extra: type: string + concurrency-suffix: + type: string + default: container-command: type: string default: ./ci/run_envoy_docker.sh @@ -141,7 +146,7 @@ concurrency: ${{ github.actor != 'trigger-release-envoy[bot]' && github.head_ref || github.run_id - }}-${{ github.workflow }}-${{ inputs.target }} + }}-${{ github.workflow }}-${{ inputs.target }}${{ inputs.concurrency-suffix }} cancel-in-progress: true env: @@ -190,6 +195,7 @@ jobs: uses: envoyproxy/toolshed/gh-actions/docker/cache/restore@actions-v0.2.35 with: image_tag: ${{ inputs.cache-build-image }} + key-suffix: ${{ inputs.cache-build-image-key-suffix }} - uses: envoyproxy/toolshed/gh-actions/appauth@actions-v0.2.35 id: appauth @@ -259,11 +265,11 @@ jobs: env: GITHUB_TOKEN: ${{ inputs.trusted && steps.appauth.outputs.token || github.token }} ENVOY_DOCKER_BUILD_DIR: ${{ runner.temp }} - ENVOY_RBE: ${{ inputs.rbe != 'false' && 1 || '' }} + ENVOY_RBE: ${{ inputs.rbe == true && 1 || '' }} RBE_KEY: ${{ secrets.rbe-key }} BAZEL_BUILD_EXTRA_OPTIONS: >- --config=remote-ci ${{ inputs.bazel-extra }} - ${{ inputs.rbe != 'false' && format('--jobs={0}', inputs.bazel-rbe-jobs) || '' }} + ${{ inputs.rbe == true && format('--jobs={0}', inputs.bazel-rbe-jobs) || '' }} BAZEL_FAKE_SCM_REVISION: ${{ github.event_name == 'pull_request' && 'e3b4a6e9570da15ac1caffdded17a8bebdc7dfc9' || '' }} CI_TARGET_BRANCH: ${{ fromJSON(inputs.request).request.target-branch }} diff --git a/.github/workflows/_stage_verify.yml b/.github/workflows/_stage_verify.yml deleted file mode 100644 index 3011bbc9bd15..000000000000 --- a/.github/workflows/_stage_verify.yml +++ /dev/null @@ -1,88 +0,0 @@ -name: Verify - -permissions: - contents: read - -on: - workflow_call: - inputs: - request: - type: string - required: true - trusted: - type: boolean - required: true - -concurrency: - group: >- - ${{ github.actor != 'trigger-release-envoy[bot]' - && github.event.inputs.head_ref - || github.run_id - }}-${{ github.event.workflow.id 
}}-verify - cancel-in-progress: true - - -jobs: - verify: - permissions: - contents: read - packages: read - name: ${{ matrix.name || matrix.target }} - uses: ./.github/workflows/_run.yml - with: - cache-build-image: - container-command: - rbe: ${{ matrix.rbe }} - request: ${{ inputs.request }} - runs-on: ubuntu-24.04 - steps-pre: ${{ matrix.steps-pre }} - source: ${{ matrix.source }} - target: ${{ matrix.target }} - trusted: ${{ inputs.trusted }} - strategy: - fail-fast: false - matrix: - include: - - name: examples - target: verify_examples - source: | - export NO_BUILD_SETUP=1 - rbe: false - steps-pre: | - - id: url - uses: envoyproxy/toolshed/gh-actions/jq@actions-v0.2.35 - with: - options: -Rr - input: >- - ${{ inputs.trusted - && fromJSON(inputs.request).request.sha - || fromJSON(inputs.request).request.ref }} - filter: | - .[:7] as $sha - | if ${{ inputs.trusted }} then - "envoy-postsubmit" - else - "envoy-pr" - end - | . as $bucket - | "https://storage.googleapis.com/\($bucket)/\($sha)" - - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.2.35 - with: - url: %{{ steps.url.outputs.value }}/docker/envoy.tar - variant: dev - - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.2.35 - with: - url: %{{ steps.url.outputs.value }}/docker/envoy-contrib.tar - variant: contrib-dev - - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.2.35 - with: - url: %{{ steps.url.outputs.value }}/docker/envoy-google-vrp.tar - variant: google-vrp-dev - - run: docker images | grep envoy - shell: bash - - run: | - # Install expected host packages - export DEBIAN_FRONTEND=noninteractive - sudo apt-get -qq update -y - sudo apt-get -qq install -y --no-install-recommends expect gettext yq whois - shell: bash diff --git a/.github/workflows/envoy-publish.yml b/.github/workflows/envoy-publish.yml index ab7a7b896292..df33cd5221ba 100644 --- a/.github/workflows/envoy-publish.yml +++ b/.github/workflows/envoy-publish.yml @@ -62,7 +62,7 @@ jobs: if: ${{ fromJSON(needs.load.outputs.request).run.publish }} needs: - load - uses: ./.github/workflows/_stage_publish.yml + uses: ./.github/workflows/_publish_publish.yml name: Publish with: request: ${{ needs.load.outputs.request }} @@ -75,7 +75,7 @@ jobs: if: ${{ fromJSON(needs.load.outputs.request).run.verify }} needs: - load - uses: ./.github/workflows/_stage_verify.yml + uses: ./.github/workflows/_publish_verify.yml name: Verify with: request: ${{ needs.load.outputs.request }} diff --git a/SECURITY.md b/SECURITY.md index 5a5233601a5f..a76fc0cc8aa6 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -465,24 +465,20 @@ and security team to ensure they still qualify for inclusion on the list. 
| Organization | End User | Last Review | |:-------------:|:--------:|:-----------:| -| AWS | No | 06/21 | -| Cilium | No | 06/21 | -| Cloud Foundry | No | 06/21 | -| Datawire | No | 06/21 | -| F5 | No | 06/21 | -| Google | No | 06/21 | -| IBM | No | 06/21 | -| Istio | No | 06/21 | -| Microsoft | No | 2/21 | -| Red Hat | No | 06/21 | -| solo.io | No | 06/21 | -| Tetrate | No | 06/21 | -| VMware | No | 06/21 | -| Pinterest | Yes | 06/21 | -| Dropbox | Yes | 01/20 | -| Stripe | Yes | 01/20 | -| Square | Yes | 05/21 | -| Apple | Yes | 05/21 | -| Spotify | Yes | 06/21 | -| Netflix | Yes | 06/22 | +| AWS | No | 07/24 | +| Cilium | No | 07/24 | +| Cloud Foundry | No | 07/24 | +| F5 | No | 07/24 | +| Google | No | 07/24 | +| Istio | No | 07/24 | +| Microsoft | No | 07/24 | +| Red Hat | No | 07/24 | +| VMware | No | 07/24 | +| Tetrate | No | 07/24 | +| solo.io | No | 07/24 | +| Pinterest | Yes | 07/24 | +| Dropbox | Yes | 07/24 | +| Apple | Yes | 07/24 | +| Spotify | Yes | 02/21 | +| Netflix | Yes | 07/24 | | Slack | Yes | 07/24 | diff --git a/api/envoy/extensions/filters/http/grpc_field_extraction/v3/config.proto b/api/envoy/extensions/filters/http/grpc_field_extraction/v3/config.proto index 3684f994d65f..aae62145731a 100644 --- a/api/envoy/extensions/filters/http/grpc_field_extraction/v3/config.proto +++ b/api/envoy/extensions/filters/http/grpc_field_extraction/v3/config.proto @@ -52,7 +52,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // // Here are config requirements // -// 1. the target field should be among the following primitive types: `string`, `uint32`, `uint64`, `int32`, `int64`, `sint32`, `sint64`, `fixed32`, `fixed64`, `sfixed32`, `sfixed64`, `float`, `double`. +// 1. the target field should be among the following primitive types: `string`, +// `uint32`, `uint64`, `int32`, `int64`, `sint32`, `sint64`, `fixed32`, +// `fixed64`, `sfixed32`, `sfixed64`, `float`, `double`, `map`. // // 2. the target field could be repeated. // @@ -61,9 +63,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Output Format // ------------- // -// 1. the extracted field names/values will be wrapped in be ``field`` -> ``values``, which will be added in the dynamic ``metadata``. +// 1. the extracted field names/values will be wrapped in be ``field`` -> ``values``, which will be added in the dynamic ``metadata``. // -// 2. if the field value is empty, a empty ```` will be set. +// 2. if the field value is empty, an empty ```` will be set. 
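+//
+// For example (mirroring the filter tests in this change), a ``map`` target field is
+// emitted as a list containing a single struct whose fields are the map's
+// key/value pairs.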
// // Performance // ----------- diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 04d980d76742..0ef1e9091d91 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -985,13 +985,13 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "proto-field-extraction", project_desc = "Library that supports the extraction from protobuf binary", project_url = "https://github.com/grpc-ecosystem/proto-field-extraction", - version = "2dfe27548e1f21a665f9068b97b2fc5beb678566", - sha256 = "ddbbd0dd07012339ac467f5fdac5c294e1efcdc93bb4b7152d468ddbfc9772f0", + version = "d5d39f0373e9b6691c32c85929838b1006bcb3fb", + sha256 = "cba864db90806515afa553aaa2fb3683df2859a7535e53a32cb9619da9cebc59", strip_prefix = "proto-field-extraction-{version}", urls = ["https://github.com/grpc-ecosystem/proto-field-extraction/archive/{version}.zip"], use_category = ["dataplane_ext"], extensions = ["envoy.filters.http.grpc_json_transcoder", "envoy.filters.http.grpc_field_extraction"], - release_date = "2023-07-18", + release_date = "2024-07-10", cpe = "N/A", license = "Apache-2.0", license_url = "https://github.com/grpc-ecosystem/proto-field-extraction/blob/{version}/LICENSE", @@ -1200,12 +1200,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "QUICHE", project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google‘s implementation of QUIC and related protocols", project_url = "https://github.com/google/quiche", - version = "0d70743fc056f254743082221eda54eba7431fa7", - sha256 = "b2c0ad26505d93416305618b5af10be35280a8d166c341f3e9795704914d1e12", + version = "36723962ef5c9f3f9f42093ff9cbe057bc7a80c4", + sha256 = "8735afd08104215a8487cc9f2ffff1adc16e6168dc61c4e65127a3fb23d90c54", urls = ["https://github.com/google/quiche/archive/{version}.tar.gz"], strip_prefix = "quiche-{version}", use_category = ["controlplane", "dataplane_core"], - release_date = "2024-08-09", + release_date = "2024-08-11", cpe = "N/A", license = "BSD-3-Clause", license_url = "https://github.com/google/quiche/blob/{version}/LICENSE", diff --git a/changelogs/current.yaml b/changelogs/current.yaml index 86f5204385de..26e77912e721 100644 --- a/changelogs/current.yaml +++ b/changelogs/current.yaml @@ -175,6 +175,9 @@ new_features: change: | Prefer using IPv6 address when addresses from both families are available. Can be reverted by setting ``envoy.reloadable_features.prefer_ipv6_dns_on_macos`` to false. +- area: grpc_field_extraction + change: | + Added ``map`` support: Target fields of type ``map`` can be extracted and added to dynamic metadata. - area: rbac change: | Added :ref:`delay_deny ` to support deny connection after diff --git a/ci/do_ci.sh b/ci/do_ci.sh index 8f3db2df7474..038a7c7e4c06 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -55,6 +55,8 @@ FETCH_PROTO_TARGETS=( @com_github_bufbuild_buf//:bin/buf //tools/proto_format/...) 
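+# Path component under which GCS artifacts are uploaded by the targets below: the pull
+# request number when available (presubmit runs), otherwise the source branch name.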
+GCS_REDIRECT_PATH="${SYSTEM_PULLREQUEST_PULLREQUESTNUMBER:-${BUILD_SOURCEBRANCHNAME}}" + retry () { local n wait iterations wait="${1}" @@ -481,7 +483,16 @@ case $CI_TARGET in else TARGET=coverage fi - "${ENVOY_SRCDIR}/ci/upload_gcs_artifact.sh" "/source/generated/${TARGET}" "$TARGET" + GCS_LOCATION=$( + bazel run //tools/gcs:upload \ + "${GCS_ARTIFACT_BUCKET}" \ + "${GCP_SERVICE_ACCOUNT_KEY_PATH}" \ + "/source/generated/${TARGET}" \ + "$TARGET" \ + "${GCS_REDIRECT_PATH}") + if [[ "${COVERAGE_FAILED}" -eq 1 ]]; then + echo "##vso[task.logissue type=error]Coverage failed, check artifact at: ${GCS_LOCATION}" + fi ;; debug) @@ -642,7 +653,12 @@ case $CI_TARGET in docker-upload) setup_clang_toolchain - "${ENVOY_SRCDIR}/ci/upload_gcs_artifact.sh" "${BUILD_DIR}/build_images" docker + bazel run //tools/gcs:upload \ + "${GCS_ARTIFACT_BUCKET}" \ + "${GCP_SERVICE_ACCOUNT_KEY_PATH}" \ + "${BUILD_DIR}/build_images" \ + "docker" \ + "${GCS_REDIRECT_PATH}" ;; dockerhub-publish) @@ -684,7 +700,12 @@ case $CI_TARGET in docs-upload) setup_clang_toolchain - "${ENVOY_SRCDIR}/ci/upload_gcs_artifact.sh" /source/generated/docs docs + bazel run //tools/gcs:upload \ + "${GCS_ARTIFACT_BUCKET}" \ + "${GCP_SERVICE_ACCOUNT_KEY_PATH}" \ + /source/generated/docs \ + docs \ + "${GCS_REDIRECT_PATH}" ;; fetch|fetch-*) @@ -907,7 +928,12 @@ case $CI_TARGET in setup_clang_toolchain bazel build "${BAZEL_BUILD_OPTIONS[@]}" //distribution:signed cp -a bazel-bin/distribution/release.signed.tar.zst "${BUILD_DIR}/envoy/" - "${ENVOY_SRCDIR}/ci/upload_gcs_artifact.sh" "${BUILD_DIR}/envoy" release + bazel run //tools/gcs:upload \ + "${GCS_ARTIFACT_BUCKET}" \ + "${GCP_SERVICE_ACCOUNT_KEY_PATH}" \ + "${BUILD_DIR}/envoy" \ + "release" \ + "${GCS_REDIRECT_PATH}" ;; sizeopt) diff --git a/source/common/filesystem/inotify/watcher_impl.cc b/source/common/filesystem/inotify/watcher_impl.cc index 534cd67f1933..f335c2ad62d3 100644 --- a/source/common/filesystem/inotify/watcher_impl.cc +++ b/source/common/filesystem/inotify/watcher_impl.cc @@ -54,7 +54,10 @@ absl::Status WatcherImpl::addWatch(absl::string_view path, uint32_t events, OnCh absl::Status WatcherImpl::onInotifyEvent() { while (true) { - uint8_t buffer[sizeof(inotify_event) + NAME_MAX + 1]; + // The buffer needs to be suitably aligned to store the first inotify_event structure. + // If there are multiple events returned by the read call, the kernel is responsible for + // properly aligning subsequent inotify_event structures (per `man inotify`). 
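+    // Without the explicit alignment, interpreting the raw byte buffer as an
+    // inotify_event could dereference a misaligned pointer, which is undefined behaviour.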
+ alignas(inotify_event) uint8_t buffer[sizeof(inotify_event) + NAME_MAX + 1]; ssize_t rc = read(inotify_fd_, &buffer, sizeof(buffer)); if (rc == -1 && errno == EAGAIN) { return absl::OkStatus(); diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index a031d98faa38..d94b988dfea4 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -53,6 +53,7 @@ #include "source/extensions/path/match/uri_template/uri_template_match.h" #include "source/extensions/path/rewrite/uri_template/uri_template_rewrite.h" +#include "absl/container/inlined_vector.h" #include "absl/strings/match.h" namespace Envoy { @@ -671,9 +672,9 @@ RouteEntryImplBase::RouteEntryImplBase(const CommonVirtualHostSharedPtr& vhost, return; } - weighted_clusters_config_ = - std::make_unique(std::move(weighted_clusters), total_weight, - route.route().weighted_clusters().header_name()); + weighted_clusters_config_ = std::make_unique( + std::move(weighted_clusters), total_weight, route.route().weighted_clusters().header_name(), + route.route().weighted_clusters().runtime_key_prefix()); } else if (route.route().cluster_specifier_case() == envoy::config::route::v3::RouteAction::ClusterSpecifierCase:: @@ -1347,12 +1348,15 @@ RouteConstSharedPtr RouteEntryImplBase::clusterEntry(const Http::RequestHeaderMa return cluster_specifier_plugin_->route(shared_from_this(), headers); } } - return pickWeightedCluster(headers, random_value, true); + return pickWeightedCluster(headers, random_value); } +// Selects a cluster depending on weight parameters from configuration or from headers. +// This function takes into account the weights set through configuration or through +// runtime parameters. +// Returns selected cluster, or nullptr if weighted configuration is invalid. RouteConstSharedPtr RouteEntryImplBase::pickWeightedCluster(const Http::HeaderMap& headers, - const uint64_t random_value, - const bool ignore_overflow) const { + const uint64_t random_value) const { absl::optional random_value_from_header; // Retrieve the random value from the header if corresponding header name is specified. // weighted_clusters_config_ is known not to be nullptr here. If it were, pickWeightedCluster @@ -1380,23 +1384,55 @@ RouteConstSharedPtr RouteEntryImplBase::pickWeightedCluster(const Http::HeaderMa } } + auto runtime_key_prefix_configured = + (weighted_clusters_config_->runtime_key_prefix_.length() ? true : false); + uint32_t total_cluster_weight = weighted_clusters_config_->total_cluster_weight_; + absl::InlinedVector cluster_weights; + + // if runtime config is used, we need to recompute total_weight + if (runtime_key_prefix_configured) { + // Temporary storage to hold consistent cluster weights. Since cluster weight + // can be changed with runtime keys, we need a way to gather all the weight + // and aggregate the total without a change in between. + // The InlinedVector will be able to handle at least 4 cluster weights + // without allocation. For cases when more clusters are needed, it is + // reserved to ensure at most a single allocation. 
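+    //
+    // Worked example (mirroring the updated unit test): with runtime weights of 10,
+    // 120 and 10 the recomputed total is 140, so a random value of 1005 selects
+    // 1005 % 140 == 25, which falls in the second cluster's [10, 130) interval.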
+ cluster_weights.reserve(weighted_clusters_config_->weighted_clusters_.size()); + + total_cluster_weight = 0; + for (const WeightedClusterEntrySharedPtr& cluster : + weighted_clusters_config_->weighted_clusters_) { + auto cluster_weight = cluster->clusterWeight(); + cluster_weights.push_back(cluster_weight); + if (cluster_weight > std::numeric_limits::max() - total_cluster_weight) { + IS_ENVOY_BUG("Sum of weight cannot overflow 2^32"); + return nullptr; + } + total_cluster_weight += cluster_weight; + } + } + + if (total_cluster_weight == 0) { + IS_ENVOY_BUG("Sum of weight cannot be zero"); + return nullptr; + } const uint64_t selected_value = (random_value_from_header.has_value() ? random_value_from_header.value() : random_value) % - weighted_clusters_config_->total_cluster_weight_; + total_cluster_weight; uint64_t begin = 0; uint64_t end = 0; + auto cluster_weight = cluster_weights.begin(); // Find the right cluster to route to based on the interval in which // the selected value falls. The intervals are determined as // [0, cluster1_weight), [cluster1_weight, cluster1_weight+cluster2_weight),.. for (const WeightedClusterEntrySharedPtr& cluster : weighted_clusters_config_->weighted_clusters_) { - end = begin + cluster->clusterWeight(); - if (!ignore_overflow) { - // end > total_cluster_weight: This case can only occur with Runtimes, - // when the user specifies invalid weights such that - // sum(weights) > total_cluster_weight. - ASSERT(end <= weighted_clusters_config_->total_cluster_weight_); + + if (runtime_key_prefix_configured) { + end = begin + *cluster_weight++; + } else { + end = begin + cluster->clusterWeight(); } if (selected_value >= begin && selected_value < end) { @@ -1415,7 +1451,8 @@ RouteConstSharedPtr RouteEntryImplBase::pickWeightedCluster(const Http::HeaderMa begin = end; } - PANIC("unexpected"); + IS_ENVOY_BUG("unexpected"); + return nullptr; } absl::Status RouteEntryImplBase::validateClusters( diff --git a/source/common/router/config_impl.h b/source/common/router/config_impl.h index 2a6bfc723532..f9383d6bd22c 100644 --- a/source/common/router/config_impl.h +++ b/source/common/router/config_impl.h @@ -1089,13 +1089,16 @@ class RouteEntryImplBase : public RouteEntryAndRoute, struct WeightedClustersConfig { WeightedClustersConfig(const std::vector&& weighted_clusters, uint64_t total_cluster_weight, - const std::string& random_value_header_name) + const std::string& random_value_header_name, + const std::string& runtime_key_prefix) : weighted_clusters_(std::move(weighted_clusters)), total_cluster_weight_(total_cluster_weight), - random_value_header_name_(random_value_header_name) {} + random_value_header_name_(random_value_header_name), + runtime_key_prefix_(runtime_key_prefix) {} const std::vector weighted_clusters_; const uint64_t total_cluster_weight_; const std::string random_value_header_name_; + const std::string runtime_key_prefix_; }; protected: @@ -1207,8 +1210,8 @@ class RouteEntryImplBase : public RouteEntryAndRoute, const Http::HeaderMap& headers, const RouteEntryAndRoute* route_selector_override) const; - RouteConstSharedPtr pickWeightedCluster(const Http::HeaderMap& headers, uint64_t random_value, - bool ignore_overflow) const; + RouteConstSharedPtr pickWeightedCluster(const Http::HeaderMap& headers, + uint64_t random_value) const; // Default timeout is 15s if nothing is specified in the route config. 
static const uint64_t DEFAULT_ROUTE_TIMEOUT_MS = 15000; diff --git a/source/extensions/filters/http/grpc_field_extraction/extractor.h b/source/extensions/filters/http/grpc_field_extraction/extractor.h index 4178e73fb090..d321b3651ec4 100644 --- a/source/extensions/filters/http/grpc_field_extraction/extractor.h +++ b/source/extensions/filters/http/grpc_field_extraction/extractor.h @@ -24,8 +24,8 @@ struct RequestField { // The request field path. absl::string_view path; - // The request field values. - std::vector values; + // The request field value. + ProtobufWkt::Value value; }; using ExtractionResult = std::vector; diff --git a/source/extensions/filters/http/grpc_field_extraction/extractor_impl.cc b/source/extensions/filters/http/grpc_field_extraction/extractor_impl.cc index adbaa44181bd..8470310aa09b 100644 --- a/source/extensions/filters/http/grpc_field_extraction/extractor_impl.cc +++ b/source/extensions/filters/http/grpc_field_extraction/extractor_impl.cc @@ -7,7 +7,6 @@ #include "source/common/common/logger.h" -#include "absl/strings/str_format.h" #include "proto_field_extraction/field_value_extractor/field_value_extractor_factory.h" #include "proto_field_extraction/field_value_extractor/field_value_extractor_interface.h" @@ -39,18 +38,14 @@ ExtractorImpl::processRequest(Protobuf::field_extraction::MessageData& message) ExtractionResult result; for (const auto& it : per_field_extractors_) { - auto extracted_values = it.second->Extract(message); - if (!extracted_values.ok()) { - return extracted_values.status(); + absl::StatusOr extracted_value = it.second->ExtractValue(message); + if (!extracted_value.ok()) { + return extracted_value.status(); } ENVOY_LOG_MISC(debug, "extracted the following resource values from the {} field: {}", it.first, - std::accumulate(extracted_values.value().begin(), extracted_values.value().end(), - std::string(), - [](const std::string& lhs, const std::string& rhs) { - return absl::StrFormat("%s, %s", lhs, rhs); - })); - result.push_back({it.first, std::move(extracted_values.value())}); + extracted_value->DebugString()); + result.push_back({it.first, std::move(*extracted_value)}); } return result; diff --git a/source/extensions/filters/http/grpc_field_extraction/filter.cc b/source/extensions/filters/http/grpc_field_extraction/filter.cc index fb66c448dffd..7ae1fb12f326 100644 --- a/source/extensions/filters/http/grpc_field_extraction/filter.cc +++ b/source/extensions/filters/http/grpc_field_extraction/filter.cc @@ -214,10 +214,7 @@ void Filter::handleExtractionResult(const ExtractionResult& result) { ProtobufWkt::Struct dest_metadata; for (const auto& req_field : result) { RELEASE_ASSERT(!req_field.path.empty(), "`req_field.path` shouldn't be empty"); - auto* list = (*dest_metadata.mutable_fields())[req_field.path].mutable_list_value(); - for (const auto& value : req_field.values) { - list->add_values()->set_string_value(value); - } + (*dest_metadata.mutable_fields())[req_field.path] = req_field.value; } if (dest_metadata.fields_size() > 0) { ENVOY_STREAM_LOG(debug, "injected dynamic metadata `{}` with `{}`", *decoder_callbacks_, diff --git a/source/extensions/filters/http/rate_limit_quota/client_impl.cc b/source/extensions/filters/http/rate_limit_quota/client_impl.cc index b51e5725fda1..f454695b55bc 100644 --- a/source/extensions/filters/http/rate_limit_quota/client_impl.cc +++ b/source/extensions/filters/http/rate_limit_quota/client_impl.cc @@ -64,10 +64,14 @@ RateLimitQuotaUsageReports RateLimitClientImpl::buildReport(absl::optional bucket_id) { - 
ASSERT(stream_ != nullptr); - // Build the report and then send the report to RLQS server. - // `end_stream` should always be set to false as we don't want to close the stream locally. - stream_->sendMessage(buildReport(bucket_id), /*end_stream=*/false); + if (stream_ != nullptr) { + // Build the report and then send the report to RLQS server. + // `end_stream` should always be set to false as we don't want to close the stream locally. + stream_->sendMessage(buildReport(bucket_id), /*end_stream=*/false); + } else { + // Don't send any reports if stream has already been closed. + ENVOY_LOG(debug, "The stream has already been closed; no reports will be sent."); + } } void RateLimitClientImpl::onReceiveMessage(RateLimitQuotaResponsePtr&& response) { @@ -143,20 +147,18 @@ void RateLimitClientImpl::onReceiveMessage(RateLimitQuotaResponsePtr&& response) void RateLimitClientImpl::closeStream() { // Close the stream if it is in open state. - if (stream_ != nullptr && !stream_closed_) { + if (stream_ != nullptr) { ENVOY_LOG(debug, "Closing gRPC stream"); stream_->closeStream(); - stream_closed_ = true; stream_->resetStream(); + stream_ = nullptr; } } void RateLimitClientImpl::onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) { - // TODO(tyxia) Revisit later, maybe add some logging. - stream_closed_ = true; ENVOY_LOG(debug, "gRPC stream closed remotely with status {}: {}", status, message); - closeStream(); + stream_ = nullptr; } absl::Status RateLimitClientImpl::startStream(const StreamInfo::StreamInfo& stream_info) { diff --git a/source/extensions/filters/http/rate_limit_quota/client_impl.h b/source/extensions/filters/http/rate_limit_quota/client_impl.h index 4999ef62f9ca..b471755d2420 100644 --- a/source/extensions/filters/http/rate_limit_quota/client_impl.h +++ b/source/extensions/filters/http/rate_limit_quota/client_impl.h @@ -58,8 +58,6 @@ class RateLimitClientImpl : public RateLimitClient, // Build the usage report (i.e., the request sent to RLQS server) from the buckets in quota bucket // cache. RateLimitQuotaUsageReports buildReport(absl::optional bucket_id); - - bool stream_closed_ = false; // Domain from filter configuration. The same domain name throughout the whole lifetime of client. std::string domain_name_; // Client is stored as the bare object since there is no ownership transfer involved. diff --git a/test/common/http/conn_manager_impl_test_2.cc b/test/common/http/conn_manager_impl_test_2.cc index 526a874ee330..8a37e53703ca 100644 --- a/test/common/http/conn_manager_impl_test_2.cc +++ b/test/common/http/conn_manager_impl_test_2.cc @@ -490,7 +490,7 @@ TEST_F(HttpConnectionManagerImplTest, DrainConnectionUponCompletionVsOnDrainTime Event::MockTimer* connection_duration_timer = setUpTimer(); EXPECT_CALL(*connection_duration_timer, enableTimer(_, _)); // Set up connection. - setup(false, ""); + setup(); // Create a filter so we can encode responses. 
MockStreamDecoderFilter* filter = new NiceMock(); diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index b9679737b71b..8cc2795e4613 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -6305,21 +6305,60 @@ TEST_F(RouteMatcherTest, WeightedClusters) { { Http::TestRequestHeaderMapImpl headers = genHeaders("www3.lyft.com", "/foo", "GET"); EXPECT_CALL(runtime.snapshot_, featureEnabled("www3", 100, _)).WillRepeatedly(Return(true)); + // new total weight will be 140 EXPECT_CALL(runtime.snapshot_, getInteger("www3_weights.cluster1", 30)) .WillRepeatedly(Return(10)); - - // We return an invalid value here, one that is greater than 100 - // Expect any random value > 10 to always land in cluster2. EXPECT_CALL(runtime.snapshot_, getInteger("www3_weights.cluster2", 30)) .WillRepeatedly(Return(120)); EXPECT_CALL(runtime.snapshot_, getInteger("www3_weights.cluster3", 40)) .WillRepeatedly(Return(10)); - EXPECT_EQ("cluster1", config.route(headers, 1005)->routeEntry()->clusterName()); + // 1005 % total_weight == 25 + EXPECT_EQ("cluster2", config.route(headers, 1005)->routeEntry()->clusterName()); EXPECT_EQ("cluster2", config.route(headers, 82)->routeEntry()->clusterName()); EXPECT_EQ("cluster2", config.route(headers, 92)->routeEntry()->clusterName()); } + // Weighted Cluster with runtime values under total weight + // Makes sure new total weight is taken into account + // if total_weight is not recomputed, it will raise "unexpected" error + { + Http::TestRequestHeaderMapImpl headers = genHeaders("www3.lyft.com", "/foo", "GET"); + EXPECT_CALL(runtime.snapshot_, featureEnabled("www3", 100, _)).WillRepeatedly(Return(true)); + // new total weight will be 6 + EXPECT_CALL(runtime.snapshot_, getInteger("www3_weights.cluster1", 30)) + .WillRepeatedly(Return(1)); + EXPECT_CALL(runtime.snapshot_, getInteger("www3_weights.cluster2", 30)) + .WillRepeatedly(Return(2)); + EXPECT_CALL(runtime.snapshot_, getInteger("www3_weights.cluster3", 40)) + .WillRepeatedly(Return(3)); + + // 1005 % total_weight == 3 + EXPECT_EQ("cluster3", config.route(headers, 1005)->routeEntry()->clusterName()); + EXPECT_EQ("cluster3", config.route(headers, 82)->routeEntry()->clusterName()); + EXPECT_EQ("cluster2", config.route(headers, 92)->routeEntry()->clusterName()); + } + + // Total weight is set to zero + { + Http::TestRequestHeaderMapImpl headers = genHeaders("www3.lyft.com", "/foo", "GET"); + EXPECT_CALL(runtime.snapshot_, featureEnabled("www3", 100, _)).WillRepeatedly(Return(true)); + EXPECT_CALL(runtime.snapshot_, getInteger("www3_weights.cluster1", 30)) + .WillRepeatedly(Return(0)); + EXPECT_CALL(runtime.snapshot_, getInteger("www3_weights.cluster2", 30)) + .WillRepeatedly(Return(0)); + EXPECT_CALL(runtime.snapshot_, getInteger("www3_weights.cluster3", 40)) + .WillRepeatedly(Return(0)); + +#if defined(NDEBUG) + // sum of weight returns nullptr + EXPECT_EQ(nullptr, config.route(headers, 42)); +#else + // in debug mode, it aborts + EXPECT_DEATH(config.route(headers, 42), "Sum of weight cannot be zero"); +#endif + } + // Weighted Cluster with runtime values, total weight = 10000 { Http::TestRequestHeaderMapImpl headers = genHeaders("www4.lyft.com", "/foo", "GET"); diff --git a/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc b/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc index d1ee77911444..93c9300e18f3 100644 --- a/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc +++ 
b/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc @@ -182,6 +182,9 @@ class ExtProcIntegrationTest : public HttpIntegrationTest, test::integration::filters::LoggingTestFilterConfig logging_filter_config; logging_filter_config.set_logging_id(ext_proc_filter_name); logging_filter_config.set_upstream_cluster_name(valid_grpc_cluster_name); + // No need to check the bytes received for observability mode because it is a + // "send and go" mode. + logging_filter_config.set_check_received_bytes(!proto_config_.observability_mode()); envoy::extensions::filters::network::http_connection_manager::v3::HttpFilter logging_filter; logging_filter.set_name("logging-test-filter"); logging_filter.mutable_typed_config()->PackFrom(logging_filter_config); diff --git a/test/extensions/filters/http/ext_proc/logging_test_filter.cc b/test/extensions/filters/http/ext_proc/logging_test_filter.cc index 30838fd74600..702bd61804d0 100644 --- a/test/extensions/filters/http/ext_proc/logging_test_filter.cc +++ b/test/extensions/filters/http/ext_proc/logging_test_filter.cc @@ -22,8 +22,10 @@ namespace ExternalProcessing { // A test filter that retrieve the logging info on encodeComplete. class LoggingTestFilter : public Http::PassThroughFilter { public: - LoggingTestFilter(const std::string& logging_id, const std::string& cluster_name) - : logging_id_(logging_id), expected_cluster_name_(cluster_name) {} + LoggingTestFilter(const std::string& logging_id, const std::string& cluster_name, + bool check_received_bytes) + : logging_id_(logging_id), expected_cluster_name_(cluster_name), + check_received_bytes_(check_received_bytes) {} void encodeComplete() override { ASSERT(decoder_callbacks_ != nullptr); const Envoy::StreamInfo::FilterStateSharedPtr& filter_state = @@ -32,7 +34,9 @@ class LoggingTestFilter : public Http::PassThroughFilter { filter_state->getDataReadOnly(logging_id_); if (ext_proc_logging_info != nullptr) { EXPECT_NE(ext_proc_logging_info->bytesSent(), 0); - EXPECT_NE(ext_proc_logging_info->bytesReceived(), 0); + if (check_received_bytes_) { + EXPECT_NE(ext_proc_logging_info->bytesReceived(), 0); + } ASSERT_TRUE(ext_proc_logging_info->upstreamHost() != nullptr); EXPECT_EQ(ext_proc_logging_info->upstreamHost()->cluster().name(), expected_cluster_name_); } @@ -41,6 +45,7 @@ class LoggingTestFilter : public Http::PassThroughFilter { private: std::string logging_id_; std::string expected_cluster_name_; + const bool check_received_bytes_; }; class LoggingTestFilterFactory : public Extensions::HttpFilters::Common::FactoryBase< @@ -53,7 +58,8 @@ class LoggingTestFilterFactory : public Extensions::HttpFilters::Common::Factory Server::Configuration::FactoryContext&) override { return [=](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter(std::make_shared( - proto_config.logging_id(), proto_config.upstream_cluster_name())); + proto_config.logging_id(), proto_config.upstream_cluster_name(), + proto_config.check_received_bytes())); }; } }; diff --git a/test/extensions/filters/http/ext_proc/logging_test_filter.proto b/test/extensions/filters/http/ext_proc/logging_test_filter.proto index c6e5768b6860..293154343a33 100644 --- a/test/extensions/filters/http/ext_proc/logging_test_filter.proto +++ b/test/extensions/filters/http/ext_proc/logging_test_filter.proto @@ -5,4 +5,5 @@ package test.integration.filters; message LoggingTestFilterConfig { string logging_id = 1; string upstream_cluster_name = 2; + bool check_received_bytes = 3; } diff --git 
a/test/extensions/filters/http/grpc_field_extraction/filter_config_test.cc b/test/extensions/filters/http/grpc_field_extraction/filter_config_test.cc index 55485685d229..e973387b1bd3 100644 --- a/test/extensions/filters/http/grpc_field_extraction/filter_config_test.cc +++ b/test/extensions/filters/http/grpc_field_extraction/filter_config_test.cc @@ -200,6 +200,11 @@ extractions_by_method: { value: { } } + request_field_extractions: { + key: "repeated_supported_types.map" + value: { + } + } } })pb"); *proto_config_.mutable_descriptor_set()->mutable_filename() = diff --git a/test/extensions/filters/http/grpc_field_extraction/filter_test.cc b/test/extensions/filters/http/grpc_field_extraction/filter_test.cc index 166b1b322dd5..b4cf646a959e 100644 --- a/test/extensions/filters/http/grpc_field_extraction/filter_test.cc +++ b/test/extensions/filters/http/grpc_field_extraction/filter_test.cc @@ -639,6 +639,11 @@ extractions_by_method: { value: { } } + request_field_extractions: { + key: "repeated_supported_types.map" + value: { + } + } } })pb"); TestRequestHeaderMapImpl req_headers = @@ -677,6 +682,8 @@ repeated_supported_types: { sfixed64: 1111 float: 1.212 double: 1.313 + map { key: "key1" value: "value1" } + map { key: "key2" value: "value2" } } )pb"); @@ -853,6 +860,25 @@ fields { } } } +} +fields { + key: "repeated_supported_types.map" + value { + list_value { + values { + struct_value { + fields { + key: "key1" + value { string_value: "value1" } + } + fields { + key: "key2" + value { string_value: "value2" } + } + } + } + } + } })pb"); })); EXPECT_EQ(Envoy::Http::FilterDataStatus::Continue, filter_->decodeData(*request_data, true)); diff --git a/test/extensions/filters/http/rate_limit_quota/integration_test.cc b/test/extensions/filters/http/rate_limit_quota/integration_test.cc index 5af41c95c49b..ec4e45a89113 100644 --- a/test/extensions/filters/http/rate_limit_quota/integration_test.cc +++ b/test/extensions/filters/http/rate_limit_quota/integration_test.cc @@ -709,6 +709,95 @@ TEST_P(RateLimitQuotaIntegrationTest, BasicFlowPeriodicalReport) { } } +TEST_P(RateLimitQuotaIntegrationTest, BasicFlowPeriodicalReportWithStreamClosed) { + initializeConfig(); + HttpIntegrationTest::initialize(); + absl::flat_hash_map custom_headers = {{"environment", "staging"}, + {"group", "envoy"}}; + // Send downstream client request to upstream. + sendClientRequest(&custom_headers); + + ASSERT_TRUE(grpc_upstreams_[0]->waitForHttpConnection(*dispatcher_, rlqs_connection_)); + ASSERT_TRUE(rlqs_connection_->waitForNewStream(*dispatcher_, rlqs_stream_)); + // reports should be built in filter.cc + envoy::service::rate_limit_quota::v3::RateLimitQuotaUsageReports reports; + ASSERT_TRUE(rlqs_stream_->waitForGrpcMessage(*dispatcher_, reports)); + + // Verify the usage report content. + ASSERT_THAT(reports.bucket_quota_usages_size(), 1); + const auto& usage = reports.bucket_quota_usages(0); + // We only send single downstream client request and it is allowed. + EXPECT_EQ(usage.num_requests_allowed(), 1); + EXPECT_EQ(usage.num_requests_denied(), 0); + // It is first report so the time_elapsed is 0. + EXPECT_EQ(Protobuf::util::TimeUtil::DurationToSeconds(usage.time_elapsed()), 0); + + rlqs_stream_->startGrpcStream(); + + // Build the response. 
+ envoy::service::rate_limit_quota::v3::RateLimitQuotaResponse rlqs_response; + absl::flat_hash_map custom_headers_cpy = custom_headers; + custom_headers_cpy.insert({"name", "prod"}); + auto* bucket_action = rlqs_response.add_bucket_action(); + + for (const auto& [key, value] : custom_headers_cpy) { + (*bucket_action->mutable_bucket_id()->mutable_bucket()).insert({key, value}); + } + rlqs_stream_->sendGrpcMessage(rlqs_response); + + // Handle the request received by upstream. + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_->encodeData(100, true); + + // Verify the response to downstream. + ASSERT_TRUE(response_->waitForEndStream()); + EXPECT_TRUE(response_->complete()); + EXPECT_EQ(response_->headers().getStatusValue(), "200"); + + // ValidMatcherConfig. + int report_interval_sec = 60; + // Trigger the report periodically. + for (int i = 0; i < 6; ++i) { + if (i == 2) { + // Close the stream. + rlqs_stream_->finishGrpcStream(Grpc::Status::Ok); + } + + // Advance the time by report_interval. + simTime().advanceTimeWait(std::chrono::milliseconds(report_interval_sec * 1000)); + + // Only perform rlqs server check and response before stream is remotely closed. + if (i < 2) { + // Checks that the rate limit server has received the periodical reports. + ASSERT_TRUE(rlqs_stream_->waitForGrpcMessage(*dispatcher_, reports)); + + // Verify the usage report content. + ASSERT_THAT(reports.bucket_quota_usages_size(), 1); + const auto& usage = reports.bucket_quota_usages(0); + // Report only represents the usage since last report. + // In the periodical report case here, the number of request allowed and denied is 0 since no + // new requests comes in. + EXPECT_EQ(usage.num_requests_allowed(), 0); + EXPECT_EQ(usage.num_requests_denied(), 0); + // time_elapsed equals to periodical reporting interval. + EXPECT_EQ(Protobuf::util::TimeUtil::DurationToSeconds(usage.time_elapsed()), + report_interval_sec); + + // Build the rlqs server response. + envoy::service::rate_limit_quota::v3::RateLimitQuotaResponse rlqs_response2; + auto* bucket_action2 = rlqs_response2.add_bucket_action(); + + for (const auto& [key, value] : custom_headers_cpy) { + (*bucket_action2->mutable_bucket_id()->mutable_bucket()).insert({key, value}); + } + rlqs_stream_->sendGrpcMessage(rlqs_response2); + } + } +} + // RLQS filter is operating in non-blocking mode now, this test could be flaky until the stats are // added to make the test behavior deterministic. (e.g., wait for stats in the test). // Disable the test for now. 
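The integration test above drives the new stream-lifecycle handling in rate_limit_quota/client_impl.cc: once the RLQS stream is gone (closed locally or by the peer), usage reports are simply dropped instead of asserting. A minimal, self-contained sketch of that guard pattern, using simplified stand-in types rather than the actual Envoy classes:

#include <iostream>
#include <memory>
#include <string>

// Simplified stand-ins for the gRPC stream and report types (not the actual Envoy classes).
struct UsageReport {
  std::string payload;
};

struct Stream {
  void sendMessage(const UsageReport& report, bool end_stream) {
    std::cout << "sent: " << report.payload << " (end_stream=" << end_stream << ")\n";
  }
  void closeStream() { std::cout << "closed locally\n"; }
  void resetStream() { std::cout << "reset\n"; }
};

class Client {
public:
  Client() : stream_(std::make_unique<Stream>()) {}

  // Sending becomes a no-op once the stream handle has been cleared.
  void sendUsageReport(const UsageReport& report) {
    if (stream_ != nullptr) {
      // end_stream stays false so the stream is never half-closed from this side.
      stream_->sendMessage(report, /*end_stream=*/false);
    } else {
      std::cout << "stream already closed; dropping report\n";
    }
  }

  // Local close: tear down the stream and clear the handle in one place.
  void closeStream() {
    if (stream_ != nullptr) {
      stream_->closeStream();
      stream_->resetStream();
      stream_ = nullptr;
    }
  }

  // Remote close: the peer already ended the stream, so just drop the handle.
  void onRemoteClose() { stream_ = nullptr; }

private:
  std::unique_ptr<Stream> stream_;
};

int main() {
  Client client;
  client.sendUsageReport({"initial report"});  // sent
  client.onRemoteClose();                      // peer closes the stream
  client.sendUsageReport({"periodic report"}); // dropped, no assert/crash
  client.closeStream();                        // safe no-op, handle already cleared
}

Clearing the stream handle in one place replaces the separate stream_closed_ flag that the previous code had to keep in sync with the pointer.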
diff --git a/test/proto/apikeys.proto b/test/proto/apikeys.proto index a38bbd6f65dc..d292f7ad7d42 100644 --- a/test/proto/apikeys.proto +++ b/test/proto/apikeys.proto @@ -94,6 +94,8 @@ message RepeatedSupportedTypes { repeated float float = 12; repeated double double = 13; + + map map = 14; } message UnsupportedTypes { diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt index c0d360fa9d91..4d99a19158dc 100644 --- a/tools/base/requirements.txt +++ b/tools/base/requirements.txt @@ -25,9 +25,9 @@ aio-api-bazel==0.0.2 \ --hash=sha256:56e36463d236e477b7e282f2d870185a0b978b50e2c3803c1ebf8b8ac4b18f5b \ --hash=sha256:d3f563b7698e874437d80538a89dd4d79bc37de2e850c846330ae456e3f21dcc # via -r requirements.in -aio-api-github==0.2.5 \ - --hash=sha256:301a357209831ac2bc0fb5c79f8b8795a5363da5cabc2229f10155bdb6d42f5d \ - --hash=sha256:3532d0892e875e8bb6b188c0beba4e8bac9d5147e249ce987bb2beef1e7b711e +aio-api-github==0.2.6 \ + --hash=sha256:71ca0e572a48eab09f3e54267b374fb3d53e246b83f6f23fe1f29f5560acdaed \ + --hash=sha256:be12d6bf612ce2abc85c695ce74547220636f96fe80d4e64cd2de8670db69c32 # via # -r requirements.in # envoy-base-utils @@ -92,83 +92,83 @@ aiohappyeyeballs==2.3.5 \ --hash=sha256:4d6dea59215537dbc746e93e779caea8178c866856a721c9c660d7a5a7b8be03 \ --hash=sha256:6fa48b9f1317254f122a07a131a86b71ca6946ca989ce6326fff54a99a920105 # via aiohttp -aiohttp==3.10.2 \ - --hash=sha256:01c98041f90927c2cbd72c22a164bb816fa3010a047d264969cf82e1d4bcf8d1 \ - --hash=sha256:0df930015db36b460aa9badbf35eccbc383f00d52d4b6f3de2ccb57d064a6ade \ - --hash=sha256:1238fc979160bc03a92fff9ad021375ff1c8799c6aacb0d8ea1b357ea40932bb \ - --hash=sha256:14dc3fcb0d877911d775d511eb617a486a8c48afca0a887276e63db04d3ee920 \ - --hash=sha256:14eb6b17f6246959fb0b035d4f4ae52caa870c4edfb6170aad14c0de5bfbf478 \ - --hash=sha256:18186a80ec5a701816adbf1d779926e1069392cf18504528d6e52e14b5920525 \ - --hash=sha256:19073d57d0feb1865d12361e2a1f5a49cb764bf81a4024a3b608ab521568093a \ - --hash=sha256:1aa005f060aff7124cfadaa2493f00a4e28ed41b232add5869e129a2e395935a \ - --hash=sha256:2c474af073e1a6763e1c5522bbb2d85ff8318197e4c6c919b8d7886e16213345 \ - --hash=sha256:30a9d59da1543a6f1478c3436fd49ec59be3868bca561a33778b4391005e499d \ - --hash=sha256:341f8ece0276a828d95b70cd265d20e257f5132b46bf77d759d7f4e0443f2906 \ - --hash=sha256:352f3a4e5f11f3241a49b6a48bc5b935fabc35d1165fa0d87f3ca99c1fcca98b \ - --hash=sha256:377220a5efde6f9497c5b74649b8c261d3cce8a84cb661be2ed8099a2196400a \ - --hash=sha256:3988044d1635c7821dd44f0edfbe47e9875427464e59d548aece447f8c22800a \ - --hash=sha256:465e445ec348d4e4bd349edd8b22db75f025da9d7b6dc1369c48e7935b85581e \ - --hash=sha256:494a6f77560e02bd7d1ab579fdf8192390567fc96a603f21370f6e63690b7f3d \ - --hash=sha256:49904f38667c44c041a0b44c474b3ae36948d16a0398a8f8cd84e2bb3c42a069 \ - --hash=sha256:4d1f694b5d6e459352e5e925a42e05bac66655bfde44d81c59992463d2897014 \ - --hash=sha256:4ddb43d06ce786221c0dfd3c91b4892c318eaa36b903f7c4278e7e2fa0dd5102 \ - --hash=sha256:518dc3cb37365255708283d1c1c54485bbacccd84f0a0fb87ed8917ba45eda5b \ - --hash=sha256:53e8898adda402be03ff164b0878abe2d884e3ea03a4701e6ad55399d84b92dc \ - --hash=sha256:54ba10eb5a3481c28282eb6afb5f709aedf53cf9c3a31875ffbdc9fc719ffd67 \ - --hash=sha256:54e36c67e1a9273ecafab18d6693da0fb5ac48fd48417e4548ac24a918c20998 \ - --hash=sha256:562b1153ab7f766ee6b8b357ec777a302770ad017cf18505d34f1c088fccc448 \ - --hash=sha256:5a7ceb2a0d2280f23a02c64cd0afdc922079bb950400c3dd13a1ab2988428aac \ - --hash=sha256:655e583afc639bef06f3b2446972c1726007a21003cd0ef57116a123e44601bc \ - 
--hash=sha256:685c1508ec97b2cd3e120bfe309a4ff8e852e8a7460f1ef1de00c2c0ed01e33c \ - --hash=sha256:686c87782481fda5ee6ba572d912a5c26d9f98cc5c243ebd03f95222af3f1b0f \ - --hash=sha256:69d73f869cf29e8a373127fc378014e2b17bcfbe8d89134bc6fb06a2f67f3cb3 \ - --hash=sha256:6fe8503b1b917508cc68bf44dae28823ac05e9f091021e0c41f806ebbb23f92f \ - --hash=sha256:74c091a5ded6cb81785de2d7a8ab703731f26de910dbe0f3934eabef4ae417cc \ - --hash=sha256:7cc8f65f5b22304693de05a245b6736b14cb5bc9c8a03da6e2ae9ef15f8b458f \ - --hash=sha256:7dd9c7db94b4692b827ce51dcee597d61a0e4f4661162424faf65106775b40e7 \ - --hash=sha256:7de3ddb6f424af54535424082a1b5d1ae8caf8256ebd445be68c31c662354720 \ - --hash=sha256:7f98e70bbbf693086efe4b86d381efad8edac040b8ad02821453083d15ec315f \ - --hash=sha256:87fab7f948e407444c2f57088286e00e2ed0003ceaf3d8f8cc0f60544ba61d91 \ - --hash=sha256:8bd7be6ff6c162a60cb8fce65ee879a684fbb63d5466aba3fa5b9288eb04aefa \ - --hash=sha256:8da9449a575133828cc99985536552ea2dcd690e848f9d41b48d8853a149a959 \ - --hash=sha256:91b10208b222ddf655c3a3d5b727879d7163db12b634492df41a9182a76edaae \ - --hash=sha256:92f7f4a4dc9cdb5980973a74d43cdbb16286dacf8d1896b6c3023b8ba8436f8e \ - --hash=sha256:9360e3ffc7b23565600e729e8c639c3c50d5520e05fdf94aa2bd859eef12c407 \ - --hash=sha256:947847f07a8f81d7b39b2d0202fd73e61962ebe17ac2d8566f260679e467da7b \ - --hash=sha256:95213b3d79c7e387144e9cb7b9d2809092d6ff2c044cb59033aedc612f38fb6d \ - --hash=sha256:96e010736fc16d21125c7e2dc5c350cd43c528b85085c04bf73a77be328fe944 \ - --hash=sha256:99f81f9c1529fd8e03be4a7bd7df32d14b4f856e90ef6e9cbad3415dbfa9166c \ - --hash=sha256:9bb2834a6f11d65374ce97d366d6311a9155ef92c4f0cee543b2155d06dc921f \ - --hash=sha256:9dfc906d656e14004c5bc672399c1cccc10db38df2b62a13fb2b6e165a81c316 \ - --hash=sha256:9f6f0b252a009e98fe84028a4ec48396a948e7a65b8be06ccfc6ef68cf1f614d \ - --hash=sha256:9fd16b5e1a7bdd14668cd6bde60a2a29b49147a535c74f50d8177d11b38433a7 \ - --hash=sha256:a0fde16d284efcacbe15fb0c1013f0967b6c3e379649239d783868230bf1db42 \ - --hash=sha256:a1a50e59b720060c29e2951fd9f13c01e1ea9492e5a527b92cfe04dd64453c16 \ - --hash=sha256:a4be88807283bd96ae7b8e401abde4ca0bab597ba73b5e9a2d98f36d451e9aac \ - --hash=sha256:ad2274e707be37420d0b6c3d26a8115295fe9d8e6e530fa6a42487a8ca3ad052 \ - --hash=sha256:b2bfdda4971bd79201f59adbad24ec2728875237e1c83bba5221284dbbf57bda \ - --hash=sha256:b52a27a5c97275e254704e1049f4b96a81e67d6205f52fa37a4777d55b0e98ef \ - --hash=sha256:c01fbb87b5426381cd9418b3ddcf4fc107e296fa2d3446c18ce6c76642f340a3 \ - --hash=sha256:c836bf3c7512100219fe1123743fd8dd9a2b50dd7cfb0c3bb10d041309acab4b \ - --hash=sha256:c8e98e1845805f184d91fda6f9ab93d7c7b0dddf1c07e0255924bfdb151a8d05 \ - --hash=sha256:ca2f5abcb0a9a47e56bac173c01e9f6c6e7f27534d91451c5f22e6a35a5a2093 \ - --hash=sha256:cd33d9de8cfd006a0d0fe85f49b4183c57e91d18ffb7e9004ce855e81928f704 \ - --hash=sha256:d611d1a01c25277bcdea06879afbc11472e33ce842322496b211319aa95441bb \ - --hash=sha256:d9076810a5621236e29b2204e67a68e1fe317c8727ee4c9abbfbb1083b442c38 \ - --hash=sha256:d984db6d855de58e0fde1ef908d48fe9a634cadb3cf715962722b4da1c40619d \ - --hash=sha256:dafb4abb257c0ed56dc36f4e928a7341b34b1379bd87e5a15ce5d883c2c90574 \ - --hash=sha256:ddfd2dca3f11c365d6857a07e7d12985afc59798458a2fdb2ffa4a0332a3fd43 \ - --hash=sha256:df59f8486507c421c0620a2c3dce81fbf1d54018dc20ff4fecdb2c106d6e6abc \ - --hash=sha256:e00191d38156e09e8c81ef3d75c0d70d4f209b8381e71622165f22ef7da6f101 \ - --hash=sha256:e2f43d238eae4f0b04f58d4c0df4615697d4ca3e9f9b1963d49555a94f0f5a04 \ - 
--hash=sha256:e57e21e1167705f8482ca29cc5d02702208d8bf4aff58f766d94bcd6ead838cd \ - --hash=sha256:e8f515d6859e673940e08de3922b9c4a2249653b0ac181169313bd6e4b1978ac \ - --hash=sha256:eabe6bf4c199687592f5de4ccd383945f485779c7ffb62a9b9f1f8a3f9756df8 \ - --hash=sha256:ec6ad66ed660d46503243cbec7b2b3d8ddfa020f984209b3b8ef7d98ce69c3f2 \ - --hash=sha256:f81cd85a0e76ec7b8e2b6636fe02952d35befda4196b8c88f3cec5b4fb512839 \ - --hash=sha256:f9f49bdb94809ac56e09a310a62f33e5f22973d6fd351aac72a39cd551e98194 \ - --hash=sha256:fae962b62944eaebff4f4fddcf1a69de919e7b967136a318533d82d93c3c6bd1 \ - --hash=sha256:fc61f39b534c5d5903490478a0dd349df397d2284a939aa3cbaa2fb7a19b8397 +aiohttp==3.10.3 \ + --hash=sha256:05d66203a530209cbe40f102ebaac0b2214aba2a33c075d0bf825987c36f1f0b \ + --hash=sha256:08bd0754d257b2db27d6bab208c74601df6f21bfe4cb2ec7b258ba691aac64b3 \ + --hash=sha256:0974f3b5b0132edcec92c3306f858ad4356a63d26b18021d859c9927616ebf27 \ + --hash=sha256:09bc79275737d4dc066e0ae2951866bb36d9c6b460cb7564f111cc0427f14844 \ + --hash=sha256:123e5819bfe1b87204575515cf448ab3bf1489cdeb3b61012bde716cda5853e7 \ + --hash=sha256:13031e7ec1188274bad243255c328cc3019e36a5a907978501256000d57a7201 \ + --hash=sha256:166de65e2e4e63357cfa8417cf952a519ac42f1654cb2d43ed76899e2319b1ee \ + --hash=sha256:214277dcb07ab3875f17ee1c777d446dcce75bea85846849cc9d139ab8f5081f \ + --hash=sha256:21650e7032cc2d31fc23d353d7123e771354f2a3d5b05a5647fc30fea214e696 \ + --hash=sha256:24fade6dae446b183e2410a8628b80df9b7a42205c6bfc2eff783cbeedc224a2 \ + --hash=sha256:2a5d0ea8a6467b15d53b00c4e8ea8811e47c3cc1bdbc62b1aceb3076403d551f \ + --hash=sha256:2b0f670502100cdc567188c49415bebba947eb3edaa2028e1a50dd81bd13363f \ + --hash=sha256:2bbc55a964b8eecb341e492ae91c3bd0848324d313e1e71a27e3d96e6ee7e8e8 \ + --hash=sha256:32007fdcaab789689c2ecaaf4b71f8e37bf012a15cd02c0a9db8c4d0e7989fa8 \ + --hash=sha256:3461d9294941937f07bbbaa6227ba799bc71cc3b22c40222568dc1cca5118f68 \ + --hash=sha256:3731a73ddc26969d65f90471c635abd4e1546a25299b687e654ea6d2fc052394 \ + --hash=sha256:38d91b98b4320ffe66efa56cb0f614a05af53b675ce1b8607cdb2ac826a8d58e \ + --hash=sha256:3a9dcdccf50284b1b0dc72bc57e5bbd3cc9bf019060dfa0668f63241ccc16aa7 \ + --hash=sha256:434b3ab75833accd0b931d11874e206e816f6e6626fd69f643d6a8269cd9166a \ + --hash=sha256:43b09f38a67679e32d380fe512189ccb0b25e15afc79b23fbd5b5e48e4fc8fd9 \ + --hash=sha256:44bb159b55926b57812dca1b21c34528e800963ffe130d08b049b2d6b994ada7 \ + --hash=sha256:48665433bb59144aaf502c324694bec25867eb6630fcd831f7a893ca473fcde4 \ + --hash=sha256:50544fe498c81cb98912afabfc4e4d9d85e89f86238348e3712f7ca6a2f01dab \ + --hash=sha256:5337cc742a03f9e3213b097abff8781f79de7190bbfaa987bd2b7ceb5bb0bdec \ + --hash=sha256:56fb94bae2be58f68d000d046172d8b8e6b1b571eb02ceee5535e9633dcd559c \ + --hash=sha256:59c489661edbd863edb30a8bd69ecb044bd381d1818022bc698ba1b6f80e5dd1 \ + --hash=sha256:5ba2e838b5e6a8755ac8297275c9460e729dc1522b6454aee1766c6de6d56e5e \ + --hash=sha256:61ccb867b2f2f53df6598eb2a93329b5eee0b00646ee79ea67d68844747a418e \ + --hash=sha256:671efce3a4a0281060edf9a07a2f7e6230dca3a1cbc61d110eee7753d28405f7 \ + --hash=sha256:673bb6e3249dc8825df1105f6ef74e2eab779b7ff78e96c15cadb78b04a83752 \ + --hash=sha256:6ae9ae382d1c9617a91647575255ad55a48bfdde34cc2185dd558ce476bf16e9 \ + --hash=sha256:6c51ed03e19c885c8e91f574e4bbe7381793f56f93229731597e4a499ffef2a5 \ + --hash=sha256:6d881353264e6156f215b3cb778c9ac3184f5465c2ece5e6fce82e68946868ef \ + --hash=sha256:7084876352ba3833d5d214e02b32d794e3fd9cf21fdba99cff5acabeb90d9806 \ + 
--hash=sha256:70b4a4984a70a2322b70e088d654528129783ac1ebbf7dd76627b3bd22db2f17 \ + --hash=sha256:71bb1d97bfe7e6726267cea169fdf5df7658831bb68ec02c9c6b9f3511e108bb \ + --hash=sha256:7c126f532caf238031c19d169cfae3c6a59129452c990a6e84d6e7b198a001dc \ + --hash=sha256:7f9159ae530297f61a00116771e57516f89a3de6ba33f314402e41560872b50a \ + --hash=sha256:812121a201f0c02491a5db335a737b4113151926a79ae9ed1a9f41ea225c0e3f \ + --hash=sha256:8542c9e5bcb2bd3115acdf5adc41cda394e7360916197805e7e32b93d821ef93 \ + --hash=sha256:85466b5a695c2a7db13eb2c200af552d13e6a9313d7fa92e4ffe04a2c0ea74c1 \ + --hash=sha256:8d98c604c93403288591d7d6d7d6cc8a63459168f8846aeffd5b3a7f3b3e5e09 \ + --hash=sha256:8da6b48c20ce78f5721068f383e0e113dde034e868f1b2f5ee7cb1e95f91db57 \ + --hash=sha256:926e68438f05703e500b06fe7148ef3013dd6f276de65c68558fa9974eeb59ad \ + --hash=sha256:9743fa34a10a36ddd448bba8a3adc2a66a1c575c3c2940301bacd6cc896c6bf1 \ + --hash=sha256:a541414578ff47c0a9b0b8b77381ea86b0c8531ab37fc587572cb662ccd80b88 \ + --hash=sha256:ab3361159fd3dcd0e48bbe804006d5cfb074b382666e6c064112056eb234f1a9 \ + --hash=sha256:aed12a54d4e1ee647376fa541e1b7621505001f9f939debf51397b9329fd88b9 \ + --hash=sha256:af4dbec58e37f5afff4f91cdf235e8e4b0bd0127a2a4fd1040e2cad3369d2f06 \ + --hash=sha256:b031ce229114825f49cec4434fa844ccb5225e266c3e146cb4bdd025a6da52f1 \ + --hash=sha256:b22cae3c9dd55a6b4c48c63081d31c00fc11fa9db1a20c8a50ee38c1a29539d2 \ + --hash=sha256:b51aef59370baf7444de1572f7830f59ddbabd04e5292fa4218d02f085f8d299 \ + --hash=sha256:b69d832e5f5fa15b1b6b2c8eb6a9fd2c0ec1fd7729cb4322ed27771afc9fc2ac \ + --hash=sha256:b84857b66fa6510a163bb083c1199d1ee091a40163cfcbbd0642495fed096204 \ + --hash=sha256:b97dc9a17a59f350c0caa453a3cb35671a2ffa3a29a6ef3568b523b9113d84e5 \ + --hash=sha256:ba562736d3fbfe9241dad46c1a8994478d4a0e50796d80e29d50cabe8fbfcc3f \ + --hash=sha256:bac352fceed158620ce2d701ad39d4c1c76d114255a7c530e057e2b9f55bdf9f \ + --hash=sha256:baec1eb274f78b2de54471fc4c69ecbea4275965eab4b556ef7a7698dee18bf2 \ + --hash=sha256:bc8e9f15939dacb0e1f2d15f9c41b786051c10472c7a926f5771e99b49a5957f \ + --hash=sha256:bf75716377aad2c718cdf66451c5cf02042085d84522aec1f9246d3e4b8641a6 \ + --hash=sha256:c124b9206b1befe0491f48185fd30a0dd51b0f4e0e7e43ac1236066215aff272 \ + --hash=sha256:c9ed607dbbdd0d4d39b597e5bf6b0d40d844dfb0ac6a123ed79042ef08c1f87e \ + --hash=sha256:cc36cbdedf6f259371dbbbcaae5bb0e95b879bc501668ab6306af867577eb5db \ + --hash=sha256:cd788602e239ace64f257d1c9d39898ca65525583f0fbf0988bcba19418fe93f \ + --hash=sha256:d1100e68e70eb72eadba2b932b185ebf0f28fd2f0dbfe576cfa9d9894ef49752 \ + --hash=sha256:d35235a44ec38109b811c3600d15d8383297a8fab8e3dec6147477ec8636712a \ + --hash=sha256:d3e66d5b506832e56add66af88c288c1d5ba0c38b535a1a59e436b300b57b23e \ + --hash=sha256:d5548444ef60bf4c7b19ace21f032fa42d822e516a6940d36579f7bfa8513f9c \ + --hash=sha256:d5a9ec959b5381271c8ec9310aae1713b2aec29efa32e232e5ef7dcca0df0279 \ + --hash=sha256:d73b073a25a0bb8bf014345374fe2d0f63681ab5da4c22f9d2025ca3e3ea54fc \ + --hash=sha256:e021c4c778644e8cdc09487d65564265e6b149896a17d7c0f52e9a088cc44e1b \ + --hash=sha256:e1128c5d3a466279cb23c4aa32a0f6cb0e7d2961e74e9e421f90e74f75ec1edf \ + --hash=sha256:e8cc0564b286b625e673a2615ede60a1704d0cbbf1b24604e28c31ed37dc62aa \ + --hash=sha256:f25d6c4e82d7489be84f2b1c8212fafc021b3731abdb61a563c90e37cced3a21 \ + --hash=sha256:f817a54059a4cfbc385a7f51696359c642088710e731e8df80d0607193ed2b73 \ + --hash=sha256:fda91ad797e4914cca0afa8b6cccd5d2b3569ccc88731be202f6adce39503189 # via # -r requirements.in # aio-api-github @@ -515,9 +515,9 @@ 
envoy-code-check==0.5.13 \ --hash=sha256:58c31be3ba1a3273eec8a76d1dcfe1a3ae5eae4730ca9d70a85fec0d641846c4 \ --hash=sha256:6c568d477642abdf7b41a0b6a5bb21fd480d92e500c53120837a01d4436d8591 # via -r requirements.in -envoy-dependency-check==0.1.12 \ --hash=sha256:4673cb4cf9c0e2c55b2a0e0b39df3b8df9993d6524c6edb9527d3c8fb1ec24e2 \ --hash=sha256:7443e530a2a9155d1e114b8a99d9355bbbe73005b0c96ee653907912ae368f3c +envoy-dependency-check==0.1.13 \ --hash=sha256:4337b9c4129ae723dc92f70733b167a8dde187368d873687c6c54732d6fb5e48 \ --hash=sha256:795e885eccd072d7878dc8ce11fe9f84761f0e449603e583fdab5e9e17111af2 # via -r requirements.in envoy-distribution-distrotest==0.0.10 \ --hash=sha256:83e912c48da22eb3e514fc1142247d33eb7ed0d59e94eca2ffbd178a26fbf808 \ diff --git a/tools/gcs/BUILD b/tools/gcs/BUILD new file mode 100644 index 000000000000..c42a57de4cfc --- /dev/null +++ b/tools/gcs/BUILD @@ -0,0 +1,16 @@ +load("@envoy_repo//:path.bzl", "PATH") +load("//bazel:envoy_build_system.bzl", "envoy_package") + +licenses(["notice"]) # Apache 2 + +envoy_package() + +sh_binary( + name = "upload", + srcs = ["upload.sh"], + data = ["//tools/gsutil"], + env = { + "ENVOY_SOURCE_DIR": PATH, + "GSUTIL": "$(location //tools/gsutil)", + }, +) diff --git a/ci/upload_gcs_artifact.sh b/tools/gcs/upload.sh similarity index 52% rename from ci/upload_gcs_artifact.sh rename to tools/gcs/upload.sh index f088f60299b8..f6e26516ce99 100755 --- a/ci/upload_gcs_artifact.sh +++ b/tools/gcs/upload.sh @@ -2,16 +2,43 @@ set -e -o pipefail +GCS_ARTIFACT_BUCKET="${1:-}" +GCP_SERVICE_ACCOUNT_KEY_PATH="${2:-}" +UPLOAD_DIRECTORY="${3:-}" +TARGET_SUFFIX="${4:-}" +REDIRECT_PATH="${5:-}" + +# +# $ bazel run //tools/gcs:upload BUCKETNAME KEY_PATH UPLOAD_DIRECTORY TARGET_SUFFIX REDIRECT_PATH + + +if [[ -z "${GSUTIL}" ]]; then + echo "GSUTIL is not set, not uploading artifacts." + exit 1 +fi + +if [[ -z "${ENVOY_SOURCE_DIR}" ]]; then + echo "ENVOY_SOURCE_DIR is not set, not uploading artifacts." + exit 1 +fi + if [[ -z "${GCS_ARTIFACT_BUCKET}" ]]; then echo "Artifact bucket is not set, not uploading artifacts." exit 1 fi -read -ra BAZEL_STARTUP_OPTIONS <<< "${BAZEL_STARTUP_OPTION_LIST:-}" -read -ra BAZEL_BUILD_OPTIONS <<< "${BAZEL_BUILD_OPTION_LIST:-}" - if [[ ! -s "${GCP_SERVICE_ACCOUNT_KEY_PATH}" ]]; then - echo "GCP key is not set, not uploading artifacts." + echo "GCP key path is not set, not uploading artifacts." + exit 1 +fi + +if [[ -z "${UPLOAD_DIRECTORY}" ]]; then + echo "UPLOAD_DIRECTORY is not set, not uploading artifacts." + exit 1 +fi + +if [[ -z "${TARGET_SUFFIX}" ]]; then + echo "TARGET_SUFFIX is not set, not uploading artifacts." exit 1 fi @@ -20,13 +47,6 @@ cat <<EOF > ~/.boto gs_service_key_file=${GCP_SERVICE_ACCOUNT_KEY_PATH} EOF -SOURCE_DIRECTORY="$1" -TARGET_SUFFIX="$2" - -if [ ! -d "${SOURCE_DIRECTORY}" ]; then - echo "ERROR: ${SOURCE_DIRECTORY} is not found." - exit 1 -fi # Upload to the last commit sha (first 7 chars) # the bucket is either `envoy-postsubmit` or `envoy-pr` @@ -40,32 +60,31 @@ fi # https://storage.googleapis.com/envoy-pr/28462/docs/index.html # -UPLOAD_PATH="$(git rev-parse HEAD | head -c7)" -REDIRECT_PATH="${SYSTEM_PULLREQUEST_PULLREQUESTNUMBER:-${BUILD_SOURCEBRANCHNAME}}" +UPLOAD_PATH="$(git -C "${ENVOY_SOURCE_DIR}" rev-parse HEAD | head -c7)" GCS_LOCATION="${GCS_ARTIFACT_BUCKET}/${UPLOAD_PATH}/${TARGET_SUFFIX}" echo "Uploading to gs://${GCS_LOCATION} ..."
-bazel "${BAZEL_STARTUP_OPTIONS[@]}" run "${BAZEL_BUILD_OPTIONS[@]}" \ - //tools/gsutil \ - -- -mq rsync \ - -dr "${SOURCE_DIRECTORY}" \ +"${GSUTIL}" \ + -mq rsync \ + -dr "${UPLOAD_DIRECTORY}" \ "gs://${GCS_LOCATION}" +if [[ -z "${REDIRECT_PATH}" ]]; then + echo "Artifacts uploaded to: https://storage.googleapis.com/${GCS_LOCATION}" >&2 + exit 0 +fi + TMP_REDIRECT="/tmp/redirect/${REDIRECT_PATH}/${TARGET_SUFFIX}" mkdir -p "$TMP_REDIRECT" echo "" \ > "${TMP_REDIRECT}/index.html" GCS_REDIRECT="${GCS_ARTIFACT_BUCKET}/${REDIRECT_PATH}/${TARGET_SUFFIX}" -echo "Uploading redirect to gs://${GCS_REDIRECT} ..." -bazel "${BAZEL_STARTUP_OPTIONS[@]}" run "${BAZEL_BUILD_OPTIONS[@]}" \ - //tools/gsutil \ - -- -h "Cache-Control:no-cache,max-age=0" \ +echo "Uploading redirect to gs://${GCS_REDIRECT} ..." >&2 +"${GSUTIL}" \ + -h "Cache-Control:no-cache,max-age=0" \ -mq rsync \ -dr "${TMP_REDIRECT}" \ "gs://${GCS_REDIRECT}" -if [[ "${COVERAGE_FAILED}" -eq 1 ]]; then - echo "##vso[task.logissue type=error]Coverage failed, check artifact at: https://storage.googleapis.com/${GCS_LOCATION}/index.html" -fi - -echo "Artifacts uploaded to: https://storage.googleapis.com/${GCS_LOCATION}/index.html" +echo "Artifacts uploaded to: https://storage.googleapis.com/${GCS_REDIRECT}/index.html" >&2 +echo "https://storage.googleapis.com/${GCS_REDIRECT}/index.html"